From d635711daa98be86d4c7fd01499c34f566b54ccb Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Fri, 10 Jun 2016 05:30:17 -0300
Subject: Linux-libre 4.6.2-gnu

---
 drivers/staging/lustre/Kconfig | 4 +-
 .../staging/lustre/include/linux/libcfs/libcfs.h | 12 +-
 .../lustre/include/linux/libcfs/libcfs_cpu.h | 5 -
 .../lustre/include/linux/libcfs/libcfs_debug.h | 2 +-
 .../lustre/include/linux/libcfs/libcfs_ioctl.h | 61 +-
 .../include/linux/libcfs/libcfs_kernelcomm.h | 118 --
 .../lustre/include/linux/libcfs/libcfs_private.h | 7 +-
 .../lustre/include/linux/libcfs/libcfs_string.h | 2 -
 .../lustre/include/linux/libcfs/linux/libcfs.h | 3 -
 .../lustre/include/linux/libcfs/linux/linux-cpu.h | 5 -
 .../lustre/include/linux/libcfs/linux/linux-mem.h | 4 +-
 drivers/staging/lustre/include/linux/lnet/api.h | 23 +-
 .../staging/lustre/include/linux/lnet/lib-dlc.h | 122 ++
 .../staging/lustre/include/linux/lnet/lib-lnet.h | 129 +-
 .../staging/lustre/include/linux/lnet/lib-types.h | 103 +-
 .../staging/lustre/include/linux/lnet/lnetctl.h | 104 +-
 drivers/staging/lustre/include/linux/lnet/lnetst.h | 104 +-
 drivers/staging/lustre/include/linux/lnet/nidstr.h | 9 +-
 .../staging/lustre/include/linux/lnet/socklnd.h | 9 +-
 drivers/staging/lustre/include/linux/lnet/types.h | 51 +-
 drivers/staging/lustre/lnet/Kconfig | 14 +-
 drivers/staging/lustre/lnet/Makefile | 2 +-
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 705 +++----
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 159 +-
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 1048 ++++++----
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 8 +-
 .../staging/lustre/lnet/klnds/socklnd/socklnd.c | 631 +++---
 .../staging/lustre/lnet/klnds/socklnd/socklnd.h | 31 +-
 .../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 789 ++++----
 .../lustre/lnet/klnds/socklnd/socklnd_lib.c | 197 +-
 .../lustre/lnet/klnds/socklnd/socklnd_modparams.c | 11 +-
 .../lustre/lnet/klnds/socklnd/socklnd_proto.c | 175 +-
 drivers/staging/lustre/lnet/libcfs/Makefile | 17 +
 drivers/staging/lustre/lnet/libcfs/debug.c | 560 ++++++
 drivers/staging/lustre/lnet/libcfs/fail.c | 139 ++
 drivers/staging/lustre/lnet/libcfs/hash.c | 2085 +++++++++++++++++++
 drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c | 227 +++
 drivers/staging/lustre/lnet/libcfs/libcfs_lock.c | 185 ++
 drivers/staging/lustre/lnet/libcfs/libcfs_mem.c | 196 ++
 drivers/staging/lustre/lnet/libcfs/libcfs_string.c | 581 ++++++
 .../staging/lustre/lnet/libcfs/linux/linux-cpu.c | 1040 ++++++++++
 .../lustre/lnet/libcfs/linux/linux-crypto-adler.c | 137 ++
 .../lustre/lnet/libcfs/linux/linux-crypto.c | 297 +++
 .../lustre/lnet/libcfs/linux/linux-crypto.h | 29 +
 .../lustre/lnet/libcfs/linux/linux-curproc.c | 111 ++
 .../staging/lustre/lnet/libcfs/linux/linux-debug.c | 200 ++
 .../staging/lustre/lnet/libcfs/linux/linux-mem.c | 59 +
 .../lustre/lnet/libcfs/linux/linux-module.c | 159 ++
 .../staging/lustre/lnet/libcfs/linux/linux-prim.c | 147 ++
 .../lustre/lnet/libcfs/linux/linux-tracefile.c | 259 +++
 drivers/staging/lustre/lnet/libcfs/module.c | 674 +++++++
 drivers/staging/lustre/lnet/libcfs/prng.c | 140 ++
 drivers/staging/lustre/lnet/libcfs/tracefile.c | 1208 +++++++++++
 drivers/staging/lustre/lnet/libcfs/tracefile.h | 266 +++
 drivers/staging/lustre/lnet/libcfs/workitem.c | 469 +++++
 drivers/staging/lustre/lnet/lnet/Makefile | 2 +-
 drivers/staging/lustre/lnet/lnet/acceptor.c | 113 +-
 drivers/staging/lustre/lnet/lnet/api-ni.c | 1534 +++++++++-----
 drivers/staging/lustre/lnet/lnet/config.c | 315 +--
 drivers/staging/lustre/lnet/lnet/lib-eq.c | 82 +-
 drivers/staging/lustre/lnet/lnet/lib-md.c | 105 +-
 drivers/staging/lustre/lnet/lnet/lib-me.c | 21 +-
 drivers/staging/lustre/lnet/lnet/lib-move.c | 725 ++++---
 drivers/staging/lustre/lnet/lnet/lib-msg.c | 114 +-
 drivers/staging/lustre/lnet/lnet/lib-ptl.c | 258 ++-
 drivers/staging/lustre/lnet/lnet/lib-socket.c | 149 +-
 drivers/staging/lustre/lnet/lnet/lo.c | 14 +-
 drivers/staging/lustre/lnet/lnet/module.c | 121 +-
 drivers/staging/lustre/lnet/lnet/net_fault.c | 1025 ++++++++++
 drivers/staging/lustre/lnet/lnet/nidstrings.c | 119 +-
 drivers/staging/lustre/lnet/lnet/peer.c | 235 ++-
 drivers/staging/lustre/lnet/lnet/router.c | 677 +++++--
 drivers/staging/lustre/lnet/lnet/router_proc.c | 189 +-
 drivers/staging/lustre/lnet/selftest/brw_test.c | 208 +-
 drivers/staging/lustre/lnet/selftest/conctl.c | 363 ++--
 drivers/staging/lustre/lnet/selftest/conrpc.c | 350 ++--
 drivers/staging/lustre/lnet/selftest/conrpc.h | 63 +-
 drivers/staging/lustre/lnet/selftest/console.c | 537 ++---
 drivers/staging/lustre/lnet/selftest/console.h | 181 +-
 drivers/staging/lustre/lnet/selftest/framework.c | 482 +++--
 drivers/staging/lustre/lnet/selftest/module.c | 32 +-
 drivers/staging/lustre/lnet/selftest/ping_test.c | 55 +-
 drivers/staging/lustre/lnet/selftest/rpc.c | 453 +++--
 drivers/staging/lustre/lnet/selftest/rpc.h | 184 +-
 drivers/staging/lustre/lnet/selftest/selftest.h | 332 ++--
 drivers/staging/lustre/lnet/selftest/timer.c | 25 +-
 drivers/staging/lustre/lnet/selftest/timer.h | 14 +-
 drivers/staging/lustre/lustre/Kconfig | 4 +-
 drivers/staging/lustre/lustre/Makefile | 2 +-
 drivers/staging/lustre/lustre/fid/fid_request.c | 32 +-
 drivers/staging/lustre/lustre/fid/lproc_fid.c | 16 +-
 drivers/staging/lustre/lustre/fld/fld_cache.c | 33 +-
 drivers/staging/lustre/lustre/fld/fld_internal.h | 35 +-
 drivers/staging/lustre/lustre/fld/fld_request.c | 69 +-
 drivers/staging/lustre/lustre/fld/lproc_fld.c | 14 +-
 drivers/staging/lustre/lustre/include/cl_object.h | 491 ++---
 drivers/staging/lustre/lustre/include/lclient.h | 7 +-
 .../lustre/include/linux/lustre_patchless_compat.h | 2 +-
 drivers/staging/lustre/lustre/include/linux/obd.h | 18 +-
 .../staging/lustre/lustre/include/lprocfs_status.h | 57 +-
 drivers/staging/lustre/lustre/include/lu_object.h | 24 +-
 drivers/staging/lustre/lustre/include/lu_ref.h | 4 -
 .../lustre/lustre/include/lustre/ll_fiemap.h | 63 +-
 .../lustre/include/lustre/lustre_build_version.h | 2 -
 .../lustre/lustre/include/lustre/lustre_idl.h | 797 +++----
 .../lustre/lustre/include/lustre/lustre_user.h | 101 +-
 drivers/staging/lustre/lustre/include/lustre_cfg.h | 20 +-
 .../staging/lustre/lustre/include/lustre_disk.h | 252 +--
 drivers/staging/lustre/lustre/include/lustre_dlm.h | 133 +-
 .../lustre/lustre/include/lustre_dlm_flags.h | 55 +-
 .../staging/lustre/lustre/include/lustre_export.h | 91 +-
 drivers/staging/lustre/lustre/include/lustre_fid.h | 20 +-
 drivers/staging/lustre/lustre/include/lustre_fld.h | 33 +-
 .../staging/lustre/lustre/include/lustre_handles.h | 3 +-
 .../staging/lustre/lustre/include/lustre_import.h | 3 +-
 .../lustre/lustre/include/lustre_kernelcomm.h | 55 +
 drivers/staging/lustre/lustre/include/lustre_lib.h | 27 +-
 .../staging/lustre/lustre/include/lustre_lite.h | 50 +-
 drivers/staging/lustre/lustre/include/lustre_log.h | 15 +-
 drivers/staging/lustre/lustre/include/lustre_mdc.h | 20 +-
 drivers/staging/lustre/lustre/include/lustre_net.h | 86 +-
 .../lustre/lustre/include/lustre_req_layout.h | 13 +-
 drivers/staging/lustre/lustre/include/lustre_sec.h | 128 +-
 drivers/staging/lustre/lustre/include/lustre_ver.h | 18 +-
 drivers/staging/lustre/lustre/include/obd.h | 132 +-
 drivers/staging/lustre/lustre/include/obd_cksum.h | 21 +-
 drivers/staging/lustre/lustre/include/obd_class.h | 182 +-
 .../staging/lustre/lustre/include/obd_support.h | 21 +-
 .../lustre/lustre/include/uapi_kernelcomm.h | 94 +
 drivers/staging/lustre/lustre/lclient/glimpse.c | 3 +-
 drivers/staging/lustre/lustre/lclient/lcommon_cl.c | 81 +-
 .../staging/lustre/lustre/lclient/lcommon_misc.c | 6 +-
 drivers/staging/lustre/lustre/ldlm/interval_tree.c | 8 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_extent.c | 25 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_flock.c | 64 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_internal.h | 15 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 107 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_lock.c | 184 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c | 91 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 19 +-
 drivers/staging/lustre/lustre/ldlm/ldlm_request.c | 235 ++-
 drivers/staging/lustre/lustre/ldlm/ldlm_resource.c | 73 +-
 drivers/staging/lustre/lustre/libcfs/Makefile | 18 -
 drivers/staging/lustre/lustre/libcfs/debug.c | 559 ------
 drivers/staging/lustre/lustre/libcfs/fail.c | 138 --
 drivers/staging/lustre/lustre/libcfs/hash.c | 2092 --------------------
 .../lustre/lustre/libcfs/kernel_user_comm.c | 242 ---
 drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c | 224 ---
 drivers/staging/lustre/lustre/libcfs/libcfs_lock.c | 187 --
 drivers/staging/lustre/lustre/libcfs/libcfs_mem.c | 200 --
 .../staging/lustre/lustre/libcfs/libcfs_string.c | 564 ------
 .../staging/lustre/lustre/libcfs/linux/linux-cpu.c | 1042 ----------
 .../lustre/libcfs/linux/linux-crypto-adler.c | 137 --
 .../lustre/lustre/libcfs/linux/linux-crypto.c | 290 ---
 .../lustre/lustre/libcfs/linux/linux-crypto.h | 29 -
 .../lustre/lustre/libcfs/linux/linux-curproc.c | 112 --
 .../lustre/lustre/libcfs/linux/linux-debug.c | 199 --
 .../staging/lustre/lustre/libcfs/linux/linux-mem.c | 59 -
 .../lustre/lustre/libcfs/linux/linux-module.c | 180 --
 .../lustre/lustre/libcfs/linux/linux-prim.c | 147 --
 .../lustre/lustre/libcfs/linux/linux-tracefile.c | 272 ---
 drivers/staging/lustre/lustre/libcfs/module.c | 765 -------
 drivers/staging/lustre/lustre/libcfs/prng.c | 139 --
 drivers/staging/lustre/lustre/libcfs/tracefile.c | 1165 -----------
 drivers/staging/lustre/lustre/libcfs/tracefile.h | 319 ---
 drivers/staging/lustre/lustre/libcfs/workitem.c | 465 -----
 drivers/staging/lustre/lustre/llite/dcache.c | 41 +-
 drivers/staging/lustre/lustre/llite/dir.c | 175 +-
 drivers/staging/lustre/lustre/llite/file.c | 457 +++--
 drivers/staging/lustre/lustre/llite/llite_close.c | 39 +-
 .../staging/lustre/lustre/llite/llite_internal.h | 175 +-
 drivers/staging/lustre/lustre/llite/llite_lib.c | 228 +--
 drivers/staging/lustre/lustre/llite/llite_mmap.c | 48 +-
 drivers/staging/lustre/lustre/llite/llite_nfs.c | 54 +-
 drivers/staging/lustre/lustre/llite/llite_rmtacl.c | 18 +-
 drivers/staging/lustre/lustre/llite/lloop.c | 42 +-
 drivers/staging/lustre/lustre/llite/lproc_llite.c | 51 +-
 drivers/staging/lustre/lustre/llite/namei.c | 106 +-
 drivers/staging/lustre/lustre/llite/remote_perm.c | 4 +-
 drivers/staging/lustre/lustre/llite/rw.c | 136 +-
 drivers/staging/lustre/lustre/llite/rw26.c | 107 +-
 drivers/staging/lustre/lustre/llite/statahead.c | 112 +-
 drivers/staging/lustre/lustre/llite/super25.c | 33 +-
 drivers/staging/lustre/lustre/llite/symlink.c | 8 +-
 drivers/staging/lustre/lustre/llite/vvp_dev.c | 20 +-
 drivers/staging/lustre/lustre/llite/vvp_internal.h | 13 +-
 drivers/staging/lustre/lustre/llite/vvp_io.c | 80 +-
 drivers/staging/lustre/lustre/llite/vvp_object.c | 8 +-
 drivers/staging/lustre/lustre/llite/vvp_page.c | 51 +-
 drivers/staging/lustre/lustre/llite/xattr.c | 53 +-
 drivers/staging/lustre/lustre/llite/xattr_cache.c | 45 +-
 drivers/staging/lustre/lustre/lmv/lmv_fld.c | 3 +-
 drivers/staging/lustre/lustre/lmv/lmv_intent.c | 22 +-
 drivers/staging/lustre/lustre/lmv/lmv_internal.h | 10 +-
 drivers/staging/lustre/lustre/lmv/lmv_obd.c | 303 ++-
 drivers/staging/lustre/lustre/lmv/lproc_lmv.c | 4 +-
 .../staging/lustre/lustre/lov/lov_cl_internal.h | 93 +-
 drivers/staging/lustre/lustre/lov/lov_dev.c | 48 +-
 drivers/staging/lustre/lustre/lov/lov_ea.c | 11 +-
 drivers/staging/lustre/lustre/lov/lov_internal.h | 15 +-
 drivers/staging/lustre/lustre/lov/lov_io.c | 32 +-
 drivers/staging/lustre/lustre/lov/lov_lock.c | 135 +-
 drivers/staging/lustre/lustre/lov/lov_merge.c | 3 +-
 drivers/staging/lustre/lustre/lov/lov_obd.c | 203 +-
 drivers/staging/lustre/lustre/lov/lov_object.c | 117 +-
 drivers/staging/lustre/lustre/lov/lov_offset.c | 20 +-
 drivers/staging/lustre/lustre/lov/lov_pack.c | 36 +-
 drivers/staging/lustre/lustre/lov/lov_page.c | 9 +-
 drivers/staging/lustre/lustre/lov/lov_pool.c | 49 +-
 drivers/staging/lustre/lustre/lov/lov_request.c | 64 +-
 drivers/staging/lustre/lustre/lov/lovsub_dev.c | 7 +-
 drivers/staging/lustre/lustre/lov/lovsub_lock.c | 14 +-
 drivers/staging/lustre/lustre/lov/lovsub_object.c | 6 +-
 drivers/staging/lustre/lustre/lov/lovsub_page.c | 2 +-
 drivers/staging/lustre/lustre/lov/lproc_lov.c | 34 +-
 drivers/staging/lustre/lustre/mdc/mdc_internal.h | 16 +-
 drivers/staging/lustre/lustre/mdc/mdc_lib.c | 15 +-
 drivers/staging/lustre/lustre/mdc/mdc_locks.c | 176 +-
 drivers/staging/lustre/lustre/mdc/mdc_reint.c | 40 +-
 drivers/staging/lustre/lustre/mdc/mdc_request.c | 204 +-
 drivers/staging/lustre/lustre/mgc/mgc_request.c | 183 +-
 drivers/staging/lustre/lustre/obdclass/Makefile | 10 +-
 drivers/staging/lustre/lustre/obdclass/acl.c | 10 +-
 drivers/staging/lustre/lustre/obdclass/cl_io.c | 161 +-
 drivers/staging/lustre/lustre/obdclass/cl_lock.c | 98 +-
 drivers/staging/lustre/lustre/obdclass/cl_object.c | 56 +-
 drivers/staging/lustre/lustre/obdclass/cl_page.c | 113 +-
 drivers/staging/lustre/lustre/obdclass/class_obd.c | 57 +-
 drivers/staging/lustre/lustre/obdclass/genops.c | 109 +-
 .../staging/lustre/lustre/obdclass/kernelcomm.c | 246 +++
 .../lustre/lustre/obdclass/linux/linux-module.c | 30 +-
 .../lustre/lustre/obdclass/linux/linux-obdo.c | 5 +-
 .../lustre/lustre/obdclass/linux/linux-sysctl.c | 14 +-
 drivers/staging/lustre/lustre/obdclass/llog.c | 41 +-
 drivers/staging/lustre/lustre/obdclass/llog_cat.c | 6 +-
 drivers/staging/lustre/lustre/obdclass/llog_obd.c | 10 +-
 drivers/staging/lustre/lustre/obdclass/llog_swab.c | 6 +-
 .../lustre/lustre/obdclass/lprocfs_counters.c | 10 +-
 .../lustre/lustre/obdclass/lprocfs_status.c | 243 ++-
 drivers/staging/lustre/lustre/obdclass/lu_object.c | 110 +-
 .../lustre/lustre/obdclass/lustre_handles.c | 15 +-
 .../staging/lustre/lustre/obdclass/lustre_peer.c | 12 +-
 .../staging/lustre/lustre/obdclass/obd_config.c | 66 +-
 drivers/staging/lustre/lustre/obdclass/obd_mount.c | 114 +-
 drivers/staging/lustre/lustre/obdclass/obdo.c | 6 +-
 .../staging/lustre/lustre/obdecho/echo_client.c | 499 +----
 .../staging/lustre/lustre/obdecho/echo_internal.h | 5 -
 drivers/staging/lustre/lustre/osc/lproc_osc.c | 69 +-
 drivers/staging/lustre/lustre/osc/osc_cache.c | 371 ++--
 .../staging/lustre/lustre/osc/osc_cl_internal.h | 41 +-
 drivers/staging/lustre/lustre/osc/osc_dev.c | 10 +-
 drivers/staging/lustre/lustre/osc/osc_internal.h | 8 +-
 drivers/staging/lustre/lustre/osc/osc_io.c | 32 +-
 drivers/staging/lustre/lustre/osc/osc_lock.c | 140 +-
 drivers/staging/lustre/lustre/osc/osc_object.c | 6 +-
 drivers/staging/lustre/lustre/osc/osc_page.c | 213 +-
 drivers/staging/lustre/lustre/osc/osc_quota.c | 39 +-
 drivers/staging/lustre/lustre/osc/osc_request.c | 365 ++--
 drivers/staging/lustre/lustre/ptlrpc/client.c | 132 +-
 drivers/staging/lustre/lustre/ptlrpc/connection.c | 5 +-
 drivers/staging/lustre/lustre/ptlrpc/events.c | 68 +-
 drivers/staging/lustre/lustre/ptlrpc/import.c | 176 +-
 drivers/staging/lustre/lustre/ptlrpc/layout.c | 145 +-
 drivers/staging/lustre/lustre/ptlrpc/llog_client.c | 28 +-
 drivers/staging/lustre/lustre/ptlrpc/llog_net.c | 2 +-
 .../staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 60 +-
 drivers/staging/lustre/lustre/ptlrpc/niobuf.c | 76 +-
 drivers/staging/lustre/lustre/ptlrpc/nrs.c | 133 +-
 drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c | 12 +-
 .../staging/lustre/lustre/ptlrpc/pack_generic.c | 93 +-
 drivers/staging/lustre/lustre/ptlrpc/pinger.c | 31 +-
 .../staging/lustre/lustre/ptlrpc/ptlrpc_internal.h | 2 -
 .../staging/lustre/lustre/ptlrpc/ptlrpc_module.c | 2 +-
 drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 35 +-
 drivers/staging/lustre/lustre/ptlrpc/recover.c | 34 +-
 drivers/staging/lustre/lustre/ptlrpc/sec.c | 91 +-
 drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 17 +-
 drivers/staging/lustre/lustre/ptlrpc/sec_config.c | 33 +-
 drivers/staging/lustre/lustre/ptlrpc/sec_gc.c | 10 +-
 drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c | 6 +-
 drivers/staging/lustre/lustre/ptlrpc/sec_null.c | 7 +-
 drivers/staging/lustre/lustre/ptlrpc/sec_plain.c | 22 +-
 drivers/staging/lustre/lustre/ptlrpc/service.c | 309 +--
 drivers/staging/lustre/lustre/ptlrpc/wiretest.c | 272 ---
 284 files changed, 23709 insertions(+), 22023 deletions(-)
 delete mode 100644 drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
 create mode 100644 drivers/staging/lustre/include/linux/lnet/lib-dlc.h
 create mode 100644 drivers/staging/lustre/lnet/libcfs/Makefile
 create mode 100644 drivers/staging/lustre/lnet/libcfs/debug.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/fail.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/hash.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/libcfs_string.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/module.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/prng.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/tracefile.c
 create mode 100644 drivers/staging/lustre/lnet/libcfs/tracefile.h
 create mode 100644 drivers/staging/lustre/lnet/libcfs/workitem.c
 create mode 100644 drivers/staging/lustre/lnet/lnet/net_fault.c
 delete mode 100644 drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
 create mode 100644 drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
 create mode 100644 drivers/staging/lustre/lustre/include/uapi_kernelcomm.h
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/Makefile
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/debug.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/fail.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/hash.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/libcfs_string.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/module.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/prng.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/tracefile.c
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/tracefile.h
 delete mode 100644 drivers/staging/lustre/lustre/libcfs/workitem.c
 create mode 100644 drivers/staging/lustre/lustre/obdclass/kernelcomm.c
(limited to 'drivers/staging/lustre')

diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig
index a224d88bf..b7d81096e 100644
--- a/drivers/staging/lustre/Kconfig
+++ b/drivers/staging/lustre/Kconfig
@@ -1,3 +1,3 @@
-source "drivers/staging/lustre/lustre/Kconfig"
-
 source "drivers/staging/lustre/lnet/Kconfig"
+
+source "drivers/staging/lustre/lustre/Kconfig"
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 0d8a91ee5..40af75c42 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -42,6 +42,8 @@
 #include "curproc.h"
 
+#define LIBCFS_VERSION "0.7.0"
+
 #define LOWEST_BIT_SET(x) ((x) & ~((x) - 1))
 
 /*
@@ -51,8 +53,6 @@
 #define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
 			    ((hexnum) >> 8 & 0xf))
 
-#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID
-
 #include 
 
 /* need both kernel and user-land acceptor */
@@ -77,7 +77,7 @@ struct cfs_psdev_ops {
 	int (*p_close)(unsigned long, void *);
 	int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
 	int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
-	int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void *);
+	int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
 };
 
 /*
@@ -90,7 +90,6 @@ void cfs_enter_debugger(void);
 * Defined by platform
 */
 int unshare_fs_struct(void);
-sigset_t cfs_get_blocked_sigs(void);
 sigset_t cfs_block_allsigs(void);
 sigset_t cfs_block_sigs(unsigned long sigs);
 sigset_t cfs_block_sigsinv(unsigned long sigs);
@@ -115,7 +114,6 @@ void cfs_get_random_bytes(void *buf, int size);
 #include "libcfs_prim.h"
 #include "libcfs_time.h"
 #include "libcfs_string.h"
-#include "libcfs_kernelcomm.h"
 #include "libcfs_workitem.h"
 #include "libcfs_hash.h"
 #include "libcfs_fail.h"
@@ -156,5 +154,9 @@ struct lnet_debugfs_symlink_def {
 void lustre_insert_debugfs(struct ctl_table *table,
 			   const struct lnet_debugfs_symlink_def *symlinks);
+int lprocfs_call_handler(void *data, int write, loff_t *ppos,
+			 void __user *buffer, size_t *lenp,
+			 int (*handler)(void *data, int write,
+					loff_t pos, void __user *buffer, int len));
 
 #endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 1530b0458..9e62c5971 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -13,11 +13,6 @@
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
 * GPL HEADER END
 */
 /*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index a1787bb43..98430e710 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -106,7 +106,7 @@ struct ptldebug_header {
 #define S_LOV		0x00020000
 #define S_LQUOTA	0x00040000
 #define S_OSD		0x00080000
-/* unused */
+#define S_LFSCK		0x00100000
 /* unused */
 /* unused */
 #define S_LMV		0x00800000 /* b_new_cmd */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 485ab2670..5ca99bd6f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -41,11 +41,16 @@
 #ifndef __LIBCFS_IOCTL_H__
 #define __LIBCFS_IOCTL_H__
 
-#define LIBCFS_IOCTL_VERSION 0x0001000a
+#define LIBCFS_IOCTL_VERSION	0x0001000a
+#define LIBCFS_IOCTL_VERSION2	0x0001000b
 
-struct libcfs_ioctl_data {
+struct libcfs_ioctl_hdr {
 	__u32 ioc_len;
 	__u32 ioc_version;
+};
+
+struct libcfs_ioctl_data {
+	struct libcfs_ioctl_hdr ioc_hdr;
 
 	__u64 ioc_nid;
 	__u64 ioc_u64[1];
@@ -61,20 +66,15 @@ struct libcfs_ioctl_data {
 	char *ioc_inlbuf2;
 
 	__u32 ioc_plen1; /* buffers in userspace */
-	char *ioc_pbuf1;
+	void __user *ioc_pbuf1;
 	__u32 ioc_plen2; /* buffers in userspace */
-	char *ioc_pbuf2;
+	void __user *ioc_pbuf2;
 
 	char ioc_bulk[0];
 };
 
 #define ioc_priority ioc_u32[0]
 
-struct libcfs_ioctl_hdr {
-	__u32 ioc_len;
-	__u32 ioc_version;
-};
-
 struct libcfs_debug_ioctl_data {
 	struct libcfs_ioctl_hdr hdr;
 	unsigned int subs;
@@ -90,7 +90,7 @@ do { \
 
 struct libcfs_ioctl_handler {
 	struct list_head item;
-	int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
+	int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
 };
 
 #define DECLARE_IOCTL_HANDLER(ident, func)	\
@@ -102,7 +102,6 @@ struct libcfs_ioctl_handler {
 
 /* FIXME check conflict with lustre_lib.h */
 #define LIBCFS_IOC_DEBUG_MASK	_IOWR('f', 250, long)
 
-/* ioctls for manipulating snapshots 30- */
 #define IOC_LIBCFS_TYPE		'e'
 #define IOC_LIBCFS_MIN_NR	30
 /* libcfs ioctls */
@@ -113,18 +112,16 @@ struct libcfs_ioctl_handler {
 /* lnet ioctls */
 #define IOC_LIBCFS_GET_NI		_IOWR('e', 50, long)
 #define IOC_LIBCFS_FAIL_NID		_IOWR('e', 51, long)
-#define IOC_LIBCFS_ADD_ROUTE		_IOWR('e', 52, long)
-#define IOC_LIBCFS_DEL_ROUTE		_IOWR('e', 53, long)
-#define IOC_LIBCFS_GET_ROUTE		_IOWR('e', 54, long)
 #define IOC_LIBCFS_NOTIFY_ROUTER	_IOWR('e', 55, long)
 #define IOC_LIBCFS_UNCONFIGURE		_IOWR('e', 56, long)
-#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long)
+/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */
#define IOC_LIBCFS_LNET_DIST		_IOWR('e', 58, long)
 #define IOC_LIBCFS_CONFIGURE		_IOWR('e', 59, long)
 #define IOC_LIBCFS_TESTPROTOCOMPAT	_IOWR('e', 60, long)
 #define IOC_LIBCFS_PING			_IOWR('e', 61, long)
-#define IOC_LIBCFS_DEBUG_PEER		_IOWR('e', 62, long)
+/* #define IOC_LIBCFS_DEBUG_PEER	_IOWR('e', 62, long) */
 #define IOC_LIBCFS_LNETST		_IOWR('e', 63, long)
+#define IOC_LIBCFS_LNET_FAULT		_IOWR('e', 64, long)
 /* lnd ioctls */
 #define IOC_LIBCFS_REGISTER_MYNID	_IOWR('e', 70, long)
 #define IOC_LIBCFS_CLOSE_CONNECTION	_IOWR('e', 71, long)
@@ -138,7 +135,25 @@ struct libcfs_ioctl_handler {
 #define IOC_LIBCFS_DEL_INTERFACE	_IOWR('e', 79, long)
 #define IOC_LIBCFS_GET_INTERFACE	_IOWR('e', 80, long)
 
-#define IOC_LIBCFS_MAX_NR	80
+/*
+ * DLC Specific IOCTL numbers.
+ * In order to maintain backward compatibility with any possible external
+ * tools which might be accessing the IOCTL numbers, a new group of IOCTL
+ * number have been allocated.
+ */
+#define IOCTL_CONFIG_SIZE	struct lnet_ioctl_config_data
+#define IOC_LIBCFS_ADD_ROUTE	_IOWR(IOC_LIBCFS_TYPE, 81, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_DEL_ROUTE	_IOWR(IOC_LIBCFS_TYPE, 82, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_ROUTE	_IOWR(IOC_LIBCFS_TYPE, 83, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_ADD_NET	_IOWR(IOC_LIBCFS_TYPE, 84, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_DEL_NET	_IOWR(IOC_LIBCFS_TYPE, 85, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_NET	_IOWR(IOC_LIBCFS_TYPE, 86, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_CONFIG_RTR	_IOWR(IOC_LIBCFS_TYPE, 87, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_ADD_BUF	_IOWR(IOC_LIBCFS_TYPE, 88, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_BUF	_IOWR(IOC_LIBCFS_TYPE, 89, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_PEER_INFO	_IOWR(IOC_LIBCFS_TYPE, 90, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_LNET_STATS	_IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_MAX_NR	91
 
 static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
 {
@@ -149,9 +164,9 @@ static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
 	return len;
 }
 
-static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
 {
-	if (data->ioc_len > (1<<30)) {
+	if (data->ioc_hdr.ioc_len > (1 << 30)) {
 		CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
 		return 1;
 	}
@@ -187,7 +202,7 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
 		CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
 		return 1;
 	}
-	if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_len) {
+	if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
 		CERROR("LIBCFS ioctl: packlen != ioc_len\n");
 		return 1;
 	}
@@ -207,7 +222,9 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
 
 int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
 int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
-int libcfs_ioctl_popdata(void *arg, void *buf, int size);
+int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
+			     __u32 *buf_len);
+int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
 
 #endif /* __LIBCFS_IOCTL_H__ */
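[Editorial sketch, not part of the patch: the new DLC ioctls above (81-91) all carry a struct lnet_ioctl_config_data, which this patch defines in the new lib-dlc.h header further down. As a hedged illustration of how a userspace tool might issue one of them, assuming the /dev/lnet character device named in lnetctl.h and the headers from this patch, and with error handling mostly elided:]

	/* Hypothetical userspace usage of IOC_LIBCFS_GET_NET.
	 * get_first_net() is an invented helper for illustration only.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static int get_first_net(void)
	{
		struct lnet_ioctl_config_data data;
		int fd = open("/dev/lnet", O_RDWR);

		if (fd < 0)
			return -1;

		memset(&data, 0, sizeof(data));
		/* every DLC request fills the common libcfs ioctl header */
		data.cfg_hdr.ioc_len = sizeof(data);
		data.cfg_hdr.ioc_version = LIBCFS_IOCTL_VERSION2;
		data.cfg_count = 0;	/* index of the NI to query */

		if (ioctl(fd, IOC_LIBCFS_GET_NET, &data) == 0)
			printf("net %u: ncpts %u\n", data.cfg_net, data.cfg_ncpts);

		close(fd);
		return 0;
	}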
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
deleted file mode 100644
index 41f3d810a..000000000
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Author: Nathan Rutman
- *
- * libcfs/include/libcfs/libcfs_kernelcomm.h
- *
- * Kernel <-> userspace communication routines.
- * The definitions below are used in the kernel and userspace.
- *
- */
-
-#ifndef __LIBCFS_KERNELCOMM_H__
-#define __LIBCFS_KERNELCOMM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include instead
-#endif
-
-/* KUC message header.
- * All current and future KUC messages should use this header.
- * To avoid having to include Lustre headers from libcfs, define this here.
- */
-struct kuc_hdr {
-	__u16 kuc_magic;
-	__u8 kuc_transport; /* Each new Lustre feature should use a different
-			       transport */
-	__u8 kuc_flags;
-	__u16 kuc_msgtype; /* Message type or opcode, transport-specific */
-	__u16 kuc_msglen; /* Including header */
-} __aligned(sizeof(__u64));
-
-#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
-
-#define KUC_MAGIC 0x191C /*Lustre9etLinC */
-#define KUC_FL_BLOCK 0x01 /* Wait for send */
-
-/* kuc_msgtype values are defined in each transport */
-enum kuc_transport_type {
-	KUC_TRANSPORT_GENERIC = 1,
-	KUC_TRANSPORT_HSM = 2,
-	KUC_TRANSPORT_CHANGELOG = 3,
-};
-
-enum kuc_generic_message_type {
-	KUC_MSG_SHUTDOWN = 1,
-};
-
-/* prototype for callback function on kuc groups */
-typedef int (*libcfs_kkuc_cb_t)(__u32 data, void *cb_arg);
-
-/* KUC Broadcast Groups. This determines which userspace process hears which
- * messages. Mutliple transports may be used within a group, or multiple
- * groups may use the same transport. Broadcast
- * groups need not be used if e.g. a UID is specified instead;
- * use group 0 to signify unicast.
- */
-#define KUC_GRP_HSM 0x02
-#define KUC_GRP_MAX KUC_GRP_HSM
-
-/* Kernel methods */
-int libcfs_kkuc_msg_put(struct file *fp, void *payload);
-int libcfs_kkuc_group_put(int group, void *payload);
-int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group,
-			  __u32 data);
-int libcfs_kkuc_group_rem(int uid, int group);
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
-			      void *cb_arg);
-
-#define LK_FLG_STOP 0x01
-
-/* kernelcomm control structure, passed from userspace to kernel */
-typedef struct lustre_kernelcomm {
-	__u32 lk_wfd;
-	__u32 lk_rfd;
-	__u32 lk_uid;
-	__u32 lk_group;
-	__u32 lk_data;
-	__u32 lk_flags;
-} __packed lustre_kernelcomm;
-
-/* Userspace methods */
-int libcfs_ukuc_start(lustre_kernelcomm *l, int groups);
-int libcfs_ukuc_stop(lustre_kernelcomm *l);
-int libcfs_ukuc_msg_get(lustre_kernelcomm *l, char *buf, int maxsize,
-			int transport);
-
-#endif /* __LIBCFS_KERNELCOMM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index a80d993b8..13335437c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
 } while (0)
 
 #ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
 #endif
 
 #define LIBCFS_ALLOC_PRE(size, mask) \
@@ -387,11 +387,6 @@ int cfs_percpt_atomic_summary(atomic_t **refs);
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */
 
-struct libcfs_device_userstate {
-	int ldu_memhog_pages;
-	struct page *ldu_memhog_root_page;
-};
-
 #define MKSTR(ptr) ((ptr)) ? (ptr) : ""
 
 static inline int cfs_size_round4(int val)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index d8d2e7dc2..e02cde5ae 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -44,8 +44,6 @@
 #define __LIBCFS_STRING_H__
 
 /* libcfs_string.c */
-/* string comparison ignoring case */
-int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
 /* Convert a text string to a bitmask */
 int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
 		 int *oldmask, int minmask, int allmask);
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index aac59008a..d94b26616 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -118,9 +118,6 @@ do { \
 #define CDEBUG_STACK() (0L)
 #endif /* __x86_64__ */
 
-/* initial pid */
-#define LUSTRE_LNET_PID 12345
-
 #define __current_nesting_level() (0)
 
 /**
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index 520209f17..c04979ae0 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -13,11 +13,6 @@
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
 * GPL HEADER END
 */
 /*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79e5..837eb2274 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
 #include "../libcfs_cpu.h"
 #endif
 
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
 #define page_index(p) ((p)->index)
 
 #define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
 #define NUM_CACHEPAGES \
-	min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+	min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
 #else
 #define NUM_CACHEPAGES totalram_pages
 #endif
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index 75285fde1..cb0d6b481 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -48,7 +48,8 @@
 /** \defgroup lnet_init_fini Initialization and cleanup
 * The LNet must be properly initialized before any LNet calls can be made.
- * @{ */
+ * @{
+ */
 int LNetNIInit(lnet_pid_t requested_pid);
 int LNetNIFini(void);
 /** @} lnet_init_fini */
@@ -71,7 +72,8 @@ int LNetNIFini(void);
 * it's an entry in the portals table of a process.
 *
 * \see LNetMEAttach
- * @{ */
+ * @{
+ */
 int LNetGetId(unsigned int index, lnet_process_id_t *id);
 int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
 void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
@@ -89,7 +91,8 @@ void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
 * incoming requests based on process ID or the match bits provided in the
 * request. MEs can be dynamically inserted into a match list by LNetMEAttach()
 * and LNetMEInsert(), and removed from its list by LNetMEUnlink().
- * @{ */
+ * @{
+ */
 int LNetMEAttach(unsigned int portal,
		 lnet_process_id_t match_id_in,
		 __u64 match_bits_in,
@@ -120,7 +123,8 @@ int LNetMEUnlink(lnet_handle_me_t current_in);
 * The LNet API provides two operations to create MDs: LNetMDAttach()
 * and LNetMDBind(); one operation to unlink and release the resources
 * associated with a MD: LNetMDUnlink().
- * @{ */
+ * @{
+ */
 int LNetMDAttach(lnet_handle_me_t current_in,
		 lnet_md_t md_in,
		 lnet_unlink_t unlink_in,
@@ -154,7 +158,8 @@ int LNetMDUnlink(lnet_handle_md_t md_in);
 * event from an EQ, and LNetEQWait() can be used to block a process until
 * an EQ has at least one event. LNetEQPoll() can be used to test or wait
 * on multiple EQs.
- * @{ */
+ * @{
+ */
 int LNetEQAlloc(unsigned int count_in,
		lnet_eq_handler_t handler,
		lnet_handle_eq_t *handle_out);
@@ -172,7 +177,8 @@ int LNetEQPoll(lnet_handle_eq_t *eventqs_in,
 *
 * The LNet API provides two data movement operations: LNetPut()
 * and LNetGet().
- * @{ */
+ * @{
+ */
 int LNetPut(lnet_nid_t self,
	    lnet_handle_md_t md_in,
	    lnet_ack_req_t ack_req_in,
@@ -192,11 +198,12 @@ int LNetGet(lnet_nid_t self,
 
 /** \defgroup lnet_misc Miscellaneous operations.
 * Miscellaneous operations.
- * @{ */
-
+ * @{
+ */
 int LNetSetLazyPortal(int portal);
 int LNetClearLazyPortal(int portal);
 int LNetCtl(unsigned int cmd, void *arg);
+void LNetDebugPeer(lnet_process_id_t id);
 
 /** @} lnet_misc */
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
new file mode 100644
index 000000000..84a19e96e
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -0,0 +1,122 @@
+/*
+ * LGPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library.
+ *
+ * LGPL HEADER END
+ *
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ */
+/*
+ * Author: Amir Shehata
+ */
+
+#ifndef LNET_DLC_H
+#define LNET_DLC_H
+
+#include "../libcfs/libcfs_ioctl.h"
+#include "types.h"
+
+#define MAX_NUM_SHOW_ENTRIES	32
+#define LNET_MAX_STR_LEN	128
+#define LNET_MAX_SHOW_NUM_CPT	128
+#define LNET_UNDEFINED_HOPS	((__u32) -1)
+
+struct lnet_ioctl_net_config {
+	char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
+	__u32 ni_status;
+	__u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
+};
+
+#define LNET_TINY_BUF_IDX	0
+#define LNET_SMALL_BUF_IDX	1
+#define LNET_LARGE_BUF_IDX	2
+
+/* # different router buffer pools */
+#define LNET_NRBPOOLS		(LNET_LARGE_BUF_IDX + 1)
+
+struct lnet_ioctl_pool_cfg {
+	struct {
+		__u32 pl_npages;
+		__u32 pl_nbuffers;
+		__u32 pl_credits;
+		__u32 pl_mincredits;
+	} pl_pools[LNET_NRBPOOLS];
+	__u32 pl_routing;
+};
+
+struct lnet_ioctl_config_data {
+	struct libcfs_ioctl_hdr cfg_hdr;
+
+	__u32 cfg_net;
+	__u32 cfg_count;
+	__u64 cfg_nid;
+	__u32 cfg_ncpts;
+
+	union {
+		struct {
+			__u32 rtr_hop;
+			__u32 rtr_priority;
+			__u32 rtr_flags;
+		} cfg_route;
+		struct {
+			char net_intf[LNET_MAX_STR_LEN];
+			__s32 net_peer_timeout;
+			__s32 net_peer_tx_credits;
+			__s32 net_peer_rtr_credits;
+			__s32 net_max_tx_credits;
+			__u32 net_cksum_algo;
+			__u32 net_pad;
+		} cfg_net;
+		struct {
+			__u32 buf_enable;
+			__s32 buf_tiny;
+			__s32 buf_small;
+			__s32 buf_large;
+		} cfg_buffers;
+	} cfg_config_u;
+
+	char cfg_bulk[0];
+};
+
+struct lnet_ioctl_peer {
+	struct libcfs_ioctl_hdr pr_hdr;
+	__u32 pr_count;
+	__u32 pr_pad;
+	__u64 pr_nid;
+
+	union {
+		struct {
+			char cr_aliveness[LNET_MAX_STR_LEN];
+			__u32 cr_refcount;
+			__u32 cr_ni_peer_tx_credits;
+			__u32 cr_peer_tx_credits;
+			__u32 cr_peer_rtr_credits;
+			__u32 cr_peer_min_rtr_credits;
+			__u32 cr_peer_tx_qnob;
+			__u32 cr_ncpt;
+		} pr_peer_credits;
+	} pr_lnd_u;
+};
+
+struct lnet_ioctl_lnet_stats {
+	struct libcfs_ioctl_hdr st_hdr;
+	struct lnet_counters st_cntrs;
+};
+
+#endif /* LNET_DLC_H */
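[Editorial sketch, not part of the patch: struct lnet_ioctl_config_data ends in a cfg_bulk[0] flexible array member, so the structure is allocated with a variable-size tail and the embedded header's ioc_len must cover the whole allocation. A hedged kernel-side sketch of that pattern; alloc_cfg_request() is a made-up helper, not from the patch:]

	/* Illustrative only: allocate a config request with 'bulk' extra
	 * bytes after the fixed part, as the cfg_bulk[0] tail implies.
	 */
	#include <linux/slab.h>

	static struct lnet_ioctl_config_data *alloc_cfg_request(size_t bulk)
	{
		struct lnet_ioctl_config_data *data;
		size_t len = sizeof(*data) + bulk;

		data = kzalloc(len, GFP_KERNEL);
		if (!data)
			return NULL;

		/* ioc_len records the total size, fixed part plus bulk tail */
		data->cfg_hdr.ioc_len = len;
		data->cfg_hdr.ioc_version = LIBCFS_IOCTL_VERSION2;
		return data;
	}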
+#include "lib-dlc.h" extern lnet_t the_lnet; /* THE network */ @@ -64,6 +65,19 @@ extern lnet_t the_lnet; /* THE network */ /** exclusive lock */ #define LNET_LOCK_EX CFS_PERCPT_LOCK_EX +static inline int lnet_is_route_alive(lnet_route_t *route) +{ + /* gateway is down */ + if (!route->lr_gateway->lp_alive) + return 0; + /* no NI status, assume it's alive */ + if ((route->lr_gateway->lp_ping_feats & + LNET_PING_FEAT_NI_STATUS) == 0) + return 1; + /* has NI status, check # down NIs */ + return route->lr_downis == 0; +} + static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh) { return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE && @@ -72,25 +86,26 @@ static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh) static inline int lnet_md_exhausted(lnet_libmd_t *md) { - return (md->md_threshold == 0 || - ((md->md_options & LNET_MD_MAX_SIZE) != 0 && + return (!md->md_threshold || + ((md->md_options & LNET_MD_MAX_SIZE) && md->md_offset + md->md_max_size > md->md_length)); } static inline int lnet_md_unlinkable(lnet_libmd_t *md) { - /* Should unlink md when its refcount is 0 and either: + /* + * Should unlink md when its refcount is 0 and either: * - md has been flagged for deletion (by auto unlink or * LNetM[DE]Unlink, in the latter case md may not be exhausted). * - auto unlink is on and md is exhausted. */ - if (md->md_refcount != 0) + if (md->md_refcount) return 0; - if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0) + if (md->md_flags & LNET_MD_FLAG_ZOMBIE) return 1; - return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 && + return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) && lnet_md_exhausted(md)); } @@ -102,8 +117,10 @@ lnet_cpt_of_cookie(__u64 cookie) { unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK; - /* LNET_CPT_NUMBER doesn't have to be power2, which means we can - * get illegal cpt from it's invalid cookie */ + /* + * LNET_CPT_NUMBER doesn't have to be power2, which means we can + * get illegal cpt from it's invalid cookie + */ return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER; } @@ -183,18 +200,17 @@ lnet_md_alloc(lnet_md_t *umd) unsigned int size; unsigned int niov; - if ((umd->options & LNET_MD_KIOV) != 0) { + if (umd->options & LNET_MD_KIOV) { niov = umd->length; size = offsetof(lnet_libmd_t, md_iov.kiov[niov]); } else { - niov = ((umd->options & LNET_MD_IOVEC) != 0) ? - umd->length : 1; + niov = umd->options & LNET_MD_IOVEC ? 
 		size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
 	}
 
 	LIBCFS_ALLOC(md, size);
 
-	if (md != NULL) {
+	if (md) {
 		/* Set here in case of early free */
 		md->md_options = umd->options;
 		md->md_niov = niov;
@@ -209,7 +225,7 @@
 lnet_md_free(lnet_libmd_t *md)
 {
 	unsigned int size;
 
-	if ((md->md_options & LNET_MD_KIOV) != 0)
+	if (md->md_options & LNET_MD_KIOV)
 		size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
 	else
 		size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
@@ -264,7 +280,7 @@ lnet_res_lh_invalidate(lnet_libhandle_t *lh)
 static inline void
 lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
 {
-	if (eq == NULL) {
+	if (!eq) {
 		LNetInvalidateHandle(handle);
 		return;
 	}
@@ -278,7 +294,7 @@ lnet_handle2eq(lnet_handle_eq_t *handle)
 	lnet_libhandle_t *lh;
 
 	lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_eq_t, eq_lh);
@@ -300,7 +316,7 @@ lnet_handle2md(lnet_handle_md_t *handle)
 	cpt = lnet_cpt_of_cookie(handle->cookie);
 	lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
 				handle->cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_libmd_t, md_lh);
@@ -319,7 +335,7 @@ lnet_wire_handle2md(lnet_handle_wire_t *wh)
 	cpt = lnet_cpt_of_cookie(wh->wh_object_cookie);
 	lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
 				wh->wh_object_cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_libmd_t, md_lh);
@@ -341,7 +357,7 @@ lnet_handle2me(lnet_handle_me_t *handle)
 	cpt = lnet_cpt_of_cookie(handle->cookie);
 	lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
 				handle->cookie);
-	if (lh == NULL)
+	if (!lh)
 		return NULL;
 
 	return lh_entry(lh, lnet_me_t, me_lh);
@@ -361,14 +377,14 @@
 lnet_peer_decref_locked(lnet_peer_t *lp)
 {
 	LASSERT(lp->lp_refcount > 0);
 	lp->lp_refcount--;
-	if (lp->lp_refcount == 0)
+	if (!lp->lp_refcount)
 		lnet_destroy_peer_locked(lp);
 }
 
 static inline int
 lnet_isrouter(lnet_peer_t *lp)
 {
-	return lp->lp_rtr_refcount != 0;
+	return lp->lp_rtr_refcount ? 1 : 0;
 }
 
 static inline void
@@ -406,6 +422,8 @@ lnet_ni_decref(lnet_ni_t *ni)
 }
 
 void lnet_ni_free(lnet_ni_t *ni);
+lnet_ni_t *
+lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist);
 
 static inline int
 lnet_nid2peerhash(lnet_nid_t nid)
@@ -430,24 +448,41 @@ lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
 lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
 lnet_ni_t *lnet_net2ni(__u32 net);
 
-int lnet_init(void);
-void lnet_fini(void);
+extern int portal_rotor;
+
+int lnet_lib_init(void);
+void lnet_lib_exit(void);
 
 int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when);
 void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
 			unsigned long when);
-int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
+int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
 		   unsigned int priority);
 int lnet_check_routes(void);
 int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
 void lnet_destroy_routes(void);
 int lnet_get_route(int idx, __u32 *net, __u32 *hops,
 		   lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
+int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
+			int *peer_timeout, int *peer_tx_credits,
+			int *peer_rtr_cr, int *max_tx_credits,
+			struct lnet_ioctl_net_config *net_config);
+int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
+
 void lnet_router_debugfs_init(void);
 void lnet_router_debugfs_fini(void);
 int  lnet_rtrpools_alloc(int im_a_router);
-void lnet_rtrpools_free(void);
+void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages);
+int lnet_rtrpools_adjust(int tiny, int small, int large);
+int lnet_rtrpools_enable(void);
+void lnet_rtrpools_disable(void);
+void lnet_rtrpools_free(int keep_pools);
 lnet_remotenet_t *lnet_find_net_locked(__u32 net);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
+		    __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
+		    __s32 credits);
+int lnet_dyn_del_ni(__u32 net);
+int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
 
 int lnet_islocalnid(lnet_nid_t nid);
 int lnet_islocalnet(__u32 net);
@@ -466,6 +501,8 @@ void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
 int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
 void lnet_return_tx_credits_locked(lnet_msg_t *msg);
 void lnet_return_rx_credits_locked(lnet_msg_t *msg);
+void lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp);
+void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt);
 
 /* portals functions */
 /* portals attributes */
@@ -522,13 +559,22 @@ void lnet_portals_destroy(void);
 /* message functions */
 int lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t fromnid,
 	       void *private, int rdma_req);
+int lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg);
+int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg);
+
 void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 	       unsigned int offset, unsigned int mlen, unsigned int rlen);
+void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
+		  int delayed, unsigned int offset,
+		  unsigned int mlen, unsigned int rlen);
+
 lnet_msg_t *lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *get_msg);
 void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
 
 void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
 
+void lnet_drop_message(lnet_ni_t *ni, int cpt, void *private,
+		       unsigned int nob);
 void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
 void lnet_recv_delayed_msg_list(struct list_head *head);
@@ -541,6 +587,24 @@ char *lnet_msgtyp2str(int type);
 void lnet_print_hdr(lnet_hdr_t *hdr);
 int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
 
+/** \addtogroup lnet_fault_simulation @{ */
+
+int lnet_fault_ctl(int cmd, struct libcfs_ioctl_data *data);
+int lnet_fault_init(void);
+void lnet_fault_fini(void);
+
+bool lnet_drop_rule_match(lnet_hdr_t *hdr);
+
+int lnet_delay_rule_add(struct lnet_fault_attr *attr);
+int lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown);
+int lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
+			 struct lnet_fault_stat *stat);
+void lnet_delay_rule_reset(void);
+void lnet_delay_rule_check(void);
+bool lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg);
+
+/** @} lnet_fault_simulation */
+
 void lnet_counters_get(lnet_counters_t *counters);
 void lnet_counters_reset(void);
@@ -660,27 +724,30 @@ void lnet_router_checker_stop(void);
 void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
 void lnet_swap_pinginfo(lnet_ping_info_t *info);
 
-int lnet_ping_target_init(void);
-void lnet_ping_target_fini(void);
-int lnet_ping(lnet_process_id_t id, int timeout_ms,
-	      lnet_process_id_t *ids, int n_ids);
-
 int lnet_parse_ip2nets(char **networksp, char *ip2nets);
 int lnet_parse_routes(char *route_str, int *im_a_router);
 int lnet_parse_networks(struct list_head *nilist, char *networks);
+int lnet_net_unique(__u32 net, struct list_head *nilist);
 
 int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
 lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
 				   lnet_nid_t nid);
-void lnet_peer_tables_cleanup(void);
+void lnet_peer_tables_cleanup(lnet_ni_t *ni);
 void lnet_peer_tables_destroy(void);
 int lnet_peer_tables_create(void);
 void lnet_debug_peer(lnet_nid_t nid);
+int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
+		       char alivness[LNET_MAX_STR_LEN],
+		       __u32 *cpt_iter, __u32 *refcount,
+		       __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+		       __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis,
+		       __u32 *peer_tx_qnob);
 
 static inline void
 lnet_peer_set_alive(lnet_peer_t *lp)
 {
-	lp->lp_last_alive = lp->lp_last_query = jiffies;
+	lp->lp_last_query = jiffies;
+	lp->lp_last_alive = jiffies;
 	if (!lp->lp_alive)
 		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
 }
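[Editorial sketch, not part of the patch: the fault-simulation entry points declared above operate on the struct lnet_fault_attr that this patch adds to lnetctl.h (see that diff further down), driven through the new IOC_LIBCFS_LNET_FAULT ioctl and the LNET_CTL_DROP_ADD/LNET_CTL_DELAY_* commands. As a hedged illustration of how a drop rule might be described, with arbitrary rates and the helper name invented:]

	/* Hypothetical only: describe a rule that drops 1 in every 100
	 * PUT/ACK messages between any pair of NIDs. The struct, the
	 * LNET_PUT_BIT/LNET_ACK_BIT masks and LNET_NID_ANY semantics come
	 * from this patch; fill_drop_rule() itself is illustrative.
	 */
	#include <linux/string.h>

	static void fill_drop_rule(struct lnet_fault_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->fa_src = LNET_NID_ANY;	/* match every source */
		attr->fa_dst = LNET_NID_ANY;	/* match every destination */
		attr->fa_msg_mask = LNET_PUT_BIT | LNET_ACK_BIT;
		attr->fa_ptl_mask = 0;		/* no portal filtering */
		attr->u.drop.da_rate = 100;	/* drop 1 of every 100 */
		attr->u.drop.da_interval = 0;	/* exclusive with da_rate */
	}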
*)(&((type *)0)->member))) typedef struct lnet_eq { struct list_head eq_list; @@ -190,7 +190,8 @@ typedef struct lnet_lnd { void (*lnd_shutdown)(struct lnet_ni *ni); int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg); - /* In data movement APIs below, payload buffers are described as a set + /* + * In data movement APIs below, payload buffers are described as a set * of 'niov' fragments which are... * EITHER * in virtual memory (struct iovec *iov != NULL) @@ -201,30 +202,36 @@ typedef struct lnet_lnd { * fragments to start from */ - /* Start sending a preformatted message. 'private' is NULL for PUT and + /* + * Start sending a preformatted message. 'private' is NULL for PUT and * GET messages; otherwise this is a response to an incoming message * and 'private' is the 'private' passed to lnet_parse(). Return * non-zero for immediate failure, otherwise complete later with - * lnet_finalize() */ + * lnet_finalize() + */ int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg); - /* Start receiving 'mlen' bytes of payload data, skipping the following + /* + * Start receiving 'mlen' bytes of payload data, skipping the following * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to * lnet_parse(). Return non-zero for immediate failure, otherwise * complete later with lnet_finalize(). This also gives back a receive - * credit if the LND does flow control. */ + * credit if the LND does flow control. + */ int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen); - /* lnet_parse() has had to delay processing of this message + /* + * lnet_parse() has had to delay processing of this message * (e.g. waiting for a forwarding buffer or send credits). Give the * LND a chance to free urgently needed resources. If called, return 0 * for success and do NOT give back a receive credit; that has to wait * until lnd_recv() gets called. On failure return < 0 and - * release resources; lnd_recv() will not be called. */ + * release resources; lnd_recv() will not be called. 
 	int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
			      lnet_msg_t *msg, void **new_privatep);
 
@@ -272,11 +279,14 @@ typedef struct lnet_ni {
 
 #define LNET_PROTO_PING_MATCHBITS	0x8000000000000000LL
 
-/* NB: value of these features equal to LNET_PROTO_PING_VERSION_x
- * of old LNet, so there shouldn't be any compatibility issue */
+/*
+ * NB: value of these features equal to LNET_PROTO_PING_VERSION_x
+ * of old LNet, so there shouldn't be any compatibility issue
+ */
 #define LNET_PING_FEAT_INVAL		(0)		/* no feature */
 #define LNET_PING_FEAT_BASE		(1 << 0)	/* just a ping */
 #define LNET_PING_FEAT_NI_STATUS	(1 << 1)	/* return NI status */
+#define LNET_PING_FEAT_RTE_DISABLED	(1 << 2)	/* Routing enabled */
 
 #define LNET_PING_FEAT_MASK		(LNET_PING_FEAT_BASE | \
					 LNET_PING_FEAT_NI_STATUS)
 
@@ -343,13 +353,17 @@ typedef struct lnet_peer {
 struct lnet_peer_table {
 	int pt_version; /* /proc validity stamp */
 	int pt_number; /* # peers extant */
+	/* # zombies to go to deathrow (and not there yet) */
+	int pt_zombies;
 	struct list_head pt_deathrow; /* zombie peers */
 	struct list_head *pt_hash; /* NID->peer hash */
 };
 
-/* peer aliveness is enabled only on routers for peers in a network where the
- * lnet_ni_t::ni_peertimeout has been set to a positive value */
-#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
+/*
+ * peer aliveness is enabled only on routers for peers in a network where the
+ * lnet_ni_t::ni_peertimeout has been set to a positive value
+ */
+#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
					 (lp)->lp_ni->ni_peertimeout > 0)
 
 typedef struct {
@@ -359,7 +373,7 @@ typedef struct {
 	__u32 lr_net; /* remote network number */
 	int lr_seq; /* sequence for round-robin */
 	unsigned int lr_downis; /* number of down NIs */
-	unsigned int lr_hops; /* how far I am */
+	__u32 lr_hops; /* how far I am */
 	unsigned int lr_priority; /* route priority */
 } lnet_route_t;
 
@@ -384,7 +398,10 @@ typedef struct {
 	struct list_head rbp_msgs; /* messages blocking for a buffer */
 	int rbp_npages; /* # pages in each buffer */
-	int rbp_nbuffers; /* # buffers */
+	/* requested number of buffers */
+	int rbp_req_nbuffers;
+	/* # buffers actually allocated */
+	int rbp_nbuffers;
 	int rbp_credits; /* # free buffers / blocked messages */
 	int rbp_mincredits; /* low water mark */
@@ -398,7 +415,12 @@ typedef struct {
 
 #define LNET_PEER_HASHSIZE 503 /* prime! */
*/ -#define LNET_NRBPOOLS 3 /* # different router buffer pools */ +#define LNET_TINY_BUF_IDX 0 +#define LNET_SMALL_BUF_IDX 1 +#define LNET_LARGE_BUF_IDX 2 + +/* # different router buffer pools */ +#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1) enum { /* Didn't match anything */ @@ -433,12 +455,16 @@ struct lnet_match_info { #define LNET_MT_HASH_BITS 8 #define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS) #define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1) -/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash, - * the last entry is reserved for MEs with ignore-bits */ +/* + * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash, + * the last entry is reserved for MEs with ignore-bits + */ #define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE -/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which +/* + * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which * is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the - * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] */ + * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] + */ #define LNET_MT_BITS_U64 6 /* 2^6 bits */ #define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64) #define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1) @@ -448,8 +474,10 @@ struct lnet_match_table { /* reserved for upcoming patches, CPU partition ID */ unsigned int mt_cpt; unsigned int mt_portal; /* portal index */ - /* match table is set as "enabled" if there's non-exhausted MD - * attached on mt_mhash, it's only valid for wildcard portal */ + /* + * match table is set as "enabled" if there's non-exhausted MD + * attached on mt_mhash, it's only valid for wildcard portal + */ unsigned int mt_enabled; /* bitmap to flag whether MEs on mt_hash are exhausted or not */ __u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP]; @@ -546,6 +574,8 @@ typedef struct { struct lnet_peer_table **ln_peer_tables; /* failure simulation */ struct list_head ln_test_peers; + struct list_head ln_drop_rules; + struct list_head ln_delay_rules; struct list_head ln_nis; /* LND instances */ /* NIs bond on specific CPT(s) */ @@ -553,8 +583,6 @@ typedef struct { /* dying LND instances */ struct list_head ln_nis_zombie; lnet_ni_t *ln_loni; /* the loopback NI */ - /* NI to wait for events in */ - lnet_ni_t *ln_eq_waitni; /* remote networks with routes to them */ struct list_head *ln_remote_nets_hash; @@ -584,8 +612,7 @@ typedef struct { struct mutex ln_api_mutex; struct mutex ln_lnd_mutex; - int ln_init; /* lnet_init() - called? */ + struct mutex ln_delay_mutex; /* Have I called LNetNIInit myself? */ int ln_niinit_self; /* LNetNIInit/LNetNIFini counter */ @@ -600,12 +627,24 @@ typedef struct { /* registered LNDs */ struct list_head ln_lnds; - /* space for network names */ - char *ln_network_tokens; - int ln_network_tokens_nob; /* test protocol compatibility flags */ int ln_testprotocompat; + /* + * 0 - load the NIs from the mod params + * 1 - do not load the NIs from the mod params + * Reverse logic to ensure that other calls to LNetNIInit + * need no change + */ + bool ln_nis_from_mod_params; + + /* + * waitq for router checker. As long as there are no routes in + * the list, the router checker will sleep on this queue. 
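The exhausted-bitmap sizing above works out to five 64-bit words: four words cover the 256 hash lists, plus one extra word of which a single bit flags the ignore-bits list. This standalone check merely restates the macro arithmetic from the header:

#include <assert.h>
#include <stdio.h>

#define LNET_MT_HASH_BITS	8
#define LNET_MT_HASH_SIZE	(1 << LNET_MT_HASH_BITS)	/* 256 lists */
#define LNET_MT_BITS_U64	6				/* 64 = 2^6 bits */
#define LNET_MT_EXHAUSTED_BITS	(LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
#define LNET_MT_EXHAUSTED_BMAP	((1 << LNET_MT_EXHAUSTED_BITS) + 1)

int main(void)
{
	/* (1 << 2) = 4 words of 64 bits = 256 bits for the hash lists,
	 * plus 1 word (one bit used) for the ignore-bits list */
	assert(LNET_MT_EXHAUSTED_BMAP == 5);
	printf("bitmap words: %d (covering %d lists + ignore list)\n",
	       LNET_MT_EXHAUSTED_BMAP, LNET_MT_HASH_SIZE);
	return 0;
}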
when + * routes are added the thread will wake up + */ + wait_queue_head_t ln_rc_waitq; + } lnet_t; #endif diff --git a/drivers/staging/lustre/include/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/linux/lnet/lnetctl.h index bdd69b2af..39575073b 100644 --- a/drivers/staging/lustre/include/linux/lnet/lnetctl.h +++ b/drivers/staging/lustre/include/linux/lnet/lnetctl.h @@ -10,10 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * * header for lnet ioctl */ #ifndef _LNETCTL_H_ @@ -21,6 +17,106 @@ #include "types.h" +/** \addtogroup lnet_fault_simulation + * @{ + */ + +enum { + LNET_CTL_DROP_ADD, + LNET_CTL_DROP_DEL, + LNET_CTL_DROP_RESET, + LNET_CTL_DROP_LIST, + LNET_CTL_DELAY_ADD, + LNET_CTL_DELAY_DEL, + LNET_CTL_DELAY_RESET, + LNET_CTL_DELAY_LIST, +}; + +#define LNET_ACK_BIT BIT(0) +#define LNET_PUT_BIT BIT(1) +#define LNET_GET_BIT BIT(2) +#define LNET_REPLY_BIT BIT(3) + +/** ioctl parameter for LNet fault simulation */ +struct lnet_fault_attr { + /** + * source NID of drop rule + * LNET_NID_ANY is wildcard for all sources + * 255.255.255.255@net is wildcard for all addresses from @net + */ + lnet_nid_t fa_src; + /** destination NID of drop rule, see \a dr_src for details */ + lnet_nid_t fa_dst; + /** + * Portal mask to drop, -1 means all portals, for example: + * fa_ptl_mask = (1 << _LDLM_CB_REQUEST_PORTAL ) | + * (1 << LDLM_CANCEL_REQUEST_PORTAL) + * + * If it is non-zero then only PUT and GET will be filtered, otherwise + * there is no portal filter, all matched messages will be checked. + */ + __u64 fa_ptl_mask; + /** + * message types to drop, for example: + * dra_type = LNET_DROP_ACK_BIT | LNET_DROP_PUT_BIT + * + * If it is non-zero then only specified message types are filtered, + * otherwise all message types will be checked. + */ + __u32 fa_msg_mask; + union { + /** message drop simulation */ + struct { + /** drop rate of this rule */ + __u32 da_rate; + /** + * time interval of message drop, it is exclusive + * with da_rate + */ + __u32 da_interval; + } drop; + /** message latency simulation */ + struct { + __u32 la_rate; + /** + * time interval of message delay, it is exclusive + * with la_rate + */ + __u32 la_interval; + /** latency to delay */ + __u32 la_latency; + } delay; + __u64 space[8]; + } u; +}; + +/** fault simluation stats */ +struct lnet_fault_stat { + /** total # matched messages */ + __u64 fs_count; + /** # dropped LNET_MSG_PUT by this rule */ + __u64 fs_put; + /** # dropped LNET_MSG_ACK by this rule */ + __u64 fs_ack; + /** # dropped LNET_MSG_GET by this rule */ + __u64 fs_get; + /** # dropped LNET_MSG_REPLY by this rule */ + __u64 fs_reply; + union { + struct { + /** total # dropped messages */ + __u64 ds_dropped; + } drop; + struct { + /** total # delayed messages */ + __u64 ls_delayed; + } delay; + __u64 space[8]; + } u; +}; + +/** @} lnet_fault_simulation */ + #define LNET_DEV_ID 0 #define LNET_DEV_PATH "/dev/lnet" #define LNET_DEV_MAJOR 10 diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h index fd1e0fd36..417044552 100644 --- a/drivers/staging/lustre/include/linux/lnet/lnetst.h +++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h @@ -245,20 +245,20 @@ typedef struct { int lstio_ses_force; /* IN: force create ? 
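For illustration, a drop rule built from the fault-simulation structures above might be filled in as follows. LNET_NID_ANY comes from types.h; how the rule reaches the kernel (e.g. LNET_CTL_DROP_ADD through the LNet ioctl) is assumed here rather than shown by this header alone.

/* drop 1 in 100 PUTs and ACKs, from any source to any destination */
struct lnet_fault_attr attr = {
	.fa_src      = LNET_NID_ANY,	/* wildcard source */
	.fa_dst      = LNET_NID_ANY,	/* wildcard destination */
	.fa_ptl_mask = 0,		/* no portal filter */
	.fa_msg_mask = LNET_PUT_BIT | LNET_ACK_BIT,
	.u.drop = {
		.da_rate     = 100,	/* one of every 100 matches */
		.da_interval = 0,	/* exclusive with da_rate */
	},
};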
*/ /** IN: session features */ unsigned lstio_ses_feats; - lst_sid_t *lstio_ses_idp; /* OUT: session id */ + lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ int lstio_ses_nmlen; /* IN: name length */ - char *lstio_ses_namep; /* IN: session name */ + char __user *lstio_ses_namep; /* IN: session name */ } lstio_session_new_args_t; /* query current session */ typedef struct { - lst_sid_t *lstio_ses_idp; /* OUT: session id */ - int *lstio_ses_keyp; /* OUT: local key */ + lst_sid_t __user *lstio_ses_idp; /* OUT: session id */ + int __user *lstio_ses_keyp; /* OUT: local key */ /** OUT: session features */ - unsigned *lstio_ses_featp; - lstcon_ndlist_ent_t *lstio_ses_ndinfo; /* OUT: */ + unsigned __user *lstio_ses_featp; + lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */ int lstio_ses_nmlen; /* IN: name length */ - char *lstio_ses_namep; /* OUT: session name */ + char __user *lstio_ses_namep; /* OUT: session name */ } lstio_session_info_args_t; /* delete a session */ @@ -283,26 +283,26 @@ typedef struct { int lstio_dbg_timeout; /* IN: timeout of debug */ int lstio_dbg_nmlen; /* IN: len of name */ - char *lstio_dbg_namep; /* IN: name of + char __user *lstio_dbg_namep; /* IN: name of group|batch */ int lstio_dbg_count; /* IN: # of test nodes to debug */ - lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test + lnet_process_id_t __user *lstio_dbg_idsp; /* IN: id of test nodes */ - struct list_head *lstio_dbg_resultp; /* OUT: list head of + struct list_head __user *lstio_dbg_resultp; /* OUT: list head of result buffer */ } lstio_debug_args_t; typedef struct { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + int lstio_grp_key; /* IN: session key */ + int lstio_grp_nmlen; /* IN: name length */ + char __user *lstio_grp_namep; /* IN: group name */ } lstio_group_add_args_t; typedef struct { - int lstio_grp_key; /* IN: session key */ - int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + int lstio_grp_key; /* IN: session key */ + int lstio_grp_nmlen; /* IN: name length */ + char __user *lstio_grp_namep; /* IN: group name */ } lstio_group_del_args_t; #define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */ @@ -315,22 +315,22 @@ typedef struct { int lstio_grp_opc; /* IN: OPC */ int lstio_grp_args; /* IN: arguments */ int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + char __user *lstio_grp_namep; /* IN: group name */ int lstio_grp_count; /* IN: # of nodes id */ - lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */ - struct list_head *lstio_grp_resultp; /* OUT: list head of + lnet_process_id_t __user *lstio_grp_idsp; /* IN: array of nodes */ + struct list_head __user *lstio_grp_resultp; /* OUT: list head of result buffer */ } lstio_group_update_args_t; typedef struct { int lstio_grp_key; /* IN: session key */ int lstio_grp_nmlen; /* IN: name length */ - char *lstio_grp_namep; /* IN: group name */ + char __user *lstio_grp_namep; /* IN: group name */ int lstio_grp_count; /* IN: # of nodes */ /** OUT: session features */ - unsigned *lstio_grp_featp; - lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */ - struct list_head *lstio_grp_resultp; /* OUT: list head of + unsigned __user *lstio_grp_featp; + lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */ + struct list_head __user *lstio_grp_resultp; /* OUT: list head of result buffer */ } lstio_group_nodes_args_t; @@ -338,18 +338,18 @@ typedef struct { int 
lstio_grp_key; /* IN: session key */ int lstio_grp_idx; /* IN: group idx */ int lstio_grp_nmlen; /* IN: name len */ - char *lstio_grp_namep; /* OUT: name */ + char __user *lstio_grp_namep; /* OUT: name */ } lstio_group_list_args_t; typedef struct { int lstio_grp_key; /* IN: session key */ int lstio_grp_nmlen; /* IN: name len */ - char *lstio_grp_namep; /* IN: name */ - lstcon_ndlist_ent_t *lstio_grp_entp; /* OUT: description of + char __user *lstio_grp_namep; /* IN: name */ + lstcon_ndlist_ent_t __user *lstio_grp_entp; /* OUT: description of group */ - int *lstio_grp_idxp; /* IN/OUT: node index */ - int *lstio_grp_ndentp; /* IN/OUT: # of nodent */ - lstcon_node_ent_t *lstio_grp_dentsp; /* OUT: nodent array */ + int __user *lstio_grp_idxp; /* IN/OUT: node index */ + int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */ + lstcon_node_ent_t __user *lstio_grp_dentsp; /* OUT: nodent array */ } lstio_group_info_args_t; #define LST_DEFAULT_BATCH "batch" /* default batch name */ @@ -357,13 +357,13 @@ typedef struct { typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ + char __user *lstio_bat_namep; /* IN: batch name */ } lstio_batch_add_args_t; typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ + char __user *lstio_bat_namep; /* IN: batch name */ } lstio_batch_del_args_t; typedef struct { @@ -371,8 +371,8 @@ typedef struct { int lstio_bat_timeout; /* IN: timeout for the batch */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ - struct list_head *lstio_bat_resultp; /* OUT: list head of + char __user *lstio_bat_namep; /* IN: batch name */ + struct list_head __user *lstio_bat_resultp; /* OUT: list head of result buffer */ } lstio_batch_run_args_t; @@ -381,8 +381,8 @@ typedef struct { int lstio_bat_force; /* IN: abort unfinished test RPC */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ - struct list_head *lstio_bat_resultp; /* OUT: list head of + char __user *lstio_bat_namep; /* IN: batch name */ + struct list_head __user *lstio_bat_resultp; /* OUT: list head of result buffer */ } lstio_batch_stop_args_t; @@ -394,8 +394,8 @@ typedef struct { int lstio_bat_timeout; /* IN: timeout for waiting */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ - struct list_head *lstio_bat_resultp; /* OUT: list head of + char __user *lstio_bat_namep; /* IN: batch name */ + struct list_head __user *lstio_bat_resultp; /* OUT: list head of result buffer */ } lstio_batch_query_args_t; @@ -403,21 +403,21 @@ typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_idx; /* IN: index */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: batch name */ + char __user *lstio_bat_namep; /* IN: batch name */ } lstio_batch_list_args_t; typedef struct { int lstio_bat_key; /* IN: session key */ int lstio_bat_nmlen; /* IN: name length */ - char *lstio_bat_namep; /* IN: name */ + char __user *lstio_bat_namep; /* IN: name */ int lstio_bat_server; /* IN: query server or not */ int lstio_bat_testidx; /* IN: test index */ - lstcon_test_batch_ent_t *lstio_bat_entp; /* OUT: batch ent */ + lstcon_test_batch_ent_t __user *lstio_bat_entp; /* OUT: batch ent */ - int *lstio_bat_idxp; /* IN/OUT: index of node */ - int *lstio_bat_ndentp; /* IN/OUT: # of nodent */ - lstcon_node_ent_t *lstio_bat_dentsp; 
/* array of nodent */ + int __user *lstio_bat_idxp; /* IN/OUT: index of node */ + int __user *lstio_bat_ndentp; /* IN/OUT: # of nodent */ + lstcon_node_ent_t __user *lstio_bat_dentsp; /* array of nodent */ } lstio_batch_info_args_t; /* add stat in session */ @@ -427,10 +427,10 @@ typedef struct { stat request */ int lstio_sta_nmlen; /* IN: group name length */ - char *lstio_sta_namep; /* IN: group name */ + char __user *lstio_sta_namep; /* IN: group name */ int lstio_sta_count; /* IN: # of pid */ - lnet_process_id_t *lstio_sta_idsp; /* IN: pid */ - struct list_head *lstio_sta_resultp; /* OUT: list head of + lnet_process_id_t __user *lstio_sta_idsp; /* IN: pid */ + struct list_head __user *lstio_sta_resultp; /* OUT: list head of result buffer */ } lstio_stat_args_t; @@ -445,7 +445,7 @@ typedef enum { typedef struct { int lstio_tes_key; /* IN: session key */ int lstio_tes_bat_nmlen; /* IN: batch name len */ - char *lstio_tes_bat_name; /* IN: batch name */ + char __user *lstio_tes_bat_name; /* IN: batch name */ int lstio_tes_type; /* IN: test type */ int lstio_tes_oneside; /* IN: one sided test */ int lstio_tes_loop; /* IN: loop count */ @@ -457,20 +457,20 @@ typedef struct { destination groups */ int lstio_tes_sgrp_nmlen; /* IN: source group name length */ - char *lstio_tes_sgrp_name; /* IN: group name */ + char __user *lstio_tes_sgrp_name; /* IN: group name */ int lstio_tes_dgrp_nmlen; /* IN: destination group name length */ - char *lstio_tes_dgrp_name; /* IN: group name */ + char __user *lstio_tes_dgrp_name; /* IN: group name */ int lstio_tes_param_len; /* IN: param buffer len */ - void *lstio_tes_param; /* IN: parameter for specified + void __user *lstio_tes_param; /* IN: parameter for specified test: lstio_bulk_param_t, lstio_ping_param_t, ... more */ - int *lstio_tes_retp; /* OUT: private returned + int __user *lstio_tes_retp; /* OUT: private returned value */ - struct list_head *lstio_tes_resultp; /* OUT: list head of + struct list_head __user *lstio_tes_resultp;/* OUT: list head of result buffer */ } lstio_test_args_t; diff --git a/drivers/staging/lustre/include/linux/lnet/nidstr.h b/drivers/staging/lustre/include/linux/lnet/nidstr.h index 4fc9ddce8..937fcc9e4 100644 --- a/drivers/staging/lustre/include/linux/lnet/nidstr.h +++ b/drivers/staging/lustre/include/linux/lnet/nidstr.h @@ -34,8 +34,10 @@ * Lustre Network Driver types. */ enum { - /* Only add to these values (i.e. don't ever change or redefine them): - * network addresses depend on them... */ + /* + * Only add to these values (i.e. don't ever change or redefine them): + * network addresses depend on them... 
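The __user annotations added throughout the lstio_*_args_t structures above mark pointers into user memory, which sparse can then verify are never dereferenced directly. A hedged sketch of the access pattern they enforce; the handler and the LST_NAME_SIZE bound are assumptions, not code from this patch:

#include <linux/uaccess.h>	/* copy_from_user() */

static int stub_session_name(lstio_session_new_args_t *args)
{
	char name[LST_NAME_SIZE];	/* LST_NAME_SIZE assumed */

	if (args->lstio_ses_nmlen <= 0 ||
	    args->lstio_ses_nmlen >= sizeof(name))
		return -EINVAL;

	/* lstio_ses_namep is __user: copy it in, never dereference */
	if (copy_from_user(name, args->lstio_ses_namep,
			   args->lstio_ses_nmlen))
		return -EFAULT;
	name[args->lstio_ses_nmlen] = '\0';
	return 0;
}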
+ */ QSWLND = 1, SOCKLND = 2, GMLND = 3, @@ -67,6 +69,7 @@ static inline char *libcfs_lnd2str(__u32 lnd) return libcfs_lnd2str_r(lnd, libcfs_next_nidstring(), LNET_NIDSTR_SIZE); } + int libcfs_str2lnd(const char *str); char *libcfs_net2str_r(__u32 net, char *buf, size_t buf_size); static inline char *libcfs_net2str(__u32 net) @@ -74,12 +77,14 @@ static inline char *libcfs_net2str(__u32 net) return libcfs_net2str_r(net, libcfs_next_nidstring(), LNET_NIDSTR_SIZE); } + char *libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size); static inline char *libcfs_nid2str(lnet_nid_t nid) { return libcfs_nid2str_r(nid, libcfs_next_nidstring(), LNET_NIDSTR_SIZE); } + __u32 libcfs_str2net(const char *str); lnet_nid_t libcfs_str2nid(const char *str); int libcfs_str2anynid(lnet_nid_t *nid, const char *str); diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h index 599c9f662..bc32403f4 100644 --- a/drivers/staging/lustre/include/linux/lnet/socklnd.h +++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h @@ -85,14 +85,17 @@ socklnd_init_msg(ksock_msg_t *msg, int type) { msg->ksm_csum = 0; msg->ksm_type = type; - msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; + msg->ksm_zc_cookies[0] = 0; + msg->ksm_zc_cookies[1] = 0; } #define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */ #define KSOCK_MSG_LNET 0xC1 /* lnet msg */ -/* We need to know this number to parse hello msg from ksocklnd in - * other LND (usocklnd, for example) */ +/* + * We need to know this number to parse hello msg from ksocklnd in + * other LND (usocklnd, for example) + */ #define KSOCK_PROTO_V2 2 #define KSOCK_PROTO_V3 3 diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h index 11630180c..1c679cb72 100644 --- a/drivers/staging/lustre/include/linux/lnet/types.h +++ b/drivers/staging/lustre/include/linux/lnet/types.h @@ -36,10 +36,14 @@ #include /** \addtogroup lnet - * @{ */ + * @{ + */ + +#define LNET_VERSION "0.6.0" /** \addtogroup lnet_addr - * @{ */ + * @{ + */ /** Portal reserved for LNet's own use. * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments. @@ -116,10 +120,12 @@ typedef struct { lnet_pid_t pid; } WIRE_ATTR lnet_process_id_packed_t; -/* The wire handle's interface cookie only matches one network interface in +/* + * The wire handle's interface cookie only matches one network interface in * one epoch (i.e. new cookie when the interface restarts or the node * reboots). The object cookie only matches one object on that interface - * during that object's lifetime (i.e. no cookie re-use). */ + * during that object's lifetime (i.e. no cookie re-use). + */ typedef struct { __u64 wh_interface_cookie; __u64 wh_object_cookie; @@ -133,10 +139,12 @@ typedef enum { LNET_MSG_HELLO, } lnet_msg_type_t; -/* The variant fields of the portals message header are aligned on an 8 +/* + * The variant fields of the portals message header are aligned on an 8 * byte boundary in the message header. Note that all types used in these * wire structs MUST be fixed size and the smaller types are placed at the - * end. */ + * end. + */ typedef struct lnet_ack { lnet_handle_wire_t dst_wmd; __u64 match_bits; @@ -185,7 +193,8 @@ typedef struct { } msg; } WIRE_ATTR lnet_hdr_t; -/* A HELLO message contains a magic number and protocol version +/* + * A HELLO message contains a magic number and protocol version * code in the header's dest_nid, the peer's NID in the src_nid, and * LNET_MSG_HELLO in the type field. 
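The HELLO encoding just described (and continued below: all other common fields stay zero) can be sketched as follows; the lnet_hdr_t field names and the pre-packed magic/version value are assumptions of this sketch, not quoted from the header:

static void stub_pack_hello(lnet_hdr_t *hdr, lnet_nid_t my_nid,
			    lnet_nid_t magic_and_version)
{
	memset(hdr, 0, sizeof(*hdr));	   /* all other common fields zero */
	hdr->dest_nid = magic_and_version; /* magic + protocol version */
	hdr->src_nid  = my_nid;            /* the sender's own NID */
	hdr->type     = LNET_MSG_HELLO;
	/* payload size stays 0: HELLO carries no payload */
}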
All other common fields are zero * (including payload_size; i.e. no payload). @@ -208,8 +217,10 @@ typedef struct { #define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */ /* Placeholder for a future "unified" protocol across all LNDs */ -/* Current LNDs that receive a request with this magic will respond with a - * "stub" reply using their current protocol */ +/* + * Current LNDs that receive a request with this magic will respond with a + * "stub" reply using their current protocol + */ #define LNET_PROTO_MAGIC 0x45726963 /* ! */ #define LNET_PROTO_TCP_VERSION_MAJOR 1 @@ -258,7 +269,7 @@ typedef struct lnet_counters { #define LNET_MAX_INTERFACES 16 -/* +/** * Objects maintained by the LNet are accessed through handles. Handle types * have names of the form lnet_handle_xx_t, where xx is one of the two letter * object type codes ('eq' for event queue, 'md' for memory descriptor, and @@ -318,7 +329,8 @@ typedef struct { /** @} lnet_addr */ /** \addtogroup lnet_me - * @{ */ + * @{ + */ /** * Specifies whether the match entry or memory descriptor should be unlinked @@ -348,7 +360,8 @@ typedef enum { /** @} lnet_me */ /** \addtogroup lnet_md - * @{ */ + * @{ + */ /** * Defines the visible parts of a memory descriptor. Values of this type @@ -450,9 +463,11 @@ typedef struct { lnet_handle_eq_t eq_handle; } lnet_md_t; -/* Max Transfer Unit (minimum supported everywhere). +/* + * Max Transfer Unit (minimum supported everywhere). * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks) - * these limits are system wide and not interface-local. */ + * these limits are system wide and not interface-local. + */ #define LNET_MTU_BITS 20 #define LNET_MTU (1 << LNET_MTU_BITS) @@ -499,14 +514,15 @@ typedef struct { /** * Starting offset of the fragment within the page. Note that the * end of the fragment must not pass the end of the page; i.e., - * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. + * kiov_len + kiov_offset <= PAGE_SIZE. */ unsigned int kiov_offset; } lnet_kiov_t; /** @} lnet_md */ /** \addtogroup lnet_eq - * @{ */ + * @{ + */ /** * Six types of events can be logged in an event queue. @@ -640,7 +656,8 @@ typedef void (*lnet_eq_handler_t)(lnet_event_t *event); /** @} lnet_eq */ /** \addtogroup lnet_data - * @{ */ + * @{ + */ /** * Specify whether an acknowledgment should be sent by target when the PUT diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig index 00850eeb6..2b5930150 100644 --- a/drivers/staging/lustre/lnet/Kconfig +++ b/drivers/staging/lustre/lnet/Kconfig @@ -1,10 +1,16 @@ config LNET - tristate "Lustre networking subsystem" - depends on LUSTRE_FS + tristate "Lustre networking subsystem (LNet)" + depends on INET && m + help + The Lustre network layer, also known as LNet, is a networking abstaction + level API that was initially created to allow Lustre Filesystem to utilize + very different networks like tcp and ib verbs in a uniform way. In the + case of Lustre routers only the LNet layer is required. Lately other + projects are also looking into using LNet as their networking API as well. 
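The 1 MiB default below matches LNET_MTU (1 << 20) from types.h; a payload limit smaller than the wire MTU would be a misconfiguration. A standalone restatement of that relationship, with the compile-time check being an assumption of this sketch rather than a quote from the tree:

#define LNET_MTU_BITS		20
#define LNET_MTU		(1 << LNET_MTU_BITS)	/* 1 MiB */
#define LNET_MAX_PAYLOAD	1048576			/* Kconfig default */

_Static_assert(LNET_MAX_PAYLOAD >= LNET_MTU,
	       "LNET_MAX_PAYLOAD too small");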
config LNET_MAX_PAYLOAD - int "Lustre lnet max transfer payload (default 2MB)" - depends on LUSTRE_FS + int "Lustre lnet max transfer payload (default 1MB)" + depends on LNET default "1048576" help This option defines the maximum size of payload in bytes that lnet diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile index f6f03e304..0a380fe88 100644 --- a/drivers/staging/lustre/lnet/Makefile +++ b/drivers/staging/lustre/lnet/Makefile @@ -1 +1 @@ -obj-$(CONFIG_LNET) += lnet/ klnds/ selftest/ +obj-$(CONFIG_LNET) += libcfs/ lnet/ klnds/ selftest/ diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index cb74ae731..0d32e6541 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -42,15 +42,7 @@ #include #include "o2iblnd.h" -static lnd_t the_o2iblnd = { - .lnd_type = O2IBLND, - .lnd_startup = kiblnd_startup, - .lnd_shutdown = kiblnd_shutdown, - .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, - .lnd_send = kiblnd_send, - .lnd_recv = kiblnd_recv, -}; +static lnd_t the_o2iblnd; kib_data_t kiblnd_data; @@ -63,7 +55,7 @@ static __u32 kiblnd_cksum(void *ptr, int nob) sum = ((sum << 1) | (sum >> 31)) + *c++; /* ensure I don't return 0 (== no checksum) */ - return (sum == 0) ? 1 : sum; + return !sum ? 1 : sum; } static char *kiblnd_msgtype2str(int type) @@ -145,7 +137,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) int i; LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || - msg->ibm_type == IBLND_MSG_PUT_ACK); + msg->ibm_type == IBLND_MSG_PUT_ACK); rd = msg->ibm_type == IBLND_MSG_GET_REQ ? &msg->ibm_u.get.ibgm_rd : @@ -189,8 +181,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, { kib_net_t *net = ni->ni_data; - /* CAVEAT EMPTOR! all message fields not set here should have been - * initialised previously. */ + /* + * CAVEAT EMPTOR! all message fields not set here should have been + * initialised previously. + */ msg->ibm_magic = IBLND_MSG_MAGIC; msg->ibm_version = version; /* ibm_type */ @@ -249,11 +243,13 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob) return -EPROTO; } - /* checksum must be computed with ibm_cksum zero and BEFORE anything - * gets flipped */ + /* + * checksum must be computed with ibm_cksum zero and BEFORE anything + * gets flipped + */ msg_cksum = flip ? 
__swab32(msg->ibm_cksum) : msg->ibm_cksum; msg->ibm_cksum = 0; - if (msg_cksum != 0 && + if (msg_cksum && msg_cksum != kiblnd_cksum(msg, msg_nob)) { CERROR("Bad checksum\n"); return -EPROTO; @@ -326,21 +322,21 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) int cpt = lnet_cpt_of_nid(nid); unsigned long flags; - LASSERT(net != NULL); + LASSERT(net); LASSERT(nid != LNET_NID_ANY); LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); - if (peer == NULL) { + if (!peer) { CERROR("Cannot allocate peer\n"); return -ENOMEM; } - memset(peer, 0, sizeof(*peer)); /* zero flags etc */ - peer->ibp_ni = ni; peer->ibp_nid = nid; peer->ibp_error = 0; peer->ibp_last_alive = 0; + peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS; + peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits; atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ @@ -350,7 +346,7 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(net->ibn_shutdown == 0); + LASSERT(!net->ibn_shutdown); /* npeers only grows with the global lock held */ atomic_inc(&net->ibn_npeers); @@ -365,38 +361,36 @@ void kiblnd_destroy_peer(kib_peer_t *peer) { kib_net_t *net = peer->ibp_ni->ni_data; - LASSERT(net != NULL); - LASSERT(atomic_read(&peer->ibp_refcount) == 0); + LASSERT(net); + LASSERT(!atomic_read(&peer->ibp_refcount)); LASSERT(!kiblnd_peer_active(peer)); - LASSERT(peer->ibp_connecting == 0); - LASSERT(peer->ibp_accepting == 0); - LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(kiblnd_peer_idle(peer)); LASSERT(list_empty(&peer->ibp_tx_queue)); LIBCFS_FREE(peer, sizeof(*peer)); - /* NB a peer's connections keep a reference on their peer until + /* + * NB a peer's connections keep a reference on their peer until * they are destroyed, so we can be assured that _all_ state to do * with this peer has been cleaned up when its refcount drops to - * zero. */ + * zero. + */ atomic_dec(&net->ibn_npeers); } kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) { - /* the caller is responsible for accounting the additional reference - * that this creates */ + /* + * the caller is responsible for accounting the additional reference + * that this creates + */ struct list_head *peer_list = kiblnd_nid2peerlist(nid); struct list_head *tmp; kib_peer_t *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, kib_peer_t, ibp_list); - - LASSERT(peer->ibp_connecting > 0 || /* creating conns */ - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); /* active conn */ + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_nid != nid) continue; @@ -431,13 +425,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -474,8 +464,10 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer) } /* NB closing peer's last conn unlinked it. */ } - /* NB peer now unlinked; might even be freed if the peer table had the - * last ref on it. 
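kiblnd_cksum() above is a rotate-left-by-one-and-add over the message bytes, with zero reserved to mean "no checksum" (which is why it must be computed before any byte flipping, with ibm_cksum zeroed). The same logic as a standalone program:

#include <stdint.h>
#include <stdio.h>

static uint32_t cksum(const void *ptr, int nob)
{
	const char *c = ptr;
	uint32_t sum = 0;

	while (nob-- > 0)
		sum = ((sum << 1) | (sum >> 31)) + *c++;

	/* ensure we never return 0 (== no checksum) */
	return !sum ? 1 : sum;
}

int main(void)
{
	printf("%#x\n", cksum("hello", 5));
	return 0;
}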
*/ + /* + * NB peer now unlinked; might even be freed if the peer table had the + * last ref on it. + */ } static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) @@ -493,7 +485,8 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (nid != LNET_NID_ANY) { - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; } else { lo = 0; hi = kiblnd_data.kib_peer_hash_size - 1; @@ -502,9 +495,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -516,7 +507,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) LASSERT(list_empty(&peer->ibp_conns)); list_splice_init(&peer->ibp_tx_queue, - &zombies); + &zombies); } kiblnd_del_peer_locked(peer); @@ -544,11 +535,8 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -558,7 +546,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) continue; conn = list_entry(ctmp, kib_conn_t, - ibc_list); + ibc_list); kiblnd_conn_addref(conn); read_unlock_irqrestore( &kiblnd_data.kib_global_lock, @@ -597,12 +585,12 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) int mtu; /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ - if (cmid->route.path_rec == NULL) + if (!cmid->route.path_rec) return; mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); LASSERT(mtu >= 0); - if (mtu != 0) + if (mtu) cmid->route.path_rec->mtu = mtu; } @@ -619,13 +607,13 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 0; mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); - if (mask == NULL) + if (!mask) return 0; /* hash NID to CPU id in this partition... */ off = do_div(nid, cpumask_weight(mask)); for_each_cpu(i, mask) { - if (off-- == 0) + if (!off--) return i % vectors; } @@ -634,15 +622,17 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) } kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version) + int state, int version) { - /* CAVEAT EMPTOR: + /* + * CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself * is destroyed. On failure, the caller's ref on 'peer' remains and * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has - * its ref on 'cmid'). */ + * its ref on 'cmid'). 
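The CAVEAT EMPTOR rule above is the classic ownership-transfer constructor: on success the new object steals the caller's reference; on failure the caller still owns it and must clean up. Schematically, with hypothetical types rather than this driver's:

#include <stdlib.h>

struct obj { struct peer *peer; /* ... */ };

/* caller holds a reference on 'peer' when calling */
struct obj *obj_create(struct peer *peer)
{
	struct obj *obj = malloc(sizeof(*obj));

	if (!obj)
		return NULL;	/* failure: the ref on peer stays the caller's */

	obj->peer = peer;	/* success: obj now owns the caller's ref; */
	return obj;		/* obj_destroy() is expected to drop it */
}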
+ */ rwlock_t *glock = &kiblnd_data.kib_global_lock; kib_net_t *net = peer->ibp_ni->ni_data; kib_dev_t *dev; @@ -656,7 +646,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, int rc; int i; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); dev = net->ibn_dev; @@ -668,14 +658,14 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, sizeof(*init_qp_attr)); - if (init_qp_attr == NULL) { + if (!init_qp_attr) { CERROR("Can't allocate qp_attr for %s\n", libcfs_nid2str(peer->ibp_nid)); goto failed_0; } LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); - if (conn == NULL) { + if (!conn) { CERROR("Can't allocate connection for %s\n", libcfs_nid2str(peer->ibp_nid)); goto failed_1; @@ -686,6 +676,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, conn->ibc_peer = peer; /* I take the caller's ref */ cmid->context = conn; /* for future CM callbacks */ conn->ibc_cmid = cmid; + conn->ibc_max_frags = peer->ibp_max_frags; + conn->ibc_queue_depth = peer->ibp_queue_depth; INIT_LIST_HEAD(&conn->ibc_early_rxs); INIT_LIST_HEAD(&conn->ibc_tx_noops); @@ -697,7 +689,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt, sizeof(*conn->ibc_connvars)); - if (conn->ibc_connvars == NULL) { + if (!conn->ibc_connvars) { CERROR("Can't allocate in-progress connection state\n"); goto failed_2; } @@ -731,42 +723,42 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, write_unlock_irqrestore(glock, flags); LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, - IBLND_RX_MSGS(version) * sizeof(kib_rx_t)); - if (conn->ibc_rxs == NULL) { + IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); + if (!conn->ibc_rxs) { CERROR("Cannot allocate RX buffers\n"); goto failed_2; } rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, - IBLND_RX_MSG_PAGES(version)); - if (rc != 0) + IBLND_RX_MSG_PAGES(conn)); + if (rc) goto failed_2; kiblnd_map_rx_descs(conn); - cq_attr.cqe = IBLND_CQ_ENTRIES(version); + cq_attr.cqe = IBLND_CQ_ENTRIES(conn); cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, conn, &cq_attr); if (IS_ERR(cq)) { - CERROR("Can't create CQ: %ld, cqe: %d\n", - PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); + CERROR("Failed to create CQ with %d CQEs: %ld\n", + IBLND_CQ_ENTRIES(conn), PTR_ERR(cq)); goto failed_2; } conn->ibc_cq = cq; rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - if (rc != 0) { - CERROR("Can't request completion notificiation: %d\n", rc); + if (rc) { + CERROR("Can't request completion notification: %d\n", rc); goto failed_2; } init_qp_attr->event_handler = kiblnd_qp_event; init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version); - init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version); + init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn); + init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn); init_qp_attr->cap.max_send_sge = 1; init_qp_attr->cap.max_recv_sge = 1; init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; @@ -777,7 +769,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, conn->ibc_sched = sched; rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); - if (rc != 0) { + if (rc) { CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", rc, init_qp_attr->cap.max_send_wr, init_qp_attr->cap.max_recv_wr); @@ -787,33 
+779,37 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); /* 1 ref for caller and each rxmsg */ - atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version)); - conn->ibc_nrx = IBLND_RX_MSGS(version); + atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn)); + conn->ibc_nrx = IBLND_RX_MSGS(conn); /* post receives */ - for (i = 0; i < IBLND_RX_MSGS(version); i++) { + for (i = 0; i < IBLND_RX_MSGS(conn); i++) { rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT); - if (rc != 0) { + if (rc) { CERROR("Can't post rxmsg: %d\n", rc); /* Make posted receives complete */ kiblnd_abort_receives(conn); - /* correct # of posted buffers - * NB locking needed now I'm racing with completion */ + /* + * correct # of posted buffers + * NB locking needed now I'm racing with completion + */ spin_lock_irqsave(&sched->ibs_lock, flags); - conn->ibc_nrx -= IBLND_RX_MSGS(version) - i; + conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i; spin_unlock_irqrestore(&sched->ibs_lock, flags); - /* cmid will be destroyed by CM(ofed) after cm_callback + /* + * cmid will be destroyed by CM(ofed) after cm_callback * returned, so we can't refer it anymore - * (by kiblnd_connd()->kiblnd_destroy_conn) */ + * (by kiblnd_connd()->kiblnd_destroy_conn) + */ rdma_destroy_qp(conn->ibc_cmid); conn->ibc_cmid = NULL; /* Drop my own and unused rxbuffer refcounts */ - while (i++ <= IBLND_RX_MSGS(version)) + while (i++ <= IBLND_RX_MSGS(conn)) kiblnd_conn_decref(conn); return NULL; @@ -822,7 +818,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, /* Init successful! */ LASSERT(state == IBLND_CONN_ACTIVE_CONNECT || - state == IBLND_CONN_PASSIVE_WAIT); + state == IBLND_CONN_PASSIVE_WAIT); conn->ibc_state = state; /* 1 more conn */ @@ -830,29 +826,29 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, return conn; failed_2: - kiblnd_destroy_conn(conn); + kiblnd_destroy_conn(conn, true); failed_1: LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } -void kiblnd_destroy_conn(kib_conn_t *conn) +void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; kib_peer_t *peer = conn->ibc_peer; int rc; LASSERT(!in_interrupt()); - LASSERT(atomic_read(&conn->ibc_refcount) == 0); + LASSERT(!atomic_read(&conn->ibc_refcount)); LASSERT(list_empty(&conn->ibc_early_rxs)); LASSERT(list_empty(&conn->ibc_tx_noops)); LASSERT(list_empty(&conn->ibc_tx_queue)); LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd)); LASSERT(list_empty(&conn->ibc_tx_queue_nocred)); LASSERT(list_empty(&conn->ibc_active_txs)); - LASSERT(conn->ibc_noops_posted == 0); - LASSERT(conn->ibc_nsends_posted == 0); + LASSERT(!conn->ibc_noops_posted); + LASSERT(!conn->ibc_nsends_posted); switch (conn->ibc_state) { default: @@ -861,7 +857,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn) case IBLND_CONN_DISCONNECTED: /* connvars should have been freed already */ - LASSERT(conn->ibc_connvars == NULL); + LASSERT(!conn->ibc_connvars); break; case IBLND_CONN_INIT: @@ -869,28 +865,27 @@ void kiblnd_destroy_conn(kib_conn_t *conn) } /* conn->ibc_cmid might be destroyed by CM already */ - if (cmid != NULL && cmid->qp != NULL) + if (cmid && cmid->qp) rdma_destroy_qp(cmid); - if (conn->ibc_cq != NULL) { + if (conn->ibc_cq) { rc = ib_destroy_cq(conn->ibc_cq); - if (rc != 0) + if (rc) CWARN("Error destroying CQ: %d\n", rc); } - if (conn->ibc_rx_pages != NULL) + if (conn->ibc_rx_pages) kiblnd_unmap_rx_descs(conn); - if 
(conn->ibc_rxs != NULL) { + if (conn->ibc_rxs) { LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn->ibc_version) - * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); } - if (conn->ibc_connvars != NULL) + if (conn->ibc_connvars) LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); - if (conn->ibc_hdev != NULL) + if (conn->ibc_hdev) kiblnd_hdev_decref(conn->ibc_hdev); /* See CAVEAT EMPTOR above in kiblnd_create_conn */ @@ -927,7 +922,7 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) } int kiblnd_close_stale_conns_locked(kib_peer_t *peer, - int version, __u64 incarnation) + int version, __u64 incarnation) { kib_conn_t *conn; struct list_head *ctmp; @@ -967,20 +962,18 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (nid != LNET_NID_ANY) - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - else { + if (nid != LNET_NID_ANY) { + lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + } else { lo = 0; hi = kiblnd_data.kib_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -998,10 +991,10 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) if (nid == LNET_NID_ANY) return 0; - return (count == 0) ? -ENOENT : 0; + return !count ? -ENOENT : 0; } -int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) +static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; int rc = -EINVAL; @@ -1027,14 +1020,14 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) rc = 0; conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); - if (conn == NULL) { + if (!conn) { rc = -ENOENT; break; } - LASSERT(conn->ibc_cmid != NULL); + LASSERT(conn->ibc_cmid); data->ioc_nid = conn->ibc_peer->ibp_nid; - if (conn->ibc_cmid->route.path_rec == NULL) + if (!conn->ibc_cmid->route.path_rec) data->ioc_u32[0] = 0; /* iWarp has no path MTU */ else data->ioc_u32[0] = @@ -1054,7 +1047,7 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) return rc; } -void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) +static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { unsigned long last_alive = 0; unsigned long now = cfs_time_current(); @@ -1065,21 +1058,19 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) read_lock_irqsave(glock, flags); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) { - LASSERT(peer->ibp_connecting > 0 || /* creating conns */ - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); /* active conn */ + if (peer) last_alive = peer->ibp_last_alive; - } read_unlock_irqrestore(glock, flags); - if (last_alive != 0) + if (last_alive) *when = last_alive; - /* peer is not persistent in hash, trigger peer creation - * and connection establishment with a NULL tx */ - if (peer == NULL) + /* + * peer is not persistent in hash, trigger peer creation + * and connection establishment with a NULL tx + */ + if (!peer) kiblnd_launch_tx(ni, NULL, nid); CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", @@ -1087,13 +1078,13 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) last_alive ? 
cfs_duration_sec(now - last_alive) : -1); } -void kiblnd_free_pages(kib_pages_t *p) +static void kiblnd_free_pages(kib_pages_t *p) { int npages = p->ibp_npages; int i; for (i = 0; i < npages; i++) { - if (p->ibp_pages[i] != NULL) + if (p->ibp_pages[i]) __free_page(p->ibp_pages[i]); } @@ -1107,7 +1098,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, offsetof(kib_pages_t, ibp_pages[npages])); - if (p == NULL) { + if (!p) { CERROR("Can't allocate descriptor for %d pages\n", npages); return -ENOMEM; } @@ -1119,7 +1110,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) p->ibp_pages[i] = alloc_pages_node( cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_NOFS, 0); - if (p->ibp_pages[i] == NULL) { + if (!p->ibp_pages[i]) { CERROR("Can't allocate page %d of %d\n", i, npages); kiblnd_free_pages(p); return -ENOMEM; @@ -1135,10 +1126,10 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn) kib_rx_t *rx; int i; - LASSERT(conn->ibc_rxs != NULL); - LASSERT(conn->ibc_hdev != NULL); + LASSERT(conn->ibc_rxs); + LASSERT(conn->ibc_hdev); - for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (i = 0; i < IBLND_RX_MSGS(conn); i++) { rx = &conn->ibc_rxs[i]; LASSERT(rx->rx_nob >= 0); /* not posted */ @@ -1162,7 +1153,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) int ipg; int i; - for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) { pg = conn->ibc_rx_pages->ibp_pages[ipg]; rx = &conn->ibc_rxs[i]; @@ -1174,7 +1165,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) IBLND_MSG_SIZE, DMA_FROM_DEVICE); LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, - rx->rx_msgaddr)); + rx->rx_msgaddr)); KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", @@ -1187,7 +1178,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) if (pg_off == PAGE_SIZE) { pg_off = 0; ipg++; - LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); + LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn)); } } } @@ -1198,9 +1189,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) kib_tx_t *tx; int i; - LASSERT(tpo->tpo_pool.po_allocated == 0); + LASSERT(!tpo->tpo_pool.po_allocated); - if (hdev == NULL) + if (!hdev) return; for (i = 0; i < tpo->tpo_pool.po_size; i++) { @@ -1224,9 +1215,10 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); while (dev->ibd_failover) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (i++ % 50 == 0) + if (!(i++ % 50)) CDEBUG(D_NET, "%s: Wait for failover\n", dev->ibd_ifname); + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1) / 100); read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1252,7 +1244,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) int ipage; int i; - LASSERT(net != NULL); + LASSERT(net); dev = net->ibn_dev; @@ -1260,7 +1252,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE); /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0); + CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE)); tpo->tpo_hdev = kiblnd_current_hdev(dev); @@ -1275,7 +1267,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE); LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, - tx->tx_msgaddr)); + tx->tx_msgaddr)); KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, 
tx->tx_msgaddr); list_add(&tx->tx_list, &pool->po_free_list); @@ -1291,68 +1283,32 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) } } -struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, + int negotiated_nfrags) { - __u64 index; - - LASSERT(hdev->ibh_mrs[0] != NULL); - - if (hdev->ibh_nmrs == 1) - return hdev->ibh_mrs[0]; - - index = addr >> hdev->ibh_mr_shift; + __u16 nfrags = (negotiated_nfrags != -1) ? + negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand; - if (index < hdev->ibh_nmrs && - index == ((addr + size - 1) >> hdev->ibh_mr_shift)) - return hdev->ibh_mrs[index]; - - return NULL; -} - -struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) -{ - struct ib_mr *prev_mr; - struct ib_mr *mr; - int i; - - LASSERT(hdev->ibh_mrs[0] != NULL); + LASSERT(hdev->ibh_mrs); if (*kiblnd_tunables.kib_map_on_demand > 0 && - *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) + nfrags <= rd->rd_nfrags) return NULL; - if (hdev->ibh_nmrs == 1) - return hdev->ibh_mrs[0]; - - for (i = 0, mr = prev_mr = NULL; - i < rd->rd_nfrags; i++) { - mr = kiblnd_find_dma_mr(hdev, - rd->rd_frags[i].rf_addr, - rd->rd_frags[i].rf_nob); - if (prev_mr == NULL) - prev_mr = mr; - - if (mr == NULL || prev_mr != mr) { - /* Can't covered by one single MR */ - mr = NULL; - break; - } - } - - return mr; + return hdev->ibh_mrs; } static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) { - LASSERT(pool->fpo_map_count == 0); + LASSERT(!pool->fpo_map_count); - if (pool->fpo_fmr_pool != NULL) + if (pool->fpo_fmr_pool) ib_destroy_fmr_pool(pool->fpo_fmr_pool); - if (pool->fpo_hdev != NULL) + if (pool->fpo_hdev) kiblnd_hdev_decref(pool->fpo_hdev); - LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t)); + LIBCFS_FREE(pool, sizeof(*pool)); } static void kiblnd_destroy_fmr_pool_list(struct list_head *head) @@ -1387,7 +1343,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_dev_t *dev = fps->fps_net->ibn_dev; kib_fmr_pool_t *fpo; struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, + .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE, .page_shift = PAGE_SHIFT, .access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE), @@ -1399,7 +1355,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, int rc; LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); - if (fpo == NULL) + if (!fpo) return -ENOMEM; fpo->fpo_hdev = kiblnd_current_hdev(dev); @@ -1410,7 +1366,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, CERROR("Failed to create FMR pool: %d\n", rc); kiblnd_hdev_decref(fpo->fpo_hdev); - LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t)); + LIBCFS_FREE(fpo, sizeof(*fpo)); return rc; } @@ -1424,7 +1380,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) { - if (fps->fps_net == NULL) /* intialized? */ + if (!fps->fps_net) /* intialized? 
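The FMR pool teardown code here follows one reaping rule (see kiblnd_fmr_pool_is_idle below): pools still in use are never reaped, failed pools are reaped immediately, and healthy pools only after an idle deadline. A standalone restatement; the in-kernel deadline test is assumed to be a time_after_eq()-style comparison:

static int pool_is_idle(int map_count, int failed,
			unsigned long now, unsigned long deadline)
{
	if (map_count)		/* still in use: never reap */
		return 0;
	if (failed)		/* failed pools are reaped at once */
		return 1;
	return now >= deadline;	/* healthy pools only after idling */
}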
*/ return; spin_lock(&fps->fps_lock); @@ -1434,7 +1390,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, kib_fmr_pool_t, fpo_list); fpo->fpo_failed = 1; list_del(&fpo->fpo_list); - if (fpo->fpo_map_count == 0) + if (!fpo->fpo_map_count) list_add(&fpo->fpo_list, zombies); else list_add(&fpo->fpo_list, &fps->fps_failed_pool_list); @@ -1445,7 +1401,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) { - if (fps->fps_net != NULL) { /* initialized? */ + if (fps->fps_net) { /* initialized? */ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list); } @@ -1458,7 +1414,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_fmr_pool_t *fpo; int rc; - memset(fps, 0, sizeof(kib_fmr_poolset_t)); + memset(fps, 0, sizeof(*fps)); fps->fps_net = net; fps->fps_cpt = cpt; @@ -1469,7 +1425,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, INIT_LIST_HEAD(&fps->fps_failed_pool_list); rc = kiblnd_create_fmr_pool(fps, &fpo); - if (rc == 0) + if (!rc) list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); return rc; @@ -1477,7 +1433,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) { - if (fpo->fpo_map_count != 0) /* still in use */ + if (fpo->fpo_map_count) /* still in use */ return 0; if (fpo->fpo_failed) return 1; @@ -1494,11 +1450,11 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) int rc; rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT(rc == 0); + LASSERT(!rc); - if (status != 0) { + if (status) { rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool); - LASSERT(rc == 0); + LASSERT(!rc); } fmr->fmr_pool = NULL; @@ -1563,11 +1519,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, if (fps->fps_increasing) { spin_unlock(&fps->fps_lock); - CDEBUG(D_NET, - "Another thread is allocating new FMR pool, waiting for her to complete\n"); + CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n"); schedule(); goto again; - } if (time_before(cfs_time_current(), fps->fps_next_retry)) { @@ -1583,7 +1537,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, rc = kiblnd_create_fmr_pool(fps, &fpo); spin_lock(&fps->fps_lock); fps->fps_increasing = 0; - if (rc == 0) { + if (!rc) { fps->fps_version++; list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); } else { @@ -1597,7 +1551,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, static void kiblnd_fini_pool(kib_pool_t *pool) { LASSERT(list_empty(&pool->po_free_list)); - LASSERT(pool->po_allocated == 0); + LASSERT(!pool->po_allocated); CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); } @@ -1606,7 +1560,7 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) { CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); - memset(pool, 0, sizeof(kib_pool_t)); + memset(pool, 0, sizeof(*pool)); INIT_LIST_HEAD(&pool->po_free_list); pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); pool->po_owner = ps; @@ -1621,14 +1575,14 @@ static void kiblnd_destroy_pool_list(struct list_head *head) pool = list_entry(head->next, kib_pool_t, po_list); list_del(&pool->po_list); - LASSERT(pool->po_owner != NULL); + LASSERT(pool->po_owner); pool->po_owner->ps_pool_destroy(pool); } } static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) { - if (ps->ps_net == 
NULL) /* intialized? */ + if (!ps->ps_net) /* intialized? */ return; spin_lock(&ps->ps_lock); @@ -1637,7 +1591,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) kib_pool_t, po_list); po->po_failed = 1; list_del(&po->po_list); - if (po->po_allocated == 0) + if (!po->po_allocated) list_add(&po->po_list, zombies); else list_add(&po->po_list, &ps->ps_failed_pool_list); @@ -1647,7 +1601,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) static void kiblnd_fini_poolset(kib_poolset_t *ps) { - if (ps->ps_net != NULL) { /* initialized? */ + if (ps->ps_net) { /* initialized? */ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); kiblnd_destroy_pool_list(&ps->ps_pool_list); } @@ -1663,7 +1617,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, kib_pool_t *pool; int rc; - memset(ps, 0, sizeof(kib_poolset_t)); + memset(ps, 0, sizeof(*ps)); ps->ps_cpt = cpt; ps->ps_net = net; @@ -1680,7 +1634,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, INIT_LIST_HEAD(&ps->ps_failed_pool_list); rc = ps->ps_pool_create(ps, size, &pool); - if (rc == 0) + if (!rc) list_add(&pool->po_list, &ps->ps_pool_list); else CERROR("Failed to create the first pool for %s\n", ps->ps_name); @@ -1690,7 +1644,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) { - if (pool->po_allocated != 0) /* still in use */ + if (pool->po_allocated) /* still in use */ return 0; if (pool->po_failed) return 1; @@ -1706,7 +1660,7 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) spin_lock(&ps->ps_lock); - if (ps->ps_node_fini != NULL) + if (ps->ps_node_fini) ps->ps_node_fini(pool, node); LASSERT(pool->po_allocated > 0); @@ -1731,6 +1685,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) { struct list_head *node; kib_pool_t *pool; + unsigned int interval = 1; + unsigned long time_before; + unsigned int trips = 0; int rc; again: @@ -1744,7 +1701,7 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) node = pool->po_free_list.next; list_del(node); - if (ps->ps_node_init != NULL) { + if (ps->ps_node_init) { /* still hold the lock */ ps->ps_node_init(pool, node); } @@ -1756,9 +1713,15 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) if (ps->ps_increasing) { /* another thread is allocating a new pool */ spin_unlock(&ps->ps_lock); - CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n", - ps->ps_name); - schedule(); + trips++; + CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. 
trips = %d\n", + ps->ps_name, interval, trips); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(interval); + if (interval < cfs_time_seconds(1)) + interval *= 2; + goto again; } @@ -1772,12 +1735,14 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) spin_unlock(&ps->ps_lock); CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); - + time_before = cfs_time_current(); rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); + CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete", + cfs_time_current() - time_before); spin_lock(&ps->ps_lock); ps->ps_increasing = 0; - if (rc == 0) { + if (!rc) { list_add_tail(&pool->po_list, &ps->ps_pool_list); } else { ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY); @@ -1794,37 +1759,37 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); int i; - LASSERT(pool->po_allocated == 0); + LASSERT(!pool->po_allocated); - if (tpo->tpo_tx_pages != NULL) { + if (tpo->tpo_tx_pages) { kiblnd_unmap_tx_pool(tpo); kiblnd_free_pages(tpo->tpo_tx_pages); } - if (tpo->tpo_tx_descs == NULL) + if (!tpo->tpo_tx_descs) goto out; for (i = 0; i < pool->po_size; i++) { kib_tx_t *tx = &tpo->tpo_tx_descs[i]; list_del(&tx->tx_list); - if (tx->tx_pages != NULL) + if (tx->tx_pages) LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV * sizeof(*tx->tx_pages)); - if (tx->tx_frags != NULL) + if (tx->tx_frags) LIBCFS_FREE(tx->tx_frags, IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); - if (tx->tx_wrq != NULL) + if (tx->tx_wrq) LIBCFS_FREE(tx->tx_wrq, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); - if (tx->tx_sge != NULL) + if (tx->tx_sge) LIBCFS_FREE(tx->tx_sge, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); - if (tx->tx_rd != NULL) + if (tx->tx_rd) LIBCFS_FREE(tx->tx_rd, offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); @@ -1834,7 +1799,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) pool->po_size * sizeof(kib_tx_t)); out: kiblnd_fini_pool(pool); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + LIBCFS_FREE(tpo, sizeof(*tpo)); } static int kiblnd_tx_pool_size(int ncpts) @@ -1853,7 +1818,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_tx_pool_t *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); - if (tpo == NULL) { + if (!tpo) { CERROR("Failed to allocate TX pool\n"); return -ENOMEM; } @@ -1864,15 +1829,15 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, tpo->tpo_tx_pages = NULL; npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; - if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) { + if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) { CERROR("Can't allocate tx pages: %d\n", npg); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + LIBCFS_FREE(tpo, sizeof(*tpo)); return -ENOMEM; } LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, size * sizeof(kib_tx_t)); - if (tpo->tpo_tx_descs == NULL) { + if (!tpo->tpo_tx_descs) { CERROR("Can't allocate %d tx descriptors\n", size); ps->ps_pool_destroy(pool); return -ENOMEM; @@ -1884,17 +1849,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_tx_t *tx = &tpo->tpo_tx_descs[i]; tx->tx_pool = tpo; - if (ps->ps_net->ibn_fmr_ps != NULL) { + if (ps->ps_net->ibn_fmr_ps) { LIBCFS_CPT_ALLOC(tx->tx_pages, lnet_cpt_table(), ps->ps_cpt, LNET_MAX_IOV * sizeof(*tx->tx_pages)); - if (tx->tx_pages == NULL) + if (!tx->tx_pages) break; } LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, IBLND_MAX_RDMA_FRAGS * 
sizeof(*tx->tx_frags)); - if (tx->tx_frags == NULL) + if (!tx->tx_frags) break; sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); @@ -1902,19 +1867,19 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); - if (tx->tx_wrq == NULL) + if (!tx->tx_wrq) break; LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); - if (tx->tx_sge == NULL) + if (!tx->tx_sge) break; LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); - if (tx->tx_rd == NULL) + if (!tx->tx_rd) break; } @@ -1945,23 +1910,23 @@ static void kiblnd_net_fini_pools(kib_net_t *net) kib_tx_poolset_t *tps; kib_fmr_poolset_t *fps; - if (net->ibn_tx_ps != NULL) { + if (net->ibn_tx_ps) { tps = net->ibn_tx_ps[i]; kiblnd_fini_poolset(&tps->tps_poolset); } - if (net->ibn_fmr_ps != NULL) { + if (net->ibn_fmr_ps) { fps = net->ibn_fmr_ps[i]; kiblnd_fini_fmr_poolset(fps); } } - if (net->ibn_tx_ps != NULL) { + if (net->ibn_tx_ps) { cfs_percpt_free(net->ibn_tx_ps); net->ibn_tx_ps = NULL; } - if (net->ibn_fmr_ps != NULL) { + if (net->ibn_fmr_ps) { cfs_percpt_free(net->ibn_fmr_ps); net->ibn_fmr_ps = NULL; } @@ -1975,8 +1940,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) int i; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (*kiblnd_tunables.kib_map_on_demand == 0 && - net->ibn_dev->ibd_hdev->ibh_nmrs == 1) { + if (!*kiblnd_tunables.kib_map_on_demand) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } @@ -1996,7 +1960,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) * TX pool must be created later than FMR, see LU-2268 * for details */ - LASSERT(net->ibn_tx_ps == NULL); + LASSERT(!net->ibn_tx_ps); /* * premapping can fail if ibd_nmr > 1, so we always create @@ -2005,56 +1969,45 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), sizeof(kib_fmr_poolset_t)); - if (net->ibn_fmr_ps == NULL) { + if (!net->ibn_fmr_ps) { CERROR("Failed to allocate FMR pool array\n"); rc = -ENOMEM; goto failed; } for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? i : cpts[i]; rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net, kiblnd_fmr_pool_size(ncpts), kiblnd_fmr_flush_trigger(ncpts)); - if (rc == -ENOSYS && i == 0) /* no FMR */ - break; - - if (rc != 0) { /* a real error */ + if (rc) { CERROR("Can't initialize FMR pool for CPT %d: %d\n", cpt, rc); goto failed; } } - if (i > 0) { + if (i > 0) LASSERT(i == ncpts); - goto create_tx_pool; - } - - cfs_percpt_free(net->ibn_fmr_ps); - net->ibn_fmr_ps = NULL; - - CWARN("Device does not support FMR\n"); - goto failed; create_tx_pool: net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), sizeof(kib_tx_poolset_t)); - if (net->ibn_tx_ps == NULL) { + if (!net->ibn_tx_ps) { CERROR("Failed to allocate tx pool array\n"); rc = -ENOMEM; goto failed; } for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? 
i : cpts[i]; rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, cpt, net, "TX", kiblnd_tx_pool_size(ncpts), kiblnd_create_tx_pool, kiblnd_destroy_tx_pool, kiblnd_tx_init, NULL); - if (rc != 0) { + if (rc) { CERROR("Can't initialize TX pool for CPT %d: %d\n", cpt, rc); goto failed; @@ -2064,14 +2017,16 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) return 0; failed: kiblnd_net_fini_pools(net); - LASSERT(rc != 0); + LASSERT(rc); return rc; } static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) { - /* It's safe to assume a HCA can handle a page size - * matching that of the native system */ + /* + * It's safe to assume a HCA can handle a page size + * matching that of the native system + */ hdev->ibh_page_shift = PAGE_SHIFT; hdev->ibh_page_size = 1 << PAGE_SHIFT; hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); @@ -2082,44 +2037,28 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) return 0; } - for (hdev->ibh_mr_shift = 0; - hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) { - if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) || - hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1) - return 0; - } - CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); return -EINVAL; } static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) { - int i; - - if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL) + if (!hdev->ibh_mrs) return; - for (i = 0; i < hdev->ibh_nmrs; i++) { - if (hdev->ibh_mrs[i] == NULL) - break; + ib_dereg_mr(hdev->ibh_mrs); - ib_dereg_mr(hdev->ibh_mrs[i]); - } - - LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs); - hdev->ibh_mrs = NULL; - hdev->ibh_nmrs = 0; + hdev->ibh_mrs = NULL; } void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) { kiblnd_hdev_cleanup_mrs(hdev); - if (hdev->ibh_pd != NULL) + if (hdev->ibh_pd) ib_dealloc_pd(hdev->ibh_pd); - if (hdev->ibh_cmid != NULL) + if (hdev->ibh_cmid) rdma_destroy_id(hdev->ibh_cmid); LIBCFS_FREE(hdev, sizeof(*hdev)); @@ -2132,18 +2071,9 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; rc = kiblnd_hdev_get_attr(hdev); - if (rc != 0) + if (rc) return rc; - LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs)); - if (hdev->ibh_mrs == NULL) { - CERROR("Failed to allocate MRs table\n"); - return -ENOMEM; - } - - hdev->ibh_mrs[0] = NULL; - hdev->ibh_nmrs = 1; - mr = ib_get_dma_mr(hdev->ibh_pd, acflags); if (IS_ERR(mr)) { CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr)); @@ -2151,7 +2081,7 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) return PTR_ERR(mr); } - hdev->ibh_mrs[0] = mr; + hdev->ibh_mrs = mr; return 0; } @@ -2170,12 +2100,13 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) struct sockaddr_in dstaddr; int rc; - if (dev->ibd_hdev == NULL || /* initializing */ - dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */ + if (!dev->ibd_hdev || /* initializing */ + !dev->ibd_hdev->ibh_cmid || /* listener is dead */ *kiblnd_tunables.kib_dev_failover > 1) /* debugging */ return 1; - /* XXX: it's UGLY, but I don't have better way to find + /* + * XXX: it's UGLY, but I don't have better way to find * ib-bonding HCA failover because: * * a. no reliable CM event for HCA failover... @@ -2184,7 +2115,8 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) * We have only two choices at this point: * * a. rdma_bind_addr(), it will conflict with listener cmid - * b. rdma_resolve_addr() to zero addr */ + * b. 
rdma_resolve_addr() to zero addr + */ cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cmid)) { @@ -2201,7 +2133,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) dstaddr.sin_family = AF_INET; rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr, (struct sockaddr *)&dstaddr, 1); - if (rc != 0 || cmid->device == NULL) { + if (rc || !cmid->device) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); @@ -2230,24 +2162,27 @@ int kiblnd_dev_failover(kib_dev_t *dev) int i; LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || - dev->ibd_can_failover || - dev->ibd_hdev == NULL); + dev->ibd_can_failover || !dev->ibd_hdev); rc = kiblnd_dev_need_failover(dev); if (rc <= 0) goto out; - if (dev->ibd_hdev != NULL && - dev->ibd_hdev->ibh_cmid != NULL) { - /* XXX it's not good to close old listener at here, + if (dev->ibd_hdev && + dev->ibd_hdev->ibh_cmid) { + /* + * XXX it's not good to close old listener at here, * because we can fail to create new listener. * But we have to close it now, otherwise rdma_bind_addr - * will return EADDRINUSE... How crap! */ + * will return EADDRINUSE... How crap! + */ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); cmid = dev->ibd_hdev->ibh_cmid; - /* make next schedule of kiblnd_dev_need_failover() - * return 1 for me */ + /* + * make next schedule of kiblnd_dev_need_failover() + * return 1 for me + */ dev->ibd_hdev->ibh_cmid = NULL; write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2269,7 +2204,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) /* Bind to failover device or port */ rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); - if (rc != 0 || cmid->device == NULL) { + if (rc || !cmid->device) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); @@ -2278,7 +2213,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) } LIBCFS_ALLOC(hdev, sizeof(*hdev)); - if (hdev == NULL) { + if (!hdev) { CERROR("Failed to allocate kib_hca_dev\n"); rdma_destroy_id(cmid); rc = -ENOMEM; @@ -2300,13 +2235,13 @@ int kiblnd_dev_failover(kib_dev_t *dev) hdev->ibh_pd = pd; rc = rdma_listen(cmid, 0); - if (rc != 0) { + if (rc) { CERROR("Can't start new listener: %d\n", rc); goto out; } rc = kiblnd_hdev_setup_mrs(hdev); - if (rc != 0) { + if (rc) { CERROR("Can't setup device: %d\n", rc); goto out; } @@ -2334,10 +2269,10 @@ int kiblnd_dev_failover(kib_dev_t *dev) kiblnd_destroy_pool_list(&zombie_ppo); if (!list_empty(&zombie_fpo)) kiblnd_destroy_fmr_pool_list(&zombie_fpo); - if (hdev != NULL) + if (hdev) kiblnd_hdev_decref(hdev); - if (rc != 0) + if (rc) dev->ibd_failed_failover++; else dev->ibd_failed_failover = 0; @@ -2347,13 +2282,13 @@ int kiblnd_dev_failover(kib_dev_t *dev) void kiblnd_destroy_dev(kib_dev_t *dev) { - LASSERT(dev->ibd_nnets == 0); + LASSERT(!dev->ibd_nnets); LASSERT(list_empty(&dev->ibd_nets)); list_del(&dev->ibd_fail_list); list_del(&dev->ibd_list); - if (dev->ibd_hdev != NULL) + if (dev->ibd_hdev) kiblnd_hdev_decref(dev->ibd_hdev); LIBCFS_FREE(dev, sizeof(*dev)); @@ -2369,7 +2304,7 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) int rc; rc = lnet_ipif_query(ifname, &up, &ip, &netmask); - if (rc != 0) { + if (rc) { CERROR("Can't query IPoIB interface %s: %d\n", ifname, rc); return NULL; @@ -2381,11 +2316,11 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) } LIBCFS_ALLOC(dev, sizeof(*dev)); - if (dev == NULL) + if (!dev) return NULL; netdev = dev_get_by_name(&init_net, ifname); - if (netdev == 
NULL) { + if (!netdev) { dev->ibd_can_failover = 0; } else { dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER); @@ -2400,14 +2335,13 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) /* initialize the device */ rc = kiblnd_dev_failover(dev); - if (rc != 0) { + if (rc) { CERROR("Can't initialize device: %d\n", rc); LIBCFS_FREE(dev, sizeof(*dev)); return NULL; } - list_add_tail(&dev->ibd_list, - &kiblnd_data.kib_devs); + list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs); return dev; } @@ -2424,18 +2358,22 @@ static void kiblnd_base_shutdown(void) case IBLND_INIT_ALL: case IBLND_INIT_DATA: - LASSERT(kiblnd_data.kib_peers != NULL); + LASSERT(kiblnd_data.kib_peers); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) LASSERT(list_empty(&kiblnd_data.kib_peers[i])); LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); + LASSERT(list_empty(&kiblnd_data.kib_reconn_list)); + LASSERT(list_empty(&kiblnd_data.kib_reconn_wait)); /* flag threads to terminate; wake and wait for them to die */ kiblnd_data.kib_shutdown = 1; - /* NB: we really want to stop scheduler threads net by net + /* + * NB: we really want to stop scheduler threads net by net * instead of the whole module, this should be improved - * with dynamic configuration LNet */ + * with dynamic configuration LNet + */ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) wake_up_all(&sched->ibs_waitq); @@ -2443,7 +2381,7 @@ static void kiblnd_base_shutdown(void) wake_up_all(&kiblnd_data.kib_failover_waitq); i = 2; - while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { + while (atomic_read(&kiblnd_data.kib_nthreads)) { i++; /* power of 2 ? */ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, @@ -2459,20 +2397,20 @@ static void kiblnd_base_shutdown(void) break; } - if (kiblnd_data.kib_peers != NULL) { + if (kiblnd_data.kib_peers) { LIBCFS_FREE(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); } - if (kiblnd_data.kib_scheds != NULL) + if (kiblnd_data.kib_scheds) cfs_percpt_free(kiblnd_data.kib_scheds); kiblnd_data.kib_init = IBLND_INIT_NOTHING; module_put(THIS_MODULE); } -void kiblnd_shutdown(lnet_ni_t *ni) +static void kiblnd_shutdown(lnet_ni_t *ni) { kib_net_t *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; @@ -2481,7 +2419,7 @@ void kiblnd_shutdown(lnet_ni_t *ni) LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); - if (net == NULL) + if (!net) goto out; write_lock_irqsave(g_lock, flags); @@ -2498,7 +2436,7 @@ void kiblnd_shutdown(lnet_ni_t *ni) /* Wait for all peer state to clean up */ i = 2; - while (atomic_read(&net->ibn_npeers) != 0) { + while (atomic_read(&net->ibn_npeers)) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? 
*/ "%s: waiting for %d peers to disconnect\n", @@ -2519,10 +2457,9 @@ void kiblnd_shutdown(lnet_ni_t *ni) /* fall through */ case IBLND_INIT_NOTHING: - LASSERT(atomic_read(&net->ibn_nconns) == 0); + LASSERT(!atomic_read(&net->ibn_nconns)); - if (net->ibn_dev != NULL && - net->ibn_dev->ibd_nnets == 0) + if (net->ibn_dev && !net->ibn_dev->ibd_nnets) kiblnd_destroy_dev(net->ibn_dev); break; @@ -2558,7 +2495,7 @@ static int kiblnd_base_startup(void) kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; LIBCFS_ALLOC(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); - if (kiblnd_data.kib_peers == NULL) + if (!kiblnd_data.kib_peers) goto failed; for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); @@ -2566,12 +2503,15 @@ static int kiblnd_base_startup(void) spin_lock_init(&kiblnd_data.kib_connd_lock); INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns); INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies); + INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list); + INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait); + init_waitqueue_head(&kiblnd_data.kib_connd_waitq); init_waitqueue_head(&kiblnd_data.kib_failover_waitq); kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*sched)); - if (kiblnd_data.kib_scheds == NULL) + if (!kiblnd_data.kib_scheds) goto failed; cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { @@ -2585,8 +2525,10 @@ static int kiblnd_base_startup(void) if (*kiblnd_tunables.kib_nscheds > 0) { nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds); } else { - /* max to half of CPUs, another half is reserved for - * upper layer modules */ + /* + * max to half of CPUs, another half is reserved for + * upper layer modules + */ nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); } @@ -2601,16 +2543,16 @@ static int kiblnd_base_startup(void) /*****************************************************/ rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn o2iblnd connd: %d\n", rc); goto failed; } - if (*kiblnd_tunables.kib_dev_failover != 0) + if (*kiblnd_tunables.kib_dev_failover) rc = kiblnd_thread_start(kiblnd_failover_thread, NULL, "kiblnd_failover"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); goto failed; } @@ -2632,7 +2574,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) int nthrs; int i; - if (sched->ibs_nthreads == 0) { + if (!sched->ibs_nthreads) { if (*kiblnd_tunables.kib_nscheds > 0) { nthrs = sched->ibs_nthreads_max; } else { @@ -2655,7 +2597,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name); - if (rc == 0) + if (!rc) continue; CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", @@ -2677,14 +2619,14 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, for (i = 0; i < ncpts; i++) { struct kib_sched_info *sched; - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? 
i : cpts[i]; sched = kiblnd_data.kib_scheds[cpt]; if (!newdev && sched->ibs_nthreads > 0) continue; rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]); - if (rc != 0) { + if (rc) { CERROR("Failed to start scheduler threads for %s\n", dev->ibd_ifname); return rc; @@ -2702,30 +2644,30 @@ static kib_dev_t *kiblnd_dev_search(char *ifname) colon = strchr(ifname, ':'); list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + if (!strcmp(&dev->ibd_ifname[0], ifname)) return dev; - if (alias != NULL) + if (alias) continue; colon2 = strchr(dev->ibd_ifname, ':'); - if (colon != NULL) + if (colon) *colon = 0; - if (colon2 != NULL) + if (colon2) *colon2 = 0; - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + if (!strcmp(&dev->ibd_ifname[0], ifname)) alias = dev; - if (colon != NULL) + if (colon) *colon = ':'; - if (colon2 != NULL) + if (colon2) *colon2 = ':'; } return alias; } -int kiblnd_startup(lnet_ni_t *ni) +static int kiblnd_startup(lnet_ni_t *ni) { char *ifname; kib_dev_t *ibdev = NULL; @@ -2739,13 +2681,13 @@ int kiblnd_startup(lnet_ni_t *ni) if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { rc = kiblnd_base_startup(); - if (rc != 0) + if (rc) return rc; } LIBCFS_ALLOC(net, sizeof(*net)); ni->ni_data = net; - if (net == NULL) + if (!net) goto net_failed; ktime_get_real_ts64(&tv); @@ -2757,11 +2699,11 @@ int kiblnd_startup(lnet_ni_t *ni) ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits; - if (ni->ni_interfaces[0] != NULL) { + if (ni->ni_interfaces[0]) { /* Use the IPoIB interface specified in 'networks=' */ CLASSERT(LNET_MAX_INTERFACES > 1); - if (ni->ni_interfaces[1] != NULL) { + if (ni->ni_interfaces[1]) { CERROR("Multiple interfaces not supported\n"); goto failed; } @@ -2778,12 +2720,12 @@ int kiblnd_startup(lnet_ni_t *ni) ibdev = kiblnd_dev_search(ifname); - newdev = ibdev == NULL; + newdev = !ibdev; /* hmm...create kib_dev even for alias */ - if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) + if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname)) ibdev = kiblnd_create_dev(ifname); - if (ibdev == NULL) + if (!ibdev) goto failed; net->ibn_dev = ibdev; @@ -2791,11 +2733,11 @@ int kiblnd_startup(lnet_ni_t *ni) rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) + if (rc) goto failed; rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) { + if (rc) { CERROR("Failed to initialize NI pools: %d\n", rc); goto failed; } @@ -2810,7 +2752,7 @@ int kiblnd_startup(lnet_ni_t *ni) return 0; failed: - if (net->ibn_dev == NULL && ibdev != NULL) + if (!net->ibn_dev && ibdev) kiblnd_destroy_dev(ibdev); net_failed: @@ -2820,25 +2762,35 @@ net_failed: return -ENETDOWN; } -static void __exit kiblnd_module_fini(void) +static lnd_t the_o2iblnd = { + .lnd_type = O2IBLND, + .lnd_startup = kiblnd_startup, + .lnd_shutdown = kiblnd_shutdown, + .lnd_ctl = kiblnd_ctl, + .lnd_query = kiblnd_query, + .lnd_send = kiblnd_send, + .lnd_recv = kiblnd_recv, +}; + +static void __exit ko2iblnd_exit(void) { lnet_unregister_lnd(&the_o2iblnd); } -static int __init kiblnd_module_init(void) +static int __init ko2iblnd_init(void) { int rc; CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, - ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, - 
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); rc = kiblnd_tunables_init(); - if (rc != 0) + if (rc) return rc; lnet_register_lnd(&the_o2iblnd); @@ -2847,8 +2799,9 @@ static int __init kiblnd_module_init(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00"); +MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -module_init(kiblnd_module_init); -module_exit(kiblnd_module_fini); +module_init(ko2iblnd_init); +module_exit(ko2iblnd_exit); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index 025faa9f8..bfcbdd167 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -60,17 +60,17 @@ #include <net/sock.h> #include <linux/in.h> +#include <rdma/rdma_cm.h> +#include <rdma/ib_cm.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_fmr_pool.h> + #define DEBUG_SUBSYSTEM S_LND #include "../../../include/linux/libcfs/libcfs.h" #include "../../../include/linux/lnet/lnet.h" #include "../../../include/linux/lnet/lib-lnet.h" -#include <rdma/rdma_cm.h> -#include <rdma/ib_cm.h> -#include <rdma/ib_verbs.h> -#include <rdma/ib_fmr_pool.h> - #define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ /* # scheduler loops before reschedule */ #define IBLND_RESCHED 100 @@ -146,9 +146,9 @@ kiblnd_concurrent_sends_v1(void) #define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1) #define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0) -#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */ +#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */ #define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */ -#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \ +#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \ *kiblnd_tunables.kib_map_on_demand : \ IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */ #define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? 
\ @@ -162,18 +162,17 @@ kiblnd_concurrent_sends_v1(void) #define IBLND_FMR_POOL 256 #define IBLND_FMR_POOL_FLUSH 192 -/* TX messages (shared by all connections) */ -#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx) - -/* RX messages (per connection) */ -#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v)) -#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE) -#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE) +#define IBLND_RX_MSGS(c) \ + ((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version)) +#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE) +#define IBLND_RX_MSG_PAGES(c) \ + ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE) /* WRs and CQEs (per connection) */ -#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v) -#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v)) -#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v)) +#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c) +#define IBLND_SEND_WRS(c) \ + ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version)) +#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c)) struct kib_hca_dev; @@ -209,8 +208,7 @@ typedef struct kib_hca_dev { __u64 ibh_page_mask; /* page mask of current HCA */ int ibh_mr_shift; /* bits shift of max MR size */ __u64 ibh_mr_size; /* size of MR */ - int ibh_nmrs; /* # of global MRs */ - struct ib_mr **ibh_mrs; /* global MR */ + struct ib_mr *ibh_mrs; /* global MR */ struct ib_pd *ibh_pd; /* PD */ kib_dev_t *ibh_dev; /* owner */ atomic_t ibh_ref; /* refcount */ @@ -350,6 +348,16 @@ typedef struct { void *kib_connd; /* the connd task (serialisation assertions) */ struct list_head kib_connd_conns; /* connections to setup/teardown */ struct list_head kib_connd_zombies; /* connections with zero refcount */ + /* connections to reconnect */ + struct list_head kib_reconn_list; + /* peers wait for reconnection */ + struct list_head kib_reconn_wait; + /** + * The second that peers are pulled out from \a kib_reconn_wait + * for reconnection. 
+ */ + time64_t kib_reconn_sec; + wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */ spinlock_t kib_connd_lock; /* serialise */ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ @@ -465,10 +473,10 @@ typedef struct { #define IBLND_REJECT_FATAL 3 /* Anything else */ #define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ #define IBLND_REJECT_CONN_STALE 5 /* stale peer */ -#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match */ - /* mine */ -#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't */ - /* match mine */ +/* peer's rdma frags doesn't match mine */ +#define IBLND_REJECT_RDMA_FRAGS 6 +/* peer's msg queue size doesn't match mine */ +#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /***********************************************************************/ @@ -527,6 +535,8 @@ typedef struct kib_conn { struct list_head ibc_list; /* stash on peer's conn list */ struct list_head ibc_sched_list; /* schedule for attention */ __u16 ibc_version; /* version of connection */ + /* reconnect later */ + __u16 ibc_reconnect:1; __u64 ibc_incarnation; /* which instance of the peer */ atomic_t ibc_refcount; /* # users */ int ibc_state; /* what's happening */ @@ -536,6 +546,10 @@ typedef struct kib_conn { int ibc_outstanding_credits; /* # credits to return */ int ibc_reserved_credits; /* # ACK/DONE msg credits */ int ibc_comms_error; /* set on comms error */ + /* connections queue depth */ + __u16 ibc_queue_depth; + /* connections max frags */ + __u16 ibc_max_frags; unsigned int ibc_nrx:16; /* receive buffers owned */ unsigned int ibc_scheduled:1; /* scheduled for attention */ unsigned int ibc_ready:1; /* CQ callback fired */ @@ -572,18 +586,29 @@ typedef struct kib_peer { struct list_head ibp_list; /* stash on global peer list */ lnet_nid_t ibp_nid; /* who's on the other end(s) */ lnet_ni_t *ibp_ni; /* LNet interface */ - atomic_t ibp_refcount; /* # users */ struct list_head ibp_conns; /* all active connections */ struct list_head ibp_tx_queue; /* msgs waiting for a conn */ - __u16 ibp_version; /* version of peer */ __u64 ibp_incarnation; /* incarnation of peer */ - int ibp_connecting; /* current active connection attempts - */ - int ibp_accepting; /* current passive connection attempts - */ - int ibp_error; /* errno on closing this peer */ - unsigned long ibp_last_alive; /* when (in jiffies) I was last alive - */ + /* when (in jiffies) I was last alive */ + unsigned long ibp_last_alive; + /* # users */ + atomic_t ibp_refcount; + /* version of peer */ + __u16 ibp_version; + /* current passive connection attempts */ + unsigned short ibp_accepting; + /* current active connection attempts */ + unsigned short ibp_connecting; + /* reconnect this peer later */ + unsigned short ibp_reconnecting:1; + /* # consecutive reconnection attempts to this peer */ + unsigned int ibp_reconnected; + /* errno on closing this peer */ + int ibp_error; + /* max map_on_demand */ + __u16 ibp_max_frags; + /* max_peer_credits */ + __u16 ibp_queue_depth; } kib_peer_t; extern kib_data_t kiblnd_data; @@ -611,7 +636,7 @@ kiblnd_dev_can_failover(kib_dev_t *dev) if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */ return 0; - if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */ + if (!*kiblnd_tunables.kib_dev_failover) /* disabled */ return 0; if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */ @@ -661,6 +686,20 @@ do { \ kiblnd_destroy_peer(peer); \ } while (0) +static inline bool +kiblnd_peer_connecting(kib_peer_t *peer) +{ + return 
peer->ibp_connecting || + peer->ibp_reconnecting || + peer->ibp_accepting; +} + +static inline bool +kiblnd_peer_idle(kib_peer_t *peer) +{ + return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns); +} + static inline struct list_head * kiblnd_nid2peerlist(lnet_nid_t nid) { @@ -691,7 +730,8 @@ kiblnd_send_keepalive(kib_conn_t *conn) { return (*kiblnd_tunables.kib_keepalive > 0) && cfs_time_after(jiffies, conn->ibc_last_send + - *kiblnd_tunables.kib_keepalive*HZ); + msecs_to_jiffies(*kiblnd_tunables.kib_keepalive * + MSEC_PER_SEC)); } static inline int @@ -710,16 +750,16 @@ kiblnd_need_noop(kib_conn_t *conn) /* No tx to piggyback NOOP onto or no credit to send a tx */ return (list_empty(&conn->ibc_tx_queue) || - conn->ibc_credits == 0); + !conn->ibc_credits); } if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */ !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */ - conn->ibc_credits == 0) /* no credit */ + !conn->ibc_credits) /* no credit */ return 0; if (conn->ibc_credits == 1 && /* last credit reserved for */ - conn->ibc_outstanding_credits == 0) /* giving back credits */ + !conn->ibc_outstanding_credits) /* giving back credits */ return 0; /* No tx to piggyback NOOP onto or no credit to send a tx */ @@ -755,18 +795,19 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q) /* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */ /* lowest bits of the work request id to stash the work item type. */ -#define IBLND_WID_TX 0 -#define IBLND_WID_RDMA 1 -#define IBLND_WID_RX 2 -#define IBLND_WID_MASK 3UL +#define IBLND_WID_INVAL 0 +#define IBLND_WID_TX 1 +#define IBLND_WID_RX 2 +#define IBLND_WID_RDMA 3 +#define IBLND_WID_MASK 3UL static inline __u64 kiblnd_ptr2wreqid(void *ptr, int type) { unsigned long lptr = (unsigned long)ptr; - LASSERT((lptr & IBLND_WID_MASK) == 0); - LASSERT((type & ~IBLND_WID_MASK) == 0); + LASSERT(!(lptr & IBLND_WID_MASK)); + LASSERT(!(type & ~IBLND_WID_MASK)); return (__u64)(lptr | type); } @@ -907,9 +948,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev, #define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len) struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, - kib_rdma_desc_t *rd); -struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, - __u64 addr, __u64 size); + kib_rdma_desc_t *rd, + int negotiated_nfrags); void kiblnd_map_rx_descs(kib_conn_t *conn); void kiblnd_unmap_rx_descs(kib_conn_t *conn); void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node); @@ -919,11 +959,6 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, __u64 iov, kib_fmr_t *fmr); void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status); -int kiblnd_startup(lnet_ni_t *ni); -void kiblnd_shutdown(lnet_ni_t *ni); -int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg); -void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); - int kiblnd_tunables_init(void); void kiblnd_tunables_fini(void); @@ -933,7 +968,6 @@ int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name); int kiblnd_failover_thread(void *arg); int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages); -void kiblnd_free_pages(kib_pages_t *p); int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event); @@ -942,39 +976,30 @@ int kiblnd_translate_mtu(int value); int kiblnd_dev_failover(kib_dev_t *dev); int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid); void kiblnd_destroy_peer(kib_peer_t *peer); +bool 
kiblnd_reconnect_peer(kib_peer_t *peer); void kiblnd_destroy_dev(kib_dev_t *dev); void kiblnd_unlink_peer_locked(kib_peer_t *peer); -void kiblnd_peer_alive(kib_peer_t *peer); kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid); -void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); int kiblnd_close_stale_conns_locked(kib_peer_t *peer, - int version, __u64 incarnation); + int version, __u64 incarnation); int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why); -void kiblnd_connreq_done(kib_conn_t *conn, int status); kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version); -void kiblnd_destroy_conn(kib_conn_t *conn); + int state, int version); +void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn); void kiblnd_close_conn(kib_conn_t *conn, int error); void kiblnd_close_conn_locked(kib_conn_t *conn, int error); -int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, - int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie); - void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid); -void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); -void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); -void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob); void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, - int status); -void kiblnd_check_sends (kib_conn_t *conn); + int status); void kiblnd_qp_event(struct ib_event *event, void *arg); void kiblnd_cq_event(struct ib_event *event, void *arg); void kiblnd_cq_completion(struct ib_cq *cq, void *arg); void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, - int credits, lnet_nid_t dstnid, __u64 dststamp); + int credits, lnet_nid_t dstnid, __u64 dststamp); int kiblnd_unpack_msg(kib_msg_t *msg, int nob); int kiblnd_post_rx(kib_rx_t *rx, int credit); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index c7b9ccb13..2323e8d3a 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -40,6 +40,15 @@ #include "o2iblnd.h" +static void kiblnd_peer_alive(kib_peer_t *peer); +static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); +static void kiblnd_check_sends(kib_conn_t *conn); +static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, + int type, int body_nob); +static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie); +static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); +static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx); static void @@ -50,12 +59,12 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) int rc; int i; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ - LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */ + LASSERT(!tx->tx_sending); /* mustn't be awaiting sent callback */ LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ - LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool); kiblnd_unmap_tx(ni, tx); @@ -64,7 +73,7 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; rc = tx->tx_status; - if (tx->tx_conn != NULL) { + if (tx->tx_conn) { LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); kiblnd_conn_decref(tx->tx_conn); @@ -78,7 +87,7 @@ 
kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) /* delay finalize until my descs have been freed */ for (i = 0; i < 2; i++) { - if (lntmsg[i] == NULL) + if (!lntmsg[i]) continue; lnet_finalize(ni, lntmsg[i], rc); @@ -111,19 +120,19 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; node = kiblnd_pool_alloc_node(&tps->tps_poolset); - if (node == NULL) + if (!node) return NULL; - tx = container_of(node, kib_tx_t, tx_list); + tx = list_entry(node, kib_tx_t, tx_list); - LASSERT(tx->tx_nwrq == 0); + LASSERT(!tx->tx_nwrq); LASSERT(!tx->tx_queued); - LASSERT(tx->tx_sending == 0); + LASSERT(!tx->tx_sending); LASSERT(!tx->tx_waiting); - LASSERT(tx->tx_status == 0); - LASSERT(tx->tx_conn == NULL); - LASSERT(tx->tx_lntmsg[0] == NULL); - LASSERT(tx->tx_lntmsg[1] == NULL); - LASSERT(tx->tx_nfrags == 0); + LASSERT(!tx->tx_status); + LASSERT(!tx->tx_conn); + LASSERT(!tx->tx_lntmsg[0]); + LASSERT(!tx->tx_lntmsg[1]); + LASSERT(!tx->tx_nfrags); return tx; } @@ -149,17 +158,15 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) kib_conn_t *conn = rx->rx_conn; kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; struct ib_recv_wr *bad_wrq = NULL; - struct ib_mr *mr; + struct ib_mr *mr = conn->ibc_hdev->ibh_mrs; int rc; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); LASSERT(credit == IBLND_POSTRX_NO_CREDIT || credit == IBLND_POSTRX_PEER_CREDIT || credit == IBLND_POSTRX_RSRVD_CREDIT); - - mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); - LASSERT(mr != NULL); + LASSERT(mr); rx->rx_sge.lkey = mr->lkey; rx->rx_sge.addr = rx->rx_msgaddr; @@ -185,7 +192,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) */ kiblnd_conn_addref(conn); rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); - if (unlikely(rc != 0)) { + if (unlikely(rc)) { CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); rx->rx_nob = 0; @@ -194,7 +201,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ goto out; - if (unlikely(rc != 0)) { + if (unlikely(rc)) { kiblnd_close_conn(conn, rc); kiblnd_drop_rx(rx); /* No more posts for this rx */ goto out; @@ -225,7 +232,7 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); LASSERT(!tx->tx_queued); - LASSERT(tx->tx_sending != 0 || tx->tx_waiting); + LASSERT(tx->tx_sending || tx->tx_waiting); if (tx->tx_cookie != cookie) continue; @@ -251,7 +258,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) spin_lock(&conn->ibc_lock); tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); - if (tx == NULL) { + if (!tx) { spin_unlock(&conn->ibc_lock); CWARN("Unmatched completion type %x cookie %#llx from %s\n", @@ -260,7 +267,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) return; } - if (tx->tx_status == 0) { /* success so far */ + if (!tx->tx_status) { /* success so far */ if (status < 0) /* failed? 
*/ tx->tx_status = status; else if (txtype == IBLND_MSG_GET_REQ) @@ -269,7 +276,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) tx->tx_waiting = 0; - idle = !tx->tx_queued && (tx->tx_sending == 0); + idle = !tx->tx_queued && !tx->tx_sending; if (idle) list_del(&tx->tx_list); @@ -285,7 +292,7 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) lnet_ni_t *ni = conn->ibc_peer->ibp_ni; kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't get tx for completion %x for %s\n", type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); return; @@ -316,19 +323,18 @@ kiblnd_handle_rx(kib_rx_t *rx) msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - if (credits != 0) { + if (credits) { /* Have I received credits that will let me send? */ spin_lock(&conn->ibc_lock); if (conn->ibc_credits + credits > - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { + conn->ibc_queue_depth) { rc2 = conn->ibc_credits; spin_unlock(&conn->ibc_lock); CERROR("Bad credits from %s: %d + %d > %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc2, credits, - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); + rc2, credits, conn->ibc_queue_depth); kiblnd_close_conn(conn, -EPROTO); kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); @@ -360,7 +366,7 @@ kiblnd_handle_rx(kib_rx_t *rx) break; } - if (credits != 0) /* credit already posted */ + if (credits) /* credit already posted */ post_credit = IBLND_POSTRX_NO_CREDIT; else /* a keepalive NOOP */ post_credit = IBLND_POSTRX_PEER_CREDIT; @@ -396,12 +402,12 @@ kiblnd_handle_rx(kib_rx_t *rx) spin_lock(&conn->ibc_lock); tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, - msg->ibm_u.putack.ibpam_src_cookie); - if (tx != NULL) + msg->ibm_u.putack.ibpam_src_cookie); + if (tx) list_del(&tx->tx_list); spin_unlock(&conn->ibc_lock); - if (tx == NULL) { + if (!tx) { CERROR("Unmatched PUT_ACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); rc = -EPROTO; @@ -409,10 +415,11 @@ kiblnd_handle_rx(kib_rx_t *rx) } LASSERT(tx->tx_waiting); - /* CAVEAT EMPTOR: I could be racing with tx_complete, but... + /* + * CAVEAT EMPTOR: I could be racing with tx_complete, but... * (a) I can overwrite tx_msg since my peer has received it! - * (b) tx_waiting set tells tx_complete() it's not done. */ - + * (b) tx_waiting set tells tx_complete() it's not done. 
+ */ tx->tx_nwrq = 0; /* overwrite PUT_REQ */ rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE, @@ -469,7 +476,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) int rc; int err = -EIO; - LASSERT(net != NULL); + LASSERT(net); LASSERT(rx->rx_nob < 0); /* was posted */ rx->rx_nob = 0; /* isn't now */ @@ -486,9 +493,9 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) rx->rx_nob = nob; rc = kiblnd_unpack_msg(msg, rx->rx_nob); - if (rc != 0) { + if (rc) { CERROR("Error %d unpacking rx from %s\n", - rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); goto failed; } @@ -497,7 +504,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) msg->ibm_srcstamp != conn->ibc_incarnation || msg->ibm_dststamp != net->ibn_incarnation) { CERROR("Stale rx from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nid2str(conn->ibc_peer->ibp_nid)); err = -ESTALE; goto failed; } @@ -537,7 +544,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) if (is_vmalloc_addr((void *)vaddr)) { page = vmalloc_to_page((void *)vaddr); - LASSERT(page != NULL); + LASSERT(page); return page; } #ifdef CONFIG_HIGHMEM @@ -549,7 +556,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) } #endif page = virt_to_page(vaddr); - LASSERT(page != NULL); + LASSERT(page); return page; } @@ -565,8 +572,8 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) int rc; int i; - LASSERT(tx->tx_pool != NULL); - LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); + LASSERT(tx->tx_pool); + LASSERT(tx->tx_pool->tpo_pool.po_owner); hdev = tx->tx_pool->tpo_hdev; @@ -582,13 +589,15 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) fps = net->ibn_fmr_ps[cpt]; rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr); - if (rc != 0) { + if (rc) { CERROR("Can't map %d pages: %d\n", npages, rc); return rc; } - /* If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey */ + /* + * If rd is not tx_rd, it's going to get sent to a peer, who will need + * the rkey + */ rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey : tx->fmr.fmr_pfmr->fmr->lkey; rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; @@ -602,14 +611,14 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) { kib_net_t *net = ni->ni_data; - LASSERT(net != NULL); + LASSERT(net); if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) { kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); tx->fmr.fmr_pfmr = NULL; } - if (tx->tx_nfrags != 0) { + if (tx->tx_nfrags) { kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); tx->tx_nfrags = 0; @@ -625,8 +634,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob; int i; - /* If rd is not tx_rd, it's going to get sent to a peer and I'm the - * RDMA sink */ + /* + * If rd is not tx_rd, it's going to get sent to a peer and I'm the + * RDMA sink + */ tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; tx->tx_nfrags = nfrags; @@ -641,15 +652,15 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, nob += rd->rd_frags[i].rf_nob; } - /* looking for pre-mapping MR */ - mr = kiblnd_find_rd_dma_mr(hdev, rd); - if (mr != NULL) { + mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ? + tx->tx_conn->ibc_max_frags : -1); + if (mr) { /* found pre-mapping MR */ rd->rd_key = (rd != tx->tx_rd) ? 
mr->rkey : mr->lkey; return 0; } - if (net->ibn_fmr_ps != NULL) + if (net->ibn_fmr_ps) return kiblnd_fmr_map_tx(net, tx, rd, nob); return -EINVAL; @@ -668,7 +679,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, LASSERT(nob > 0); LASSERT(niov > 0); - LASSERT(net != NULL); + LASSERT(net); while (offset >= iov->iov_len) { offset -= iov->iov_len; @@ -684,7 +695,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, vaddr = ((unsigned long)iov->iov_base) + offset; page_offset = vaddr & (PAGE_SIZE - 1); page = kiblnd_kvaddr_to_page(vaddr); - if (page == NULL) { + if (!page) { CERROR("Can't find page\n"); return -EFAULT; } @@ -710,7 +721,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, static int kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, - int nkiov, lnet_kiov_t *kiov, int offset, int nob) + int nkiov, lnet_kiov_t *kiov, int offset, int nob) { kib_net_t *net = ni->ni_data; struct scatterlist *sg; @@ -720,7 +731,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, LASSERT(nob > 0); LASSERT(nkiov > 0); - LASSERT(net != NULL); + LASSERT(net); while (offset >= kiov->kiov_len) { offset -= kiov->kiov_len; @@ -750,26 +761,24 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, static int kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) - __releases(conn->ibc_lock) - __acquires(conn->ibc_lock) + __must_hold(&conn->ibc_lock) { kib_msg_t *msg = tx->tx_msg; kib_peer_t *peer = conn->ibc_peer; int ver = conn->ibc_version; int rc; int done; - struct ib_send_wr *bad_wrq; LASSERT(tx->tx_queued); /* We rely on this for QP sizing */ LASSERT(tx->tx_nwrq > 0); - LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); + LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags); - LASSERT(credit == 0 || credit == 1); + LASSERT(!credit || credit == 1); LASSERT(conn->ibc_outstanding_credits >= 0); - LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth); LASSERT(conn->ibc_credits >= 0); - LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { /* tx completions outstanding... 
*/ @@ -778,13 +787,13 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) return -EAGAIN; } - if (credit != 0 && conn->ibc_credits == 0) { /* no credits */ + if (credit && !conn->ibc_credits) { /* no credits */ CDEBUG(D_NET, "%s: no credits\n", libcfs_nid2str(peer->ibp_nid)); return -EAGAIN; } - if (credit != 0 && !IBLND_OOB_CAPABLE(ver) && + if (credit && !IBLND_OOB_CAPABLE(ver) && conn->ibc_credits == 1 && /* last credit reserved */ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ CDEBUG(D_NET, "%s: not using last credit\n", @@ -800,9 +809,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) (!kiblnd_need_noop(conn) || /* redundant NOOP */ (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */ conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) { - /* OK to drop when posted enough NOOPs, since + /* + * OK to drop when posted enough NOOPs, since * kiblnd_check_sends will queue NOOP again when - * posted NOOPs complete */ + * posted NOOPs complete + */ spin_unlock(&conn->ibc_lock); kiblnd_tx_done(peer->ibp_ni, tx); spin_lock(&conn->ibc_lock); @@ -821,12 +832,14 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) if (msg->ibm_type == IBLND_MSG_NOOP) conn->ibc_noops_posted++; - /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA + /* + * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA * PUT. If so, it was first queued here as a PUT_REQ, sent and * stashed on ibc_active_txs, matched by an incoming PUT_ACK, * and then re-queued here. It's (just) possible that * tx_sending is non-zero if we've not done the tx_complete() - * from the first send; hence the ++ rather than = below. */ + * from the first send; hence the ++ rather than = below. + */ tx->tx_sending++; list_add(&tx->tx_list, &conn->ibc_active_txs); @@ -838,16 +851,25 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) /* close_conn will launch failover */ rc = -ENETDOWN; } else { - rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq); + struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr; + + LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), + "bad wr_id %llx, opc %d, flags %d, peer: %s\n", + wrq->wr_id, wrq->opcode, wrq->send_flags, + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + wrq = NULL; + rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq); } conn->ibc_last_send = jiffies; - if (rc == 0) + if (!rc) return 0; - /* NB credits are transferred in the actual - * message, which can only be the last work item */ + /* + * NB credits are transferred in the actual + * message, which can only be the last work item + */ conn->ibc_credits += credit; conn->ibc_outstanding_credits += msg->ibm_credits; conn->ibc_nsends_posted--; @@ -858,7 +880,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) tx->tx_waiting = 0; tx->tx_sending--; - done = (tx->tx_sending == 0); + done = !tx->tx_sending; if (done) list_del(&tx->tx_list); @@ -881,7 +903,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) return -EIO; } -void +static void kiblnd_check_sends(kib_conn_t *conn) { int ver = conn->ibc_version; @@ -899,13 +921,13 @@ kiblnd_check_sends(kib_conn_t *conn) LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); LASSERT(!IBLND_OOB_CAPABLE(ver) || - conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); + conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); LASSERT(conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && !list_empty(&conn->ibc_tx_queue_rsrvd)) { tx = 
list_entry(conn->ibc_tx_queue_rsrvd.next, - kib_tx_t, tx_list); + kib_tx_t, tx_list); list_del(&tx->tx_list); list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; @@ -915,23 +937,21 @@ kiblnd_check_sends(kib_conn_t *conn) spin_unlock(&conn->ibc_lock); tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx != NULL) + if (tx) kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); spin_lock(&conn->ibc_lock); - if (tx != NULL) + if (tx) kiblnd_queue_tx_locked(tx, conn); } - kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */ - for (;;) { int credit; if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; tx = list_entry(conn->ibc_tx_queue_nocred.next, - kib_tx_t, tx_list); + kib_tx_t, tx_list); } else if (!list_empty(&conn->ibc_tx_noops)) { LASSERT(!IBLND_OOB_CAPABLE(ver)); credit = 1; @@ -940,17 +960,16 @@ kiblnd_check_sends(kib_conn_t *conn) } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; tx = list_entry(conn->ibc_tx_queue.next, - kib_tx_t, tx_list); - } else + kib_tx_t, tx_list); + } else { break; + } - if (kiblnd_post_tx_locked(conn, tx, credit) != 0) + if (kiblnd_post_tx_locked(conn, tx, credit)) break; } spin_unlock(&conn->ibc_lock); - - kiblnd_conn_decref(conn); /* ...until here */ } static void @@ -976,9 +995,10 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) spin_lock(&conn->ibc_lock); - /* I could be racing with rdma completion. Whoever makes 'tx' idle - * gets to free it, which also drops its ref on 'conn'. */ - + /* + * I could be racing with rdma completion. Whoever makes 'tx' idle + * gets to free it, which also drops its ref on 'conn'. + */ tx->tx_sending--; conn->ibc_nsends_posted--; if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP) @@ -989,7 +1009,7 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) tx->tx_status = -EIO; } - idle = (tx->tx_sending == 0) && /* This is the final callback */ + idle = !tx->tx_sending && /* This is the final callback */ !tx->tx_waiting && /* Not waiting for peer */ !tx->tx_queued; /* Not re-queued (PUT_DONE) */ if (idle) @@ -1007,24 +1027,22 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) kiblnd_conn_decref(conn); /* ...until here */ } -void +static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) { kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; int nob = offsetof(kib_msg_t, ibm_u) + body_nob; - struct ib_mr *mr; + struct ib_mr *mr = hdev->ibh_mrs; LASSERT(tx->tx_nwrq >= 0); LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); LASSERT(nob <= IBLND_MSG_SIZE); + LASSERT(mr); kiblnd_init_msg(tx->tx_msg, type, body_nob); - mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); - LASSERT(mr != NULL); - sge->lkey = mr->lkey; sge->addr = tx->tx_msgaddr; sge->length = nob; @@ -1041,25 +1059,23 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) tx->tx_nwrq++; } -int +static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, - int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) { kib_msg_t *ibmsg = tx->tx_msg; kib_rdma_desc_t *srcrd = tx->tx_rd; struct ib_sge *sge = &tx->tx_sge[0]; struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next; int rc = resid; - int srcidx; - int dstidx; + int srcidx = 0; + int dstidx = 0; int wrknob; LASSERT(!in_interrupt()); - LASSERT(tx->tx_nwrq == 0); + LASSERT(!tx->tx_nwrq); LASSERT(type == IBLND_MSG_GET_DONE || - type == IBLND_MSG_PUT_DONE); - - srcidx = dstidx = 0; + type == 
IBLND_MSG_PUT_DONE); while (resid > 0) { if (srcidx >= srcrd->rd_nfrags) { @@ -1074,10 +1090,10 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, break; } - if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) { - CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n", + if (tx->tx_nwrq >= conn->ibc_max_frags) { + CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), - IBLND_RDMA_FRAGS(conn->ibc_version), + conn->ibc_max_frags, srcidx, srcrd->rd_nfrags, dstidx, dstrd->rd_nfrags); rc = -EMSGSIZE; @@ -1127,7 +1143,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, return rc; } -void +static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) { struct list_head *q; @@ -1137,9 +1153,11 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); tx->tx_queued = 1; - tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); + tx->tx_deadline = jiffies + + msecs_to_jiffies(*kiblnd_tunables.kib_timeout * + MSEC_PER_SEC); - if (tx->tx_conn == NULL) { + if (!tx->tx_conn) { kiblnd_conn_addref(conn); tx->tx_conn = conn; LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); @@ -1180,7 +1198,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) list_add_tail(&tx->tx_list, q); } -void +static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn) { spin_lock(&conn->ibc_lock); @@ -1200,19 +1218,19 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, /* allow the port to be reused */ rc = rdma_set_reuseaddr(cmid, 1); - if (rc != 0) { + if (rc) { CERROR("Unable to set reuse on cmid: %d\n", rc); return rc; } /* look for a free privileged port */ - for (port = PROT_SOCK-1; port > 0; port--) { + for (port = PROT_SOCK - 1; port > 0; port--) { srcaddr->sin_port = htons(port); rc = rdma_resolve_addr(cmid, (struct sockaddr *)srcaddr, (struct sockaddr *)dstaddr, timeout_ms); - if (rc == 0) { + if (!rc) { CDEBUG(D_NET, "bound to port %hu\n", port); return 0; } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) { @@ -1237,8 +1255,9 @@ kiblnd_connect_peer(kib_peer_t *peer) struct sockaddr_in dstaddr; int rc; - LASSERT(net != NULL); + LASSERT(net); LASSERT(peer->ibp_connecting > 0); + LASSERT(!peer->ibp_reconnecting); cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, IB_QPT_RC); @@ -1271,14 +1290,14 @@ kiblnd_connect_peer(kib_peer_t *peer) (struct sockaddr *)&dstaddr, *kiblnd_tunables.kib_timeout * 1000); } - if (rc != 0) { + if (rc) { /* Can't initiate address resolution: */ CERROR("Can't resolve addr for %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); goto failed2; } - LASSERT(cmid->device != NULL); + LASSERT(cmid->device); CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, &dev->ibd_ifip, cmid->device->name); @@ -1286,12 +1305,64 @@ kiblnd_connect_peer(kib_peer_t *peer) return; failed2: + kiblnd_peer_connect_failed(peer, 1, rc); kiblnd_peer_decref(peer); /* cmid's ref */ rdma_destroy_id(cmid); + return; failed: kiblnd_peer_connect_failed(peer, 1, rc); } +bool +kiblnd_reconnect_peer(kib_peer_t *peer) +{ + rwlock_t *glock = &kiblnd_data.kib_global_lock; + char *reason = NULL; + struct list_head txs; + unsigned long flags; + + INIT_LIST_HEAD(&txs); + + write_lock_irqsave(glock, flags); + if (!peer->ibp_reconnecting) { + if (peer->ibp_accepting) + reason = "accepting"; + else if (peer->ibp_connecting) + reason = "connecting"; + else if 
(!list_empty(&peer->ibp_conns)) + reason = "connected"; + else /* connected then closed */ + reason = "closed"; + + goto no_reconnect; + } + + LASSERT(!peer->ibp_accepting && !peer->ibp_connecting && + list_empty(&peer->ibp_conns)); + peer->ibp_reconnecting = 0; + + if (!kiblnd_peer_active(peer)) { + list_splice_init(&peer->ibp_tx_queue, &txs); + reason = "unlinked"; + goto no_reconnect; + } + + peer->ibp_connecting++; + peer->ibp_reconnected++; + write_unlock_irqrestore(glock, flags); + + kiblnd_connect_peer(peer); + return true; + +no_reconnect: + write_unlock_irqrestore(glock, flags); + + CWARN("Abort reconnection of %s: %s\n", + libcfs_nid2str(peer->ibp_nid), reason); + kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED); + return false; +} + void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) { @@ -1302,25 +1373,28 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) unsigned long flags; int rc; - /* If I get here, I've committed to send, so I complete the tx with - * failure on any problems */ - - LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ - LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ + /* + * If I get here, I've committed to send, so I complete the tx with + * failure on any problems + */ + LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */ + LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */ - /* First time, just use a read lock since I expect to find my peer - * connected */ + /* + * First time, just use a read lock since I expect to find my peer + * connected + */ read_lock_irqsave(g_lock, flags); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL && !list_empty(&peer->ibp_conns)) { + if (peer && !list_empty(&peer->ibp_conns)) { /* Found a peer with an established connection */ conn = kiblnd_get_conn_locked(peer); kiblnd_conn_addref(conn); /* 1 ref for me... */ read_unlock_irqrestore(g_lock, flags); - if (tx != NULL) + if (tx) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ return; @@ -1331,14 +1405,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_lock(g_lock); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) { + if (peer) { if (list_empty(&peer->ibp_conns)) { /* found a peer, but it's still connecting... */ - LASSERT(peer->ibp_connecting != 0 || - peer->ibp_accepting != 0); - if (tx != NULL) + LASSERT(kiblnd_peer_connecting(peer)); + if (tx) list_add_tail(&tx->tx_list, - &peer->ibp_tx_queue); + &peer->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { conn = kiblnd_get_conn_locked(peer); @@ -1346,7 +1419,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - if (tx != NULL) + if (tx) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ } @@ -1357,9 +1430,9 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) /* Allocate a peer ready to add to the peer table and retry */ rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { + if (rc) { CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); - if (tx != NULL) { + if (tx) { tx->tx_status = -EHOSTUNREACH; tx->tx_waiting = 0; kiblnd_tx_done(ni, tx); @@ -1370,14 +1443,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); - if (peer2 != NULL) { + if (peer2) { if (list_empty(&peer2->ibp_conns)) { /* found a peer, but it's still connecting... 
*/ - LASSERT(peer2->ibp_connecting != 0 || - peer2->ibp_accepting != 0); - if (tx != NULL) + LASSERT(kiblnd_peer_connecting(peer2)); + if (tx) list_add_tail(&tx->tx_list, - &peer2->ibp_tx_queue); + &peer2->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { conn = kiblnd_get_conn_locked(peer2); @@ -1385,7 +1457,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - if (tx != NULL) + if (tx) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ } @@ -1395,13 +1467,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) } /* Brand new peer */ - LASSERT(peer->ibp_connecting == 0); + LASSERT(!peer->ibp_connecting); peer->ibp_connecting = 1; /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); + LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown); - if (tx != NULL) + if (tx) list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); kiblnd_peer_addref(peer); @@ -1437,13 +1509,13 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(!payload_nob || payload_niov > 0); LASSERT(payload_niov <= LNET_MAX_IOV); /* Thread context */ LASSERT(!in_interrupt()); /* payload is either all vaddrs or all pages */ - LASSERT(!(payload_kiov != NULL && payload_iov != NULL)); + LASSERT(!(payload_kiov && payload_iov)); switch (type) { default: @@ -1451,7 +1523,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) return -EIO; case LNET_MSG_ACK: - LASSERT(payload_nob == 0); + LASSERT(!payload_nob); break; case LNET_MSG_GET: @@ -1464,7 +1536,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; /* send IMMEDIATE */ tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate txd for GET to %s\n", libcfs_nid2str(target.nid)); return -ENOMEM; @@ -1472,7 +1544,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) ibmsg = tx->tx_msg; rd = &ibmsg->ibm_u.get.ibgm_rd; - if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) + if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV)) rc = kiblnd_setup_rd_iov(ni, tx, rd, lntmsg->msg_md->md_niov, lntmsg->msg_md->md_iov.iov, @@ -1482,7 +1554,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) lntmsg->msg_md->md_niov, lntmsg->msg_md->md_iov.kiov, 0, lntmsg->msg_md->md_length); - if (rc != 0) { + if (rc) { CERROR("Can't setup GET sink for %s: %d\n", libcfs_nid2str(target.nid), rc); kiblnd_tx_done(ni, tx); @@ -1496,7 +1568,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); - if (tx->tx_lntmsg[1] == NULL) { + if (!tx->tx_lntmsg[1]) { CERROR("Can't create reply for GET -> %s\n", libcfs_nid2str(target.nid)); kiblnd_tx_done(ni, tx); @@ -1516,14 +1588,14 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) break; /* send IMMEDIATE */ tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate %s txd for %s\n", type == LNET_MSG_PUT ? 
"PUT" : "REPLY", libcfs_nid2str(target.nid)); return -ENOMEM; } - if (payload_kiov == NULL) + if (!payload_kiov) rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, payload_niov, payload_iov, payload_offset, payload_nob); @@ -1531,7 +1603,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, payload_niov, payload_kiov, payload_offset, payload_nob); - if (rc != 0) { + if (rc) { CERROR("Can't setup PUT src for %s: %d\n", libcfs_nid2str(target.nid), rc); kiblnd_tx_done(ni, tx); @@ -1555,16 +1627,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) <= IBLND_MSG_SIZE); tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't send %d to %s: tx descs exhausted\n", - type, libcfs_nid2str(target.nid)); + type, libcfs_nid2str(target.nid)); return -ENOMEM; } ibmsg = tx->tx_msg; ibmsg->ibm_u.immediate.ibim_hdr = *hdr; - if (payload_kiov != NULL) + if (payload_kiov) lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), payload_niov, payload_kiov, @@ -1596,22 +1668,22 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) int rc; tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't get tx for REPLY to %s\n", libcfs_nid2str(target.nid)); goto failed_0; } - if (nob == 0) + if (!nob) rc = 0; - else if (kiov == NULL) + else if (!kiov) rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, niov, iov, offset, nob); else rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, niov, kiov, offset, nob); - if (rc != 0) { + if (rc) { CERROR("Can't setup GET src for %s: %d\n", libcfs_nid2str(target.nid), rc); goto failed_1; @@ -1627,12 +1699,11 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) goto failed_1; } - if (nob == 0) { + if (!nob) { /* No RDMA: local completion may happen now! 
*/ lnet_finalize(ni, lntmsg, 0); } else { - /* RDMA: lnet_finalize(lntmsg) when it - * completes */ + /* RDMA: lnet_finalize(lntmsg) when it completes */ tx->tx_lntmsg[0] = lntmsg; } @@ -1647,8 +1718,8 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, - unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { kib_rx_t *rx = private; kib_msg_t *rxmsg = rx->rx_msg; @@ -1661,7 +1732,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, LASSERT(mlen <= rlen); LASSERT(!in_interrupt()); /* Either all pages or all vaddrs */ - LASSERT(!(kiov != NULL && iov != NULL)); + LASSERT(!(kiov && iov)); switch (rxmsg->ibm_type) { default: @@ -1671,13 +1742,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]); if (nob > rx->rx_nob) { CERROR("Immediate message from %s too big: %d(%d)\n", - libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), - nob, rx->rx_nob); + libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), + nob, rx->rx_nob); rc = -EPROTO; break; } - if (kiov != NULL) + if (kiov) lnet_copy_flat2kiov(niov, kiov, offset, IBLND_MSG_SIZE, rxmsg, offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), @@ -1694,7 +1765,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, kib_msg_t *txmsg; kib_rdma_desc_t *rd; - if (mlen == 0) { + if (!mlen) { lnet_finalize(ni, lntmsg, 0); kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0, rxmsg->ibm_u.putreq.ibprm_cookie); @@ -1702,7 +1773,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, } tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate tx for %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); /* Not replying will break the connection */ @@ -1712,13 +1783,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, txmsg = tx->tx_msg; rd = &txmsg->ibm_u.putack.ibpam_rd; - if (kiov == NULL) + if (!kiov) rc = kiblnd_setup_rd_iov(ni, tx, rd, niov, iov, offset, mlen); else rc = kiblnd_setup_rd_kiov(ni, tx, rd, niov, kiov, offset, mlen); - if (rc != 0) { + if (rc) { CERROR("Can't setup PUT sink for %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_tx_done(ni, tx); @@ -1744,7 +1815,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, } case IBLND_MSG_GET_REQ: - if (lntmsg != NULL) { + if (lntmsg) { /* Optimized GET; RDMA lntmsg's payload */ kiblnd_reply(ni, rx, lntmsg); } else { @@ -1778,7 +1849,7 @@ kiblnd_thread_fini(void) atomic_dec(&kiblnd_data.kib_nthreads); } -void +static void kiblnd_peer_alive(kib_peer_t *peer) { /* This is racy, but everyone's only writing cfs_time_current() */ @@ -1795,10 +1866,7 @@ kiblnd_peer_notify(kib_peer_t *peer) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (list_empty(&peer->ibp_conns) && - peer->ibp_accepting == 0 && - peer->ibp_connecting == 0 && - peer->ibp_error != 0) { + if (kiblnd_peer_idle(peer) && peer->ibp_error) { error = peer->ibp_error; peer->ibp_error = 0; @@ -1807,7 +1875,7 @@ kiblnd_peer_notify(kib_peer_t *peer) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (error != 0) + if (error) lnet_notify(peer->ibp_ni, peer->ibp_nid, 0, 
last_alive); } @@ -1815,25 +1883,27 @@ kiblnd_peer_notify(kib_peer_t *peer) void kiblnd_close_conn_locked(kib_conn_t *conn, int error) { - /* This just does the immediate housekeeping. 'error' is zero for a + /* + * This just does the immediate housekeeping. 'error' is zero for a * normal shutdown which can happen only after the connection has been * established. If the connection is established, schedule the - * connection to be finished off by the connd. Otherwise the connd is + * connection to be finished off by the connd. Otherwise the connd is * already dealing with it (either to set it up or tear it down). - * Caller holds kib_global_lock exclusively in irq context */ + * Caller holds kib_global_lock exclusively in irq context + */ kib_peer_t *peer = conn->ibc_peer; kib_dev_t *dev; unsigned long flags; - LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED); - if (error != 0 && conn->ibc_comms_error == 0) + if (error && !conn->ibc_comms_error) conn->ibc_comms_error = error; if (conn->ibc_state != IBLND_CONN_ESTABLISHED) return; /* already being handled */ - if (error == 0 && + if (!error && list_empty(&conn->ibc_tx_noops) && list_empty(&conn->ibc_tx_queue) && list_empty(&conn->ibc_tx_queue_rsrvd) && @@ -1843,12 +1913,12 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) libcfs_nid2str(peer->ibp_nid)); } else { CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", - libcfs_nid2str(peer->ibp_nid), error, - list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", - list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", - list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", - list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", - list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); + libcfs_nid2str(peer->ibp_nid), error, + list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", + list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", + list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", + list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", + list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); } dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev; @@ -1865,7 +1935,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING); - if (error != 0 && + if (error && kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); @@ -1929,8 +1999,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) if (txs == &conn->ibc_active_txs) { LASSERT(!tx->tx_queued); - LASSERT(tx->tx_waiting || - tx->tx_sending != 0); + LASSERT(tx->tx_waiting || tx->tx_sending); } else { LASSERT(tx->tx_queued); } @@ -1938,7 +2007,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) tx->tx_status = -ECONNABORTED; tx->tx_waiting = 0; - if (tx->tx_sending == 0) { + if (!tx->tx_sending) { tx->tx_queued = 0; list_del(&tx->tx_list); list_add(&tx->tx_list, &zombies); @@ -1958,14 +2027,17 @@ kiblnd_finalise_conn(kib_conn_t *conn) kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); - /* abort_receives moves QP state to IB_QPS_ERR. This is only required + /* + * abort_receives moves QP state to IB_QPS_ERR. This is only required * for connections that didn't get as far as being connected, because - * rdma_disconnect() does this for free. */ + * rdma_disconnect() does this for free. + */ kiblnd_abort_receives(conn); - /* Complete all tx descs not waiting for sends to complete. 
- * NB we should be safe from RDMA now that the QP has changed state */ - + /* + * Complete all tx descs not waiting for sends to complete. + * NB we should be safe from RDMA now that the QP has changed state + */ kiblnd_abort_txs(conn, &conn->ibc_tx_noops); kiblnd_abort_txs(conn, &conn->ibc_tx_queue); kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd); @@ -1975,13 +2047,13 @@ kiblnd_finalise_conn(kib_conn_t *conn) kiblnd_handle_early_rxs(conn); } -void +static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) { LIST_HEAD(zombies); unsigned long flags; - LASSERT(error != 0); + LASSERT(error); LASSERT(!in_interrupt()); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1994,14 +2066,14 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) peer->ibp_accepting--; } - if (peer->ibp_connecting != 0 || - peer->ibp_accepting != 0) { + if (kiblnd_peer_connecting(peer)) { /* another connection attempt under way... */ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + flags); return; } + peer->ibp_reconnected = 0; if (list_empty(&peer->ibp_conns)) { /* Take peer's blocked transmits to complete with error */ list_add(&zombies, &peer->ibp_tx_queue); @@ -2029,7 +2101,7 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); } -void +static void kiblnd_connreq_done(kib_conn_t *conn, int status) { kib_peer_t *peer = conn->ibc_peer; @@ -2047,14 +2119,14 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) LASSERT(!in_interrupt()); LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && - peer->ibp_connecting > 0) || + peer->ibp_connecting > 0) || (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT && - peer->ibp_accepting > 0)); + peer->ibp_accepting > 0)); LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); conn->ibc_connvars = NULL; - if (status != 0) { + if (status) { /* failed to establish connection */ kiblnd_peer_connect_failed(peer, active, status); kiblnd_finalise_conn(conn); @@ -2068,16 +2140,19 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); kiblnd_peer_alive(peer); - /* Add conn to peer's list and nuke any dangling conns from a different - * peer instance... */ + /* + * Add conn to peer's list and nuke any dangling conns from a different + * peer instance... + */ kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ list_add(&conn->ibc_list, &peer->ibp_conns); + peer->ibp_reconnected = 0; if (active) peer->ibp_connecting--; else peer->ibp_accepting--; - if (peer->ibp_version == 0) { + if (!peer->ibp_version) { peer->ibp_version = conn->ibc_version; peer->ibp_incarnation = conn->ibc_incarnation; } @@ -2095,7 +2170,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) list_del_init(&peer->ibp_tx_queue); if (!kiblnd_peer_active(peer) || /* peer has been deleted */ - conn->ibc_comms_error != 0) { /* error has happened already */ + conn->ibc_comms_error) { /* error has happened already */ lnet_ni_t *ni = peer->ibp_ni; /* start to shut down connection */ @@ -2107,6 +2182,16 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) return; } + /** + * refcount taken by cmid is not reliable after I released the glock + * because this connection is visible to other threads now, another + * thread can find and close this connection right after I released + * the glock, if kiblnd_cm_callback for RDMA_CM_EVENT_DISCONNECTED is + * called, it can release the connection refcount taken by cmid. 
+ * It means the connection could be destroyed before I finish my + * operations on it. + */ + kiblnd_conn_addref(conn); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); /* Schedule blocked txs */ @@ -2122,6 +2207,8 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) /* schedule blocked rxs */ kiblnd_handle_early_rxs(conn); + + kiblnd_conn_decref(conn); } static void @@ -2131,7 +2218,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) rc = rdma_reject(cmid, rej, sizeof(*rej)); - if (rc != 0) + if (rc) CWARN("Error %d sending reject\n", rc); } @@ -2159,14 +2246,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) /* cmid inherits 'context' from the corresponding listener id */ ibdev = (kib_dev_t *)cmid->context; - LASSERT(ibdev != NULL); + LASSERT(ibdev); memset(&rej, 0, sizeof(rej)); rej.ibr_magic = IBLND_MSG_MAGIC; rej.ibr_why = IBLND_REJECT_FATAL; rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; - peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); + peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr; if (*kiblnd_tunables.kib_require_priv_port && ntohs(peer_addr->sin_port) >= PROT_SOCK) { __u32 ip = ntohl(peer_addr->sin_addr.s_addr); @@ -2181,12 +2268,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - /* Future protocol version compatibility support! If the + /* + * Future protocol version compatibility support! If the * o2iblnd-specific protocol changes, or when LNET unifies * protocols over all LNDs, the initial connection will * negotiate a protocol version. I trap this here to avoid * console errors; the reject tells the peer which protocol I - * speak. */ + * speak. + */ if (reqmsg->ibm_magic == LNET_PROTO_MAGIC || reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC)) goto failed; @@ -2200,7 +2289,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; rc = kiblnd_unpack_msg(reqmsg, priv_nob); - if (rc != 0) { + if (rc) { CERROR("Can't parse connection request: %d\n", rc); goto failed; } @@ -2208,17 +2297,17 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) nid = reqmsg->ibm_srcnid; ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); - if (ni != NULL) { + if (ni) { net = (kib_net_t *)ni->ni_data; rej.ibr_incarnation = net->ibn_incarnation; } - if (ni == NULL || /* no matching net */ + if (!ni || /* no matching net */ ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */ net->ibn_dev != ibdev) { /* wrong device */ - CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n", + CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid), - ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid), + !ni ? 
"NA" : libcfs_nid2str(ni->ni_nid), ibdev->ibd_ifname, ibdev->ibd_nnets, &ibdev->ibd_ifip, libcfs_nid2str(reqmsg->ibm_dstnid)); @@ -2227,7 +2316,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } /* check time stamp as soon as possible */ - if (reqmsg->ibm_dststamp != 0 && + if (reqmsg->ibm_dststamp && reqmsg->ibm_dststamp != net->ibn_incarnation) { CWARN("Stale connection request\n"); rej.ibr_why = IBLND_REJECT_CONN_STALE; @@ -2243,10 +2332,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if (reqmsg->ibm_u.connparams.ibcp_queue_depth != + if (reqmsg->ibm_u.connparams.ibcp_queue_depth > IBLND_MSG_QUEUE_SIZE(version)) { - CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n", - libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth, + CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n", + libcfs_nid2str(nid), + reqmsg->ibm_u.connparams.ibcp_queue_depth, IBLND_MSG_QUEUE_SIZE(version)); if (version == IBLND_MSG_VERSION) @@ -2255,18 +2345,28 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if (reqmsg->ibm_u.connparams.ibcp_max_frags != + if (reqmsg->ibm_u.connparams.ibcp_max_frags > IBLND_RDMA_FRAGS(version)) { - CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n", - libcfs_nid2str(nid), version, - reqmsg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(version)); + CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); - if (version == IBLND_MSG_VERSION) + if (version >= IBLND_MSG_VERSION) rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; goto failed; + } else if (reqmsg->ibm_u.connparams.ibcp_max_frags < + IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) { + CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); + + if (version >= IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; + goto failed; } if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { @@ -2279,17 +2379,21 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) /* assume 'nid' is a new peer; create */ rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { + if (rc) { CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } + /* We have validated the peer's parameters so use those */ + peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags; + peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth; + write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); - if (peer2 != NULL) { - if (peer2->ibp_version == 0) { + if (peer2) { + if (!peer2->ibp_version) { peer2->ibp_version = version; peer2->ibp_incarnation = reqmsg->ibm_srcstamp; } @@ -2298,10 +2402,16 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || peer2->ibp_version != version) { kiblnd_close_peer_conns_locked(peer2, -ESTALE); + + if (kiblnd_peer_active(peer2)) { + peer2->ibp_incarnation = reqmsg->ibm_srcstamp; + peer2->ibp_version = version; + } write_unlock_irqrestore(g_lock, flags); - CWARN("Conn stale %s [old ver: %x, new ver: %x]\n", - libcfs_nid2str(nid), peer2->ibp_version, version); 
+ CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n", + libcfs_nid2str(nid), peer2->ibp_version, version, + peer2->ibp_incarnation, reqmsg->ibm_srcstamp); kiblnd_peer_decref(peer); rej.ibr_why = IBLND_REJECT_CONN_STALE; @@ -2309,7 +2419,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } /* tie-break connection race in favour of the higher NID */ - if (peer2->ibp_connecting != 0 && + if (peer2->ibp_connecting && nid < ni->ni_nid) { write_unlock_irqrestore(g_lock, flags); @@ -2320,24 +2430,37 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } + /** + * passive connection is allowed even this peer is waiting for + * reconnection. + */ + peer2->ibp_reconnecting = 0; peer2->ibp_accepting++; kiblnd_peer_addref(peer2); + /** + * Race with kiblnd_launch_tx (active connect) to create peer + * so copy validated parameters since we now know what the + * peer's limits are + */ + peer2->ibp_max_frags = peer->ibp_max_frags; + peer2->ibp_queue_depth = peer->ibp_queue_depth; + write_unlock_irqrestore(g_lock, flags); kiblnd_peer_decref(peer); peer = peer2; } else { /* Brand new peer */ - LASSERT(peer->ibp_accepting == 0); - LASSERT(peer->ibp_version == 0 && - peer->ibp_incarnation == 0); + LASSERT(!peer->ibp_accepting); + LASSERT(!peer->ibp_version && + !peer->ibp_incarnation); peer->ibp_accepting = 1; peer->ibp_version = version; peer->ibp_incarnation = reqmsg->ibm_srcstamp; /* I have a ref on ni that prevents it being shutdown */ - LASSERT(net->ibn_shutdown == 0); + LASSERT(!net->ibn_shutdown); kiblnd_peer_addref(peer); list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); @@ -2345,31 +2468,33 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) write_unlock_irqrestore(g_lock, flags); } - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); - if (conn == NULL) { + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, + version); + if (!conn) { kiblnd_peer_connect_failed(peer, 0, -ENOMEM); kiblnd_peer_decref(peer); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } - /* conn now "owns" cmid, so I return success from here on to ensure the - * CM callback doesn't destroy cmid. */ - + /* + * conn now "owns" cmid, so I return success from here on to ensure the + * CM callback doesn't destroy cmid. 
+ */ conn->ibc_incarnation = reqmsg->ibm_srcstamp; - conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); - conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version); - LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) - <= IBLND_RX_MSGS(version)); + conn->ibc_credits = conn->ibc_queue_depth; + conn->ibc_reserved_credits = conn->ibc_queue_depth; + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + + IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn)); ackmsg = &conn->ibc_connvars->cv_msg; memset(ackmsg, 0, sizeof(*ackmsg)); kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, sizeof(ackmsg->ibm_u.connparams)); - ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); + ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; + ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); @@ -2385,7 +2510,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid)); rc = rdma_accept(cmid, &cp); - if (rc != 0) { + if (rc) { CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc); rej.ibr_version = version; rej.ibr_why = IBLND_REJECT_FATAL; @@ -2399,7 +2524,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) return 0; failed: - if (ni != NULL) + if (ni) lnet_ni_decref(ni); rej.ibr_version = version; @@ -2411,45 +2536,82 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } static void -kiblnd_reconnect(kib_conn_t *conn, int version, - __u64 incarnation, int why, kib_connparams_t *cp) +kiblnd_check_reconnect(kib_conn_t *conn, int version, + __u64 incarnation, int why, kib_connparams_t *cp) { + rwlock_t *glock = &kiblnd_data.kib_global_lock; kib_peer_t *peer = conn->ibc_peer; char *reason; - int retry = 0; + int msg_size = IBLND_MSG_SIZE; + int frag_num = -1; + int queue_dep = -1; + bool reconnect; unsigned long flags; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ + LASSERT(!peer->ibp_reconnecting); - write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + if (cp) { + msg_size = cp->ibcp_max_msg_size; + frag_num = cp->ibcp_max_frags; + queue_dep = cp->ibcp_queue_depth; + } - /* retry connection if it's still needed and no other connection + write_lock_irqsave(glock, flags); + /** + * retry connection if it's still needed and no other connection * attempts (active or passive) are in progress * NB: reconnect is still needed even when ibp_tx_queue is * empty if ibp_version != version because reconnect may be - * initiated by kiblnd_query() */ - if ((!list_empty(&peer->ibp_tx_queue) || - peer->ibp_version != version) && - peer->ibp_connecting == 1 && - peer->ibp_accepting == 0) { - retry = 1; - peer->ibp_connecting++; - - peer->ibp_version = version; - peer->ibp_incarnation = incarnation; + * initiated by kiblnd_query() + */ + reconnect = (!list_empty(&peer->ibp_tx_queue) || + peer->ibp_version != version) && + peer->ibp_connecting == 1 && + !peer->ibp_accepting; + if (!reconnect) { + reason = "no need"; + goto out; } - write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - - if (!retry) - return; - switch (why) { default: reason = "Unknown"; break; + case IBLND_REJECT_RDMA_FRAGS: + if (!cp) { + reason = "can't negotiate max frags"; + goto out; + } + if 
(!*kiblnd_tunables.kib_map_on_demand) { + reason = "map_on_demand must be enabled"; + goto out; + } + if (conn->ibc_max_frags <= frag_num) { + reason = "unsupported max frags"; + goto out; + } + + peer->ibp_max_frags = frag_num; + reason = "rdma fragments"; + break; + + case IBLND_REJECT_MSG_QUEUE_SIZE: + if (!cp) { + reason = "can't negotiate queue depth"; + goto out; + } + if (conn->ibc_queue_depth <= queue_dep) { + reason = "unsupported queue depth"; + goto out; + } + + peer->ibp_queue_depth = queue_dep; + reason = "queue depth"; + break; + case IBLND_REJECT_CONN_STALE: reason = "stale"; break; @@ -2463,14 +2625,24 @@ kiblnd_reconnect(kib_conn_t *conn, int version, break; } - CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n", - libcfs_nid2str(peer->ibp_nid), - reason, IBLND_MSG_VERSION, version, - cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version), - cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version), - cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE); + conn->ibc_reconnect = 1; + peer->ibp_reconnecting = 1; + peer->ibp_version = version; + if (incarnation) + peer->ibp_incarnation = incarnation; +out: + write_unlock_irqrestore(glock, flags); - kiblnd_connect_peer(peer); + CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", + libcfs_nid2str(peer->ibp_nid), + reconnect ? "reconnect" : "don't reconnect", + reason, IBLND_MSG_VERSION, version, msg_size, + conn->ibc_queue_depth, queue_dep, + conn->ibc_max_frags, frag_num); + /** + * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer + * while destroying the zombie + */ } static void @@ -2483,8 +2655,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) switch (reason) { case IB_CM_REJ_STALE_CONN: - kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0, - IBLND_REJECT_CONN_STALE, NULL); + kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0, + IBLND_REJECT_CONN_STALE, NULL); break; case IB_CM_REJ_INVALID_SERVICE_ID: @@ -2521,9 +2693,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) if (priv_nob >= sizeof(kib_rej_t) && rej->ibr_version > IBLND_MSG_VERSION_1) { - /* priv_nob is always 148 in current version + /* + * priv_nob is always 148 in current version * of OFED, so we still need to check version. - * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */ + * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) + */ cp = &rej->ibr_cp; if (flip) { @@ -2564,24 +2738,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) case IBLND_REJECT_CONN_RACE: case IBLND_REJECT_CONN_STALE: case IBLND_REJECT_CONN_UNCOMPAT: - kiblnd_reconnect(conn, rej->ibr_version, - incarnation, rej->ibr_why, cp); - break; - case IBLND_REJECT_MSG_QUEUE_SIZE: - CERROR("%s rejected: incompatible message queue depth %d, %d\n", - libcfs_nid2str(peer->ibp_nid), - cp != NULL ? cp->ibcp_queue_depth : - IBLND_MSG_QUEUE_SIZE(rej->ibr_version), - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); - break; - case IBLND_REJECT_RDMA_FRAGS: - CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n", - libcfs_nid2str(peer->ibp_nid), - cp != NULL ? 
cp->ibcp_max_frags : - IBLND_RDMA_FRAGS(rej->ibr_version), - IBLND_RDMA_FRAGS(conn->ibc_version)); + kiblnd_check_reconnect(conn, rej->ibr_version, + incarnation, + rej->ibr_why, cp); break; case IBLND_REJECT_NO_RESOURCES: @@ -2623,9 +2784,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) int rc = kiblnd_unpack_msg(msg, priv_nob); unsigned long flags; - LASSERT(net != NULL); + LASSERT(net); - if (rc != 0) { + if (rc) { CERROR("Can't unpack connack from %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); goto failed; @@ -2645,22 +2806,22 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - if (msg->ibm_u.connparams.ibcp_queue_depth != - IBLND_MSG_QUEUE_SIZE(ver)) { - CERROR("%s has incompatible queue depth %d(%d wanted)\n", + if (msg->ibm_u.connparams.ibcp_queue_depth > + conn->ibc_queue_depth) { + CERROR("%s has incompatible queue depth %d (<=%d wanted)\n", libcfs_nid2str(peer->ibp_nid), msg->ibm_u.connparams.ibcp_queue_depth, - IBLND_MSG_QUEUE_SIZE(ver)); + conn->ibc_queue_depth); rc = -EPROTO; goto failed; } - if (msg->ibm_u.connparams.ibcp_max_frags != - IBLND_RDMA_FRAGS(ver)) { - CERROR("%s has incompatible max_frags %d (%d wanted)\n", + if (msg->ibm_u.connparams.ibcp_max_frags > + conn->ibc_max_frags) { + CERROR("%s has incompatible max_frags %d (<=%d wanted)\n", libcfs_nid2str(peer->ibp_nid), msg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(ver)); + conn->ibc_max_frags); rc = -EPROTO; goto failed; } @@ -2682,7 +2843,7 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) rc = -ESTALE; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (rc != 0) { + if (rc) { CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n", libcfs_nid2str(peer->ibp_nid), rc, msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags); @@ -2690,21 +2851,24 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) } conn->ibc_incarnation = msg->ibm_srcstamp; - conn->ibc_credits = - conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); - LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) - <= IBLND_RX_MSGS(ver)); + conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags; + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + + IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn)); kiblnd_connreq_done(conn, 0); return; failed: - /* NB My QP has already established itself, so I handle anything going + /* + * NB My QP has already established itself, so I handle anything going * wrong here by setting ibc_comms_error. * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then - * immediately tears it down. */ - - LASSERT(rc != 0); + * immediately tears it down. + */ + LASSERT(rc); conn->ibc_comms_error = rc; kiblnd_connreq_done(conn, 0); } @@ -2724,28 +2888,30 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); incarnation = peer->ibp_incarnation; - version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : - peer->ibp_version; + version = !peer->ibp_version ? 
IBLND_MSG_VERSION : + peer->ibp_version; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version); - if (conn == NULL) { + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, + version); + if (!conn) { kiblnd_peer_connect_failed(peer, 1, -ENOMEM); kiblnd_peer_decref(peer); /* lose cmid's ref */ return -ENOMEM; } - /* conn "owns" cmid now, so I return success from here on to ensure the + /* + * conn "owns" cmid now, so I return success from here on to ensure the * CM callback doesn't destroy cmid. conn also takes over cmid's ref - * on peer */ - + * on peer + */ msg = &conn->ibc_connvars->cv_msg; memset(msg, 0, sizeof(*msg)); kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams)); - msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); - msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); + msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; + msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; kiblnd_pack_msg(peer->ibp_ni, msg, version, @@ -2764,7 +2930,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) LASSERT(conn->ibc_cmid == cmid); rc = rdma_connect(cmid, &cp); - if (rc != 0) { + if (rc) { CERROR("Can't connect to %s: %d\n", libcfs_nid2str(peer->ibp_nid), rc); kiblnd_connreq_done(conn, rc); @@ -2798,10 +2964,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_ADDR_ERROR: peer = (kib_peer_t *)cmid->context; CNETERR("%s: ADDR ERROR %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer); - return -EHOSTUNREACH; /* rc != 0 destroys cmid */ + return -EHOSTUNREACH; /* rc destroys cmid */ case RDMA_CM_EVENT_ADDR_RESOLVED: peer = (kib_peer_t *)cmid->context; @@ -2809,14 +2975,14 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) CDEBUG(D_NET, "%s Addr resolved: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); - if (event->status != 0) { + if (event->status) { CNETERR("Can't resolve address for %s: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); rc = event->status; } else { rc = rdma_resolve_route( cmid, *kiblnd_tunables.kib_timeout * 1000); - if (rc == 0) + if (!rc) return 0; /* Can't initiate route resolution */ CERROR("Can't resolve route for %s: %d\n", @@ -2824,7 +2990,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } kiblnd_peer_connect_failed(peer, 1, rc); kiblnd_peer_decref(peer); - return rc; /* rc != 0 destroys cmid */ + return rc; /* rc destroys cmid */ case RDMA_CM_EVENT_ROUTE_ERROR: peer = (kib_peer_t *)cmid->context; @@ -2832,28 +2998,28 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) libcfs_nid2str(peer->ibp_nid), event->status); kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer); - return -EHOSTUNREACH; /* rc != 0 destroys cmid */ + return -EHOSTUNREACH; /* rc destroys cmid */ case RDMA_CM_EVENT_ROUTE_RESOLVED: peer = (kib_peer_t *)cmid->context; CDEBUG(D_NET, "%s Route resolved: %d\n", libcfs_nid2str(peer->ibp_nid), event->status); - if (event->status == 0) + if (!event->status) return kiblnd_active_connect(cmid); CNETERR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), event->status); + libcfs_nid2str(peer->ibp_nid), event->status); 
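/*
 * Aside on the "rc destroys cmid" comments in this callback: by RDMA CM
 * convention, returning non-zero from the event handler makes the CM
 * destroy the cm_id itself, so every failure path must drop the peer or
 * conn references it owns before returning.  A hedged sketch of that
 * shape, with hypothetical helper names that are not part of this patch:
 */
static int example_cm_event_handler(struct rdma_cm_id *cmid,
				    struct rdma_cm_event *event)
{
	int rc = example_handle_event(cmid, event);	/* hypothetical */

	if (rc)		/* drop our own references before the CM frees cmid */
		example_drop_refs(cmid->context);	/* hypothetical */

	return rc;	/* any non-zero rc tells the RDMA CM to destroy cmid */
}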
kiblnd_peer_connect_failed(peer, 1, event->status); kiblnd_peer_decref(peer); - return event->status; /* rc != 0 destroys cmid */ + return event->status; /* rc destroys cmid */ case RDMA_CM_EVENT_UNREACHABLE: conn = (kib_conn_t *)cmid->context; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); CNETERR("%s: UNREACHABLE %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); + libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); kiblnd_connreq_done(conn, -ENETDOWN); kiblnd_conn_decref(conn); return 0; @@ -2876,8 +3042,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case IBLND_CONN_PASSIVE_WAIT: CERROR("%s: REJECTED %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - event->status); + libcfs_nid2str(conn->ibc_peer->ibp_nid), + event->status); kiblnd_connreq_done(conn, -ECONNRESET); break; @@ -2933,8 +3099,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) LCONSOLE_ERROR_MSG(0x131, "Received notification of device removal\n" "Please shutdown LNET to allow this to proceed\n"); - /* Can't remove network from underneath LNET for now, so I have - * to ignore this */ + /* + * Can't remove network from underneath LNET for now, so I have + * to ignore this + */ return 0; case RDMA_CM_EVENT_ADDR_CHANGE: @@ -2956,7 +3124,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) LASSERT(tx->tx_queued); } else { LASSERT(!tx->tx_queued); - LASSERT(tx->tx_waiting || tx->tx_sending != 0); + LASSERT(tx->tx_waiting || tx->tx_sending); } if (cfs_time_aftereq(jiffies, tx->tx_deadline)) { @@ -2989,13 +3157,16 @@ kiblnd_check_conns(int idx) struct list_head *ptmp; kib_peer_t *peer; kib_conn_t *conn; + kib_conn_t *temp; kib_conn_t *tmp; struct list_head *ctmp; unsigned long flags; - /* NB. We expect to have a look at all the peers and not find any + /* + * NB. We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we - * take a look... */ + * take a look... + */ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); list_for_each(ptmp, peers) { @@ -3028,8 +3199,7 @@ kiblnd_check_conns(int idx) conn->ibc_reserved_credits); list_add(&conn->ibc_connd_list, &closes); } else { - list_add(&conn->ibc_connd_list, - &checksends); + list_add(&conn->ibc_connd_list, &checksends); } /* +ref for 'closes' or 'checksends' */ kiblnd_conn_addref(conn); @@ -3040,21 +3210,23 @@ kiblnd_check_conns(int idx) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - /* Handle timeout by closing the whole + /* + * Handle timeout by closing the whole * connection. We can only be sure RDMA activity - * has ceased once the QP has been modified. */ + * has ceased once the QP has been modified. + */ list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) { list_del(&conn->ibc_connd_list); kiblnd_close_conn(conn, -ETIMEDOUT); kiblnd_conn_decref(conn); } - /* In case we have enough credits to return via a + /* + * In case we have enough credits to return via a * NOOP, but there were no non-blocking tx descs - * free to do it last time... */ - while (!list_empty(&checksends)) { - conn = list_entry(checksends.next, - kib_conn_t, ibc_connd_list); + * free to do it last time... 
+ */ list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) { list_del(&conn->ibc_connd_list); kiblnd_check_sends(conn); kiblnd_conn_decref(conn); } @@ -3074,9 +3246,21 @@ kiblnd_disconnect_conn(kib_conn_t *conn) kiblnd_peer_notify(conn->ibc_peer); } +/** + * High-water for reconnection to the same peer; reconnection attempts should + * be delayed after trying more than KIB_RECONN_HIGH_RACE. + */ +#define KIB_RECONN_HIGH_RACE 10 +/** + * Allow connd to take a break and handle other things after consecutive + * reconnection attempts. + */ +#define KIB_RECONN_BREAK 100 + int kiblnd_connd(void *arg) { + spinlock_t *lock = &kiblnd_data.kib_connd_lock; wait_queue_t wait; unsigned long flags; kib_conn_t *conn; @@ -3091,39 +3275,79 @@ kiblnd_connd(void *arg) init_waitqueue_entry(&wait, current); kiblnd_data.kib_connd = current; - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(lock, flags); while (!kiblnd_data.kib_shutdown) { + int reconn = 0; dropped_lock = 0; if (!list_empty(&kiblnd_data.kib_connd_zombies)) { + kib_peer_t *peer = NULL; + conn = list_entry(kiblnd_data.kib_connd_zombies.next, - kib_conn_t, ibc_list); + kib_conn_t, ibc_list); list_del(&conn->ibc_list); + if (conn->ibc_reconnect) { + peer = conn->ibc_peer; + kiblnd_peer_addref(peer); + } - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, - flags); + spin_unlock_irqrestore(lock, flags); dropped_lock = 1; - kiblnd_destroy_conn(conn); + kiblnd_destroy_conn(conn, !peer); + + spin_lock_irqsave(lock, flags); + if (!peer) + continue; - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + conn->ibc_peer = peer; + if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE) + list_add_tail(&conn->ibc_list, + &kiblnd_data.kib_reconn_list); + else + list_add_tail(&conn->ibc_list, + &kiblnd_data.kib_reconn_wait); } if (!list_empty(&kiblnd_data.kib_connd_conns)) { conn = list_entry(kiblnd_data.kib_connd_conns.next, - kib_conn_t, ibc_list); + kib_conn_t, ibc_list); list_del(&conn->ibc_list); - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, - flags); + spin_unlock_irqrestore(lock, flags); dropped_lock = 1; kiblnd_disconnect_conn(conn); kiblnd_conn_decref(conn); - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(lock, flags); + } + + while (reconn < KIB_RECONN_BREAK) { + if (kiblnd_data.kib_reconn_sec != + ktime_get_real_seconds()) { + kiblnd_data.kib_reconn_sec = ktime_get_real_seconds(); + list_splice_init(&kiblnd_data.kib_reconn_wait, + &kiblnd_data.kib_reconn_list); + } + + if (list_empty(&kiblnd_data.kib_reconn_list)) + break; + + conn = list_entry(kiblnd_data.kib_reconn_list.next, + kib_conn_t, ibc_list); + list_del(&conn->ibc_list); + + spin_unlock_irqrestore(lock, flags); + dropped_lock = 1; + + reconn += kiblnd_reconnect_peer(conn->ibc_peer); + kiblnd_peer_decref(conn->ibc_peer); + LIBCFS_FREE(conn, sizeof(*conn)); + + spin_lock_irqsave(lock, flags); } /* careful with the jiffy wrap... */ @@ -3133,21 +3357,22 @@ kiblnd_connd(void *arg) const int p = 1; int chunk = kiblnd_data.kib_peer_hash_size; - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(lock, flags); dropped_lock = 1; - /* Time to check for RDMA timeouts on a few more + /* + * Time to check for RDMA timeouts on a few more * peers: I do checks every 'p' seconds on a * proportion of the peer table and I need to check * every connection 'n' times within a timeout * interval, to ensure I detect a timeout on any * connection within (n+1)/n times the timeout - * interval.
*/ - + * interval. + */ if (*kiblnd_tunables.kib_timeout > n * p) chunk = (chunk * n * p) / *kiblnd_tunables.kib_timeout; - if (chunk == 0) + if (!chunk) chunk = 1; for (i = 0; i < chunk; i++) { @@ -3156,8 +3381,8 @@ kiblnd_connd(void *arg) kiblnd_data.kib_peer_hash_size; } - deadline += p * HZ; - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + deadline += msecs_to_jiffies(p * MSEC_PER_SEC); + spin_lock_irqsave(lock, flags); } if (dropped_lock) @@ -3166,15 +3391,15 @@ kiblnd_connd(void *arg) /* Nothing to do for 'timeout' */ set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(lock, flags); schedule_timeout(timeout); remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(lock, flags); } - spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(lock, flags); kiblnd_thread_fini(); return 0; @@ -3206,12 +3431,14 @@ kiblnd_complete(struct ib_wc *wc) LBUG(); case IBLND_WID_RDMA: - /* We only get RDMA completion notification if it fails. All + /* + * We only get RDMA completion notification if it fails. All * subsequent work items, including the final SEND will fail * too. However we can't print out any more info about the * failing RDMA because 'tx' might be back on the idle list or * even reused already if we didn't manage to post all our work - * items */ + * items + */ CNETERR("RDMA (tx: %p) failed: %d\n", kiblnd_wreqid2ptr(wc->wr_id), wc->status); return; @@ -3230,11 +3457,13 @@ kiblnd_complete(struct ib_wc *wc) void kiblnd_cq_completion(struct ib_cq *cq, void *arg) { - /* NB I'm not allowed to schedule this conn once its refcount has + /* + * NB I'm not allowed to schedule this conn once its refcount has * reached 0. Since fundamentally I'm racing with scheduler threads * consuming my CQ I could be called after all completions have - * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 - * and this CQ is about to be destroyed so I NOOP. */ + * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted + * and this CQ is about to be destroyed so I NOOP. + */ kib_conn_t *conn = arg; struct kib_sched_info *sched = conn->ibc_sched; unsigned long flags; @@ -3288,7 +3517,7 @@ kiblnd_scheduler(void *arg) sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt); - if (rc != 0) { + if (rc) { CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt); } @@ -3308,8 +3537,8 @@ kiblnd_scheduler(void *arg) did_something = 0; if (!list_empty(&sched->ibs_conns)) { - conn = list_entry(sched->ibs_conns.next, - kib_conn_t, ibc_sched_list); + conn = list_entry(sched->ibs_conns.next, kib_conn_t, + ibc_sched_list); /* take over kib_sched_conns' ref on conn... 
*/ LASSERT(conn->ibc_scheduled); list_del(&conn->ibc_sched_list); @@ -3317,8 +3546,10 @@ kiblnd_scheduler(void *arg) spin_unlock_irqrestore(&sched->ibs_lock, flags); + wc.wr_id = IBLND_WID_INVAL; + rc = ib_poll_cq(conn->ibc_cq, 1, &wc); - if (rc == 0) { + if (!rc) { rc = ib_req_notify_cq(conn->ibc_cq, IB_CQ_NEXT_COMP); if (rc < 0) { @@ -3327,13 +3558,22 @@ kiblnd_scheduler(void *arg) kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); spin_lock_irqsave(&sched->ibs_lock, - flags); + flags); continue; } rc = ib_poll_cq(conn->ibc_cq, 1, &wc); } + if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) { + LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n", + rc, wc.opcode, wc.status, + wc.vendor_err, + libcfs_nid2str(conn->ibc_peer->ibp_nid), + conn->ibc_state); + rc = -EINVAL; + } + if (rc < 0) { CWARN("%s: ib_poll_cq failed: %d, closing connection\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), @@ -3346,21 +3586,23 @@ kiblnd_scheduler(void *arg) spin_lock_irqsave(&sched->ibs_lock, flags); - if (rc != 0 || conn->ibc_ready) { - /* There may be another completion waiting; get + if (rc || conn->ibc_ready) { + /* + * There may be another completion waiting; get * another scheduler to check while I handle - * this one... */ + * this one... + */ /* +1 ref for sched_conns */ kiblnd_conn_addref(conn); list_add_tail(&conn->ibc_sched_list, - &sched->ibs_conns); + &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); } else { conn->ibc_scheduled = 0; } - if (rc != 0) { + if (rc) { spin_unlock_irqrestore(&sched->ibs_lock, flags); kiblnd_complete(&wc); @@ -3400,7 +3642,7 @@ kiblnd_failover_thread(void *arg) unsigned long flags; int rc; - LASSERT(*kiblnd_tunables.kib_dev_failover != 0); + LASSERT(*kiblnd_tunables.kib_dev_failover); cfs_block_allsigs(); @@ -3459,13 +3701,15 @@ kiblnd_failover_thread(void *arg) remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); write_lock_irqsave(glock, flags); - if (!long_sleep || rc != 0) + if (!long_sleep || rc) continue; - /* have a long sleep, routine check all active devices, + /* + * have a long sleep, routine check all active devices, * we need checking like this because if there is not active * connection on the dev and no SEND from local, we may listen - * on wrong HCA for ever while there is a bonding failover */ + * on wrong HCA for ever while there is a bonding failover + */ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { if (kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c index 1d4e7efb5..b4607dad3 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c @@ -52,8 +52,10 @@ static int timeout = 50; module_param(timeout, int, 0644); MODULE_PARM_DESC(timeout, "timeout (seconds)"); -/* Number of threads in each scheduler pool which is percpt, - * we will estimate reasonable value based on CPUs if it's set to zero. */ +/* + * Number of threads in each scheduler pool which is percpt, + * we will estimate reasonable value based on CPUs if it's set to zero. 
+ */ static int nscheds; module_param(nscheds, int, 0444); MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool"); @@ -200,7 +202,7 @@ kiblnd_tunables_init(void) if (*kiblnd_tunables.kib_map_on_demand == 1) *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */ - if (*kiblnd_tunables.kib_concurrent_sends == 0) { + if (!*kiblnd_tunables.kib_concurrent_sends) { if (*kiblnd_tunables.kib_map_on_demand > 0 && *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 05aa90ea5..cca7b2f7f 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -70,7 +70,7 @@ ksocknal_create_route(__u32 ipaddr, int port) ksock_route_t *route; LIBCFS_ALLOC(route, sizeof(*route)); - if (route == NULL) + if (!route) return NULL; atomic_set(&route->ksnr_refcount, 1); @@ -91,9 +91,9 @@ ksocknal_create_route(__u32 ipaddr, int port) void ksocknal_destroy_route(ksock_route_t *route) { - LASSERT(atomic_read(&route->ksnr_refcount) == 0); + LASSERT(!atomic_read(&route->ksnr_refcount)); - if (route->ksnr_peer != NULL) + if (route->ksnr_peer) ksocknal_peer_decref(route->ksnr_peer); LIBCFS_FREE(route, sizeof(*route)); @@ -102,6 +102,7 @@ ksocknal_destroy_route(ksock_route_t *route) static int ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) { + int cpt = lnet_cpt_of_nid(id.nid); ksock_net_t *net = ni->ni_data; ksock_peer_t *peer; @@ -109,8 +110,8 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) LASSERT(id.pid != LNET_PID_ANY); LASSERT(!in_interrupt()); - LIBCFS_ALLOC(peer, sizeof(*peer)); - if (peer == NULL) + LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); + if (!peer) return -ENOMEM; peer->ksnp_ni = ni; @@ -152,10 +153,10 @@ ksocknal_destroy_peer(ksock_peer_t *peer) ksock_net_t *net = peer->ksnp_ni->ni_data; CDEBUG(D_NET, "peer %s %p deleted\n", - libcfs_id2str(peer->ksnp_id), peer); + libcfs_id2str(peer->ksnp_id), peer); - LASSERT(atomic_read(&peer->ksnp_refcount) == 0); - LASSERT(peer->ksnp_accepting == 0); + LASSERT(!atomic_read(&peer->ksnp_refcount)); + LASSERT(!peer->ksnp_accepting); LASSERT(list_empty(&peer->ksnp_conns)); LASSERT(list_empty(&peer->ksnp_routes)); LASSERT(list_empty(&peer->ksnp_tx_queue)); @@ -163,10 +164,12 @@ ksocknal_destroy_peer(ksock_peer_t *peer) LIBCFS_FREE(peer, sizeof(*peer)); - /* NB a peer's connections and routes keep a reference on their peer + /* + * NB a peer's connections and routes keep a reference on their peer * until they are destroyed, so we can be assured that _all_ state to * do with this peer has been cleaned up when its refcount drops to - * zero. */ + * zero. + */ spin_lock_bh(&net->ksnn_lock); net->ksnn_npeers--; spin_unlock_bh(&net->ksnn_lock); @@ -180,7 +183,6 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) ksock_peer_t *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, ksock_peer_t, ksnp_list); LASSERT(!peer->ksnp_closing); @@ -207,7 +209,7 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) read_lock(&ksocknal_data.ksnd_global_lock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) /* +1 ref for caller? */ + if (peer) /* +1 ref for caller? 
*/ ksocknal_peer_addref(peer); read_unlock(&ksocknal_data.ksnd_global_lock); @@ -226,9 +228,11 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer) ip = peer->ksnp_passive_ips[i]; iface = ksocknal_ip2iface(peer->ksnp_ni, ip); - /* All IPs in peer->ksnp_passive_ips[] come from the - * interface list, therefore the call must succeed. */ - LASSERT(iface != NULL); + /* + * All IPs in peer->ksnp_passive_ips[] come from the + * interface list, therefore the call must succeed. + */ + LASSERT(iface); CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n", peer, iface, iface->ksni_nroutes); @@ -246,8 +250,8 @@ static int ksocknal_get_peer_info(lnet_ni_t *ni, int index, - lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, - int *port, int *conn_count, int *share_count) + lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, + int *port, int *conn_count, int *share_count) { ksock_peer_t *peer; struct list_head *ptmp; @@ -260,14 +264,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, read_lock(&ksocknal_data.ksnd_global_lock); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { - list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; - if (peer->ksnp_n_passive_ips == 0 && + if (!peer->ksnp_n_passive_ips && list_empty(&peer->ksnp_routes)) { if (index-- > 0) continue; @@ -301,7 +304,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, continue; route = list_entry(rtmp, ksock_route_t, - ksnr_list); + ksnr_list); *id = peer->ksnp_id; *myip = route->ksnr_myipaddr; @@ -330,7 +333,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) ksocknal_route_addref(route); if (route->ksnr_myipaddr != conn->ksnc_myipaddr) { - if (route->ksnr_myipaddr == 0) { + if (!route->ksnr_myipaddr) { /* route wasn't bound locally yet (the initial route) */ CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n", libcfs_id2str(peer->ksnp_id), @@ -345,21 +348,23 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); - if (iface != NULL) + if (iface) iface->ksni_nroutes--; } route->ksnr_myipaddr = conn->ksnc_myipaddr; iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); - if (iface != NULL) + if (iface) iface->ksni_nroutes++; } - route->ksnr_connected |= (1<<type); + route->ksnr_connected |= (1 << type); route->ksnr_conn_count++; - /* Successful connection => further attempts can - * proceed immediately */ + /* + * Successful connection => further attempts can + * proceed immediately + */ route->ksnr_retry_interval = 0; } @@ -371,10 +376,10 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) ksock_route_t *route2; LASSERT(!peer->ksnp_closing); - LASSERT(route->ksnr_peer == NULL); + LASSERT(!route->ksnr_peer); LASSERT(!route->ksnr_scheduled); LASSERT(!route->ksnr_connecting); - LASSERT(route->ksnr_connected == 0); + LASSERT(!route->ksnr_connected); /* LASSERT(unique) */ list_for_each(tmp, &peer->ksnp_routes) { @@ -382,8 +387,8 @@ ksocknal_add_route_locked if (route2->ksnr_ipaddr == route->ksnr_ipaddr) { CERROR("Duplicate route %s %pI4h\n", - libcfs_id2str(peer->ksnp_id), - &route->ksnr_ipaddr); + libcfs_id2str(peer->ksnp_id), + &route->ksnr_ipaddr); LBUG(); } } @@ -425,10 +430,10 @@ ksocknal_del_route_locked(ksock_route_t *route) ksocknal_close_conn_locked(conn, 0); } - if (route->ksnr_myipaddr != 0) { + if
(route->ksnr_myipaddr) { iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni, route->ksnr_myipaddr); - if (iface != NULL) + if (iface) iface->ksni_nroutes--; } @@ -438,8 +443,10 @@ ksocknal_del_route_locked(ksock_route_t *route) if (list_empty(&peer->ksnp_routes) && list_empty(&peer->ksnp_conns)) { - /* I've just removed the last route to a peer with no active - * connections */ + /* + * I've just removed the last route to a peer with no active + * connections + */ ksocknal_unlink_peer_locked(peer); } } @@ -460,11 +467,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) /* Have a brand new peer ready... */ rc = ksocknal_create_peer(&peer, ni, id); - if (rc != 0) + if (rc) return rc; route = ksocknal_create_route(ipaddr, port); - if (route == NULL) { + if (!route) { ksocknal_peer_decref(peer); return -ENOMEM; } @@ -472,16 +479,16 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) write_lock_bh(&ksocknal_data.ksnd_global_lock); /* always called with a ref on ni, so shutdown can't have started */ - LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); + LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown); peer2 = ksocknal_find_peer_locked(ni, id); - if (peer2 != NULL) { + if (peer2) { ksocknal_peer_decref(peer); peer = peer2; } else { /* peer table takes my ref on peer */ list_add_tail(&peer->ksnp_list, - ksocknal_nid2peerlist(id.nid)); + ksocknal_nid2peerlist(id.nid)); } route2 = NULL; @@ -493,7 +500,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) route2 = NULL; } - if (route2 == NULL) { + if (!route2) { ksocknal_add_route_locked(peer, route); route->ksnr_share_count++; } else { @@ -524,7 +531,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) route = list_entry(tmp, ksock_route_t, ksnr_list); /* no match */ - if (!(ip == 0 || route->ksnr_ipaddr == ip)) + if (!(!ip || route->ksnr_ipaddr == ip)) continue; route->ksnr_share_count = 0; @@ -538,15 +545,16 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) nshared += route->ksnr_share_count; } - if (nshared == 0) { - /* remove everything else if there are no explicit entries - * left */ - + if (!nshared) { + /* + * remove everything else if there are no explicit entries + * left + */ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); /* we should only be removing auto-entries */ - LASSERT(route->ksnr_share_count == 0); + LASSERT(!route->ksnr_share_count); ksocknal_del_route_locked(route); } @@ -575,16 +583,16 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) write_lock_bh(&ksocknal_data.ksnd_global_lock); - if (id.nid != LNET_NID_ANY) - lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - else { + if (id.nid != LNET_NID_ANY) { + lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + } else { lo = 0; hi = ksocknal_data.ksnd_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { - list_for_each_safe(ptmp, pnxt, - &ksocknal_data.ksnd_peers[i]) { + list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) @@ -604,7 +612,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) LASSERT(list_empty(&peer->ksnp_routes)); list_splice_init(&peer->ksnp_tx_queue, - &zombies); + &zombies); } ksocknal_peer_decref(peer); /* ...till here */ @@ -645,7 +653,7 @@ 
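ksocknal_del_peer() above (like ksocknal_close_matching_conns() later in this patch) walks either one hash chain or all of them by computing a lo/hi bucket range: for a concrete NID the pointer difference ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers collapses to a single index, while LNET_NID_ANY widens the range to the whole table. The idiom in isolation, with hypothetical names:

#include <stdint.h>

#define HASH_SIZE 16
#define NID_ANY   ((uint64_t)-1)

static int heads[HASH_SIZE];            /* models the ksnd_peers[] chains */

static int *nid2bucket(uint64_t nid)
{
        return &heads[nid % HASH_SIZE]; /* models ksocknal_nid2peerlist() */
}

static void for_each_matching_bucket(uint64_t nid, void (*fn)(int *))
{
        int lo, hi, i;

        if (nid != NID_ANY) {
                /* pointer arithmetic recovers the single bucket index */
                lo = (int)(nid2bucket(nid) - heads);
                hi = lo;
        } else {
                lo = 0;                 /* wildcard: scan every bucket */
                hi = HASH_SIZE - 1;
        }

        for (i = lo; i <= hi; i++)
                fn(&heads[i]);
}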
ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) continue; conn = list_entry(ctmp, ksock_conn_t, - ksnc_list); + ksnc_list); ksocknal_conn_addref(conn); read_unlock(&ksocknal_data.ksnd_global_lock); return conn; @@ -692,8 +700,10 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) nip = net->ksnn_ninterfaces; LASSERT(nip <= LNET_MAX_INTERFACES); - /* Only offer interfaces for additional connections if I have - * more than one. */ + /* + * Only offer interfaces for additional connections if I have + * more than one. + */ if (nip < 2) { read_unlock(&ksocknal_data.ksnd_global_lock); return 0; @@ -701,7 +711,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) for (i = 0; i < nip; i++) { ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr; - LASSERT(ipaddrs[i] != 0); + LASSERT(ipaddrs[i]); } read_unlock(&ksocknal_data.ksnd_global_lock); @@ -719,11 +729,11 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) int i; for (i = 0; i < nips; i++) { - if (ips[i] == 0) + if (!ips[i]) continue; this_xor = ips[i] ^ iface->ksni_ipaddr; - this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0; + this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0; if (!(best < 0 || best_netmatch < this_netmatch || @@ -757,38 +767,45 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) int best_netmatch; int best_npeers; - /* CAVEAT EMPTOR: We do all our interface matching with an + /* + * CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. We're only * expecting to be dealing with small numbers of interfaces, so the - * O(n**3)-ness shouldn't matter */ - - /* Also note that I'm not going to return more than n_peerips - * interfaces, even if I have more myself */ - + * O(n**3)-ness shouldn't matter + */ + /* + * Also note that I'm not going to return more than n_peerips + * interfaces, even if I have more myself + */ write_lock_bh(global_lock); LASSERT(n_peerips <= LNET_MAX_INTERFACES); LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES); - /* Only match interfaces for additional connections - * if I have > 1 interface */ + /* + * Only match interfaces for additional connections + * if I have > 1 interface + */ n_ips = (net->ksnn_ninterfaces < 2) ? 0 : min(n_peerips, net->ksnn_ninterfaces); for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) { /* ^ yes really... */ - /* If we have any new interfaces, first tick off all the + /* + * If we have any new interfaces, first tick off all the * peer IPs that match old interfaces, then choose new * interfaces to match the remaining peer IPS. * We don't forget interfaces we've stopped using; we might - * start using them again... */ - + * start using them again... + */ if (i < peer->ksnp_n_passive_ips) { /* Old interface. */ ip = peer->ksnp_passive_ips[i]; best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip); + /* peer passive ips are kept up to date */ + LASSERT(best_iface); } else { /* choose a new interface */ LASSERT(i == peer->ksnp_n_passive_ips); @@ -810,9 +827,9 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) k = ksocknal_match_peerip(iface, peerips, n_peerips); xor = ip ^ peerips[k]; - this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0; + this_netmatch = !(xor & iface->ksni_netmask) ? 
1 : 0; - if (!(best_iface == NULL || + if (!(!best_iface || best_netmatch < this_netmatch || (best_netmatch == this_netmatch && best_npeers > iface->ksni_npeers))) @@ -823,10 +840,12 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) best_npeers = iface->ksni_npeers; } + LASSERT(best_iface); + best_iface->ksni_npeers++; ip = best_iface->ksni_ipaddr; peer->ksnp_passive_ips[i] = ip; - peer->ksnp_n_passive_ips = i+1; + peer->ksnp_n_passive_ips = i + 1; } /* mark the best matching peer IP used */ @@ -860,16 +879,19 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, int i; int j; - /* CAVEAT EMPTOR: We do all our interface matching with an + /* + * CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. We're only * expecting to be dealing with small numbers of interfaces, so the - * O(n**3)-ness here shouldn't matter */ - + * O(n**3)-ness here shouldn't matter + */ write_lock_bh(global_lock); if (net->ksnn_ninterfaces < 2) { - /* Only create additional connections - * if I have > 1 interface */ + /* + * Only create additional connections + * if I have > 1 interface + */ write_unlock_bh(global_lock); return; } @@ -877,13 +899,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES); for (i = 0; i < npeer_ipaddrs; i++) { - if (newroute != NULL) { + if (newroute) { newroute->ksnr_ipaddr = peer_ipaddrs[i]; } else { write_unlock_bh(global_lock); newroute = ksocknal_create_route(peer_ipaddrs[i], port); - if (newroute == NULL) + if (!newroute) return; write_lock_bh(global_lock); @@ -904,7 +926,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, route = NULL; } - if (route != NULL) + if (route) continue; best_iface = NULL; @@ -920,21 +942,21 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, /* Using this interface already? */ list_for_each(rtmp, &peer->ksnp_routes) { route = list_entry(rtmp, ksock_route_t, - ksnr_list); + ksnr_list); if (route->ksnr_myipaddr == iface->ksni_ipaddr) break; route = NULL; } - if (route != NULL) + if (route) continue; - this_netmatch = (((iface->ksni_ipaddr ^ + this_netmatch = (!((iface->ksni_ipaddr ^ newroute->ksnr_ipaddr) & - iface->ksni_netmask) == 0) ? 1 : 0; + iface->ksni_netmask)) ? 
1 : 0; - if (!(best_iface == NULL || + if (!(!best_iface || best_netmatch < this_netmatch || (best_netmatch == this_netmatch && best_nroutes > iface->ksni_nroutes))) @@ -945,7 +967,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, best_nroutes = iface->ksni_nroutes; } - if (best_iface == NULL) + if (!best_iface) continue; newroute->ksnr_myipaddr = best_iface->ksni_ipaddr; @@ -956,7 +978,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, } write_unlock_bh(global_lock); - if (newroute != NULL) + if (newroute) ksocknal_route_decref(newroute); } @@ -969,10 +991,10 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock) int peer_port; rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT(rc == 0); /* we succeeded before */ + LASSERT(!rc); /* we succeeded before */ LIBCFS_ALLOC(cr, sizeof(*cr)); - if (cr == NULL) { + if (!cr) { LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n", &peer_ip); return -ENOMEM; @@ -997,7 +1019,6 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) ksock_route_t *route; list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { - if (route->ksnr_ipaddr == ipaddr) return route->ksnr_connecting; } @@ -1006,7 +1027,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, - struct socket *sock, int type) + struct socket *sock, int type) { rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; LIST_HEAD(zombies); @@ -1026,12 +1047,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, int active; char *warn = NULL; - active = (route != NULL); + active = !!route; LASSERT(active == (type != SOCKLND_CONN_NONE)); LIBCFS_ALLOC(conn, sizeof(*conn)); - if (conn == NULL) { + if (!conn) { rc = -ENOMEM; goto failed_0; } @@ -1039,8 +1060,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_peer = NULL; conn->ksnc_route = NULL; conn->ksnc_sock = sock; - /* 2 ref, 1 for conn, another extra ref prevents socket - * being closed before establishment of connection */ + /* + * 2 ref, 1 for conn, another extra ref prevents socket + * being closed before establishment of connection + */ atomic_set(&conn->ksnc_sock_refcount, 2); conn->ksnc_type = type; ksocknal_lib_save_callback(sock, conn); @@ -1057,21 +1080,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); - if (hello == NULL) { + if (!hello) { rc = -ENOMEM; goto failed_1; } /* stash conn's local and remote addrs */ rc = ksocknal_lib_get_conn_addrs(conn); - if (rc != 0) + if (rc) goto failed_1; - /* Find out/confirm peer's NID and connection type and get the + /* + * Find out/confirm peer's NID and connection type and get the * vector of interfaces she's willing to let me connect to. 
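The interface scoring in ksocknal_match_peerip() and ksocknal_create_routes() rests on one bit trick: XOR clears every bit on which two addresses agree, so ANDing the result with the netmask is zero exactly when the network parts match. Written out as a standalone helper:

#include <stdint.h>

/* 1 if the two addresses share a subnet under mask, else 0; equivalent
 * to the !((ip ^ iface_ip) & netmask) tests in the hunks above */
static int same_subnet(uint32_t a, uint32_t b, uint32_t mask)
{
        return !((a ^ b) & mask);
}

/* e.g. 192.168.1.10 vs 192.168.1.77 under a /24 mask:
 * same_subnet(0xc0a8010a, 0xc0a8014d, 0xffffff00) == 1 */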
* Passive connections use the listener timeout since the peer sends - * eagerly */ - + * eagerly + */ if (active) { peer = route->ksnr_peer; LASSERT(ni == peer->ksnp_ni); @@ -1084,7 +1108,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn->ksnc_proto = peer->ksnp_proto; write_unlock_bh(global_lock); - if (conn->ksnc_proto == NULL) { + if (!conn->ksnc_proto) { conn->ksnc_proto = &ksocknal_protocol_v3x; #if SOCKNAL_VERSION_DEBUG if (*ksocknal_tunables.ksnd_protocol == 2) @@ -1095,7 +1119,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, } rc = ksocknal_send_hello(ni, conn, peerid.nid, hello); - if (rc != 0) + if (rc) goto failed_1; } else { peerid.nid = LNET_NID_ANY; @@ -1109,8 +1133,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, if (rc < 0) goto failed_1; - LASSERT(rc == 0 || active); - LASSERT(conn->ksnc_proto != NULL); + LASSERT(!rc || active); + LASSERT(conn->ksnc_proto); LASSERT(peerid.nid != LNET_NID_ANY); cpt = lnet_cpt_of_nid(peerid.nid); @@ -1120,20 +1144,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_lock_bh(global_lock); } else { rc = ksocknal_create_peer(&peer, ni, peerid); - if (rc != 0) + if (rc) goto failed_1; write_lock_bh(global_lock); /* called with a ref on ni, so shutdown can't have started */ - LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0); + LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown); peer2 = ksocknal_find_peer_locked(ni, peerid); - if (peer2 == NULL) { - /* NB this puts an "empty" peer in the peer - * table (which takes my ref) */ + if (!peer2) { + /* + * NB this puts an "empty" peer in the peer + * table (which takes my ref) + */ list_add_tail(&peer->ksnp_list, - ksocknal_nid2peerlist(peerid.nid)); + ksocknal_nid2peerlist(peerid.nid)); } else { ksocknal_peer_decref(peer); peer = peer2; @@ -1143,8 +1169,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, ksocknal_peer_addref(peer); peer->ksnp_accepting++; - /* Am I already connecting to this guy? Resolve in - * favour of higher NID... */ + /* + * Am I already connecting to this guy? Resolve in + * favour of higher NID... + */ if (peerid.nid < ni->ni_nid && ksocknal_connecting(peer, conn->ksnc_ipaddr)) { rc = EALREADY; @@ -1161,8 +1189,9 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, goto failed_2; } - if (peer->ksnp_proto == NULL) { - /* Never connected before. + if (!peer->ksnp_proto) { + /* + * Never connected before. * NB recv_hello may have returned EPROTO to signal my peer * wants a different protocol than the one I asked for. */ @@ -1198,8 +1227,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, goto failed_2; } - /* Refuse to duplicate an existing connection, unless this is a - * loopback connection */ + /* + * Refuse to duplicate an existing connection, unless this is a + * loopback connection + */ if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) { list_for_each(tmp, &peer->ksnp_conns) { conn2 = list_entry(tmp, ksock_conn_t, ksnc_list); @@ -1209,9 +1240,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, conn2->ksnc_type != conn->ksnc_type) continue; - /* Reply on a passive connection attempt so the peer - * realises we're connected. */ - LASSERT(rc == 0); + /* + * Reply on a passive connection attempt so the peer + * realises we're connected. 
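The peerid.nid < ni->ni_nid test above settles simultaneous-connect races deterministically: both nodes apply the same comparison, so when each dials the other at once only the attempt initiated by the higher NID survives and the loser is answered with EALREADY. A toy model of that tie-break (function and parameter names are illustrative):

#include <errno.h>
#include <stdint.h>

/* Called for an incoming connection while we may also be dialing out.
 * Every node runs the same rule, so exactly one of two racing
 * connections is kept; mirrors the EALREADY path above. */
static int incoming_conn_rc(uint64_t my_nid, uint64_t peer_nid,
                            int dialing_peer_already)
{
        if (dialing_peer_already && peer_nid < my_nid)
                return EALREADY;        /* my outgoing attempt wins */
        return 0;                       /* keep the incoming connection */
}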
+ */ + LASSERT(!rc); if (!active) rc = EALREADY; @@ -1220,9 +1253,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, } } - /* If the connection created by this route didn't bind to the IP + /* + * If the connection created by this route didn't bind to the IP * address the route connected to, the connection/route matching - * code below probably isn't going to work. */ + * code below probably isn't going to work. + */ if (active && route->ksnr_ipaddr != conn->ksnc_ipaddr) { CERROR("Route %s %pI4h connected to %pI4h\n", @@ -1231,10 +1266,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, &conn->ksnc_ipaddr); } - /* Search for a route corresponding to the new connection and + /* + * Search for a route corresponding to the new connection and * create an association. This allows incoming connections created * by routes in my peer to match my own route entries so I don't - * continually create duplicate routes. */ + * continually create duplicate routes. + */ list_for_each(tmp, &peer->ksnp_routes) { route = list_entry(tmp, ksock_route_t, ksnr_list); @@ -1278,14 +1315,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_unlock_bh(global_lock); - /* We've now got a new connection. Any errors from here on are just + /* + * We've now got a new connection. Any errors from here on are just * like "normal" comms errors and we close the connection normally. * NB (a) we still have to send the reply HELLO for passive * connections, * (b) normal I/O on the conn is blocked until I setup and call the * socket callbacks. */ - CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n", libcfs_id2str(peerid), conn->ksnc_proto->pro_version, &conn->ksnc_myipaddr, &conn->ksnc_ipaddr, @@ -1305,12 +1342,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); - /* setup the socket AFTER I've received hello (it disables + /* + * setup the socket AFTER I've received hello (it disables * SO_LINGER). I might call back to the acceptor who may want * to send a protocol version response and then close the * socket; this ensures the socket only tears down after the - * response has been sent. */ - if (rc == 0) + * response has been sent. + */ + if (!rc) rc = ksocknal_lib_setup_sock(sock); write_lock_bh(global_lock); @@ -1323,14 +1362,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_unlock_bh(global_lock); - if (rc != 0) { + if (rc) { write_lock_bh(global_lock); if (!conn->ksnc_closing) { /* could be closed by another thread */ ksocknal_close_conn_locked(conn, rc); } write_unlock_bh(global_lock); - } else if (ksocknal_connsock_addref(conn) == 0) { + } else if (!ksocknal_connsock_addref(conn)) { /* Allow I/O to proceed. 
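The loop that ends above refuses to duplicate a live connection with the same remote address and type, except over loopback, where local and remote addresses coincide by construction. The same predicate over a toy connection list:

#include <stdint.h>

struct conn {
        uint32_t ip, my_ip;
        int type;
        struct conn *next;
};

/* 1 if 'c' would duplicate an existing connection; loopback is always
 * allowed, as in the ksnc_ipaddr != ksnc_myipaddr guard above */
static int is_duplicate(const struct conn *live, const struct conn *c)
{
        if (c->ip == c->my_ip)
                return 0;
        for (; live; live = live->next)
                if (live->ip == c->ip && live->type == c->type)
                        return 1;
        return 0;
}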
*/ ksocknal_read_callback(conn); ksocknal_write_callback(conn); @@ -1352,19 +1391,21 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, write_unlock_bh(global_lock); - if (warn != NULL) { + if (warn) { if (rc < 0) CERROR("Not creating conn %s type %d: %s\n", libcfs_id2str(peerid), conn->ksnc_type, warn); else CDEBUG(D_NET, "Not creating conn %s type %d: %s\n", - libcfs_id2str(peerid), conn->ksnc_type, warn); + libcfs_id2str(peerid), conn->ksnc_type, warn); } if (!active) { if (rc > 0) { - /* Request retry by replying with CONN_NONE - * ksnc_proto has been set already */ + /* + * Request retry by replying with CONN_NONE + * ksnc_proto has been set already + */ conn->ksnc_type = SOCKLND_CONN_NONE; hello->kshm_nips = 0; ksocknal_send_hello(ni, conn, peerid.nid, hello); @@ -1379,7 +1420,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, ksocknal_peer_decref(peer); failed_1: - if (hello != NULL) + if (hello) LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); @@ -1393,15 +1434,17 @@ failed_0: void ksocknal_close_conn_locked(ksock_conn_t *conn, int error) { - /* This just does the immmediate housekeeping, and queues the + /* + * This just does the immmediate housekeeping, and queues the * connection for the reaper to terminate. - * Caller holds ksnd_global_lock exclusively in irq context */ + * Caller holds ksnd_global_lock exclusively in irq context + */ ksock_peer_t *peer = conn->ksnc_peer; ksock_route_t *route; ksock_conn_t *conn2; struct list_head *tmp; - LASSERT(peer->ksnp_error == 0); + LASSERT(!peer->ksnp_error); LASSERT(!conn->ksnc_closing); conn->ksnc_closing = 1; @@ -1409,10 +1452,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) list_del(&conn->ksnc_list); route = conn->ksnc_route; - if (route != NULL) { + if (route) { /* dissociate conn from route... 
*/ LASSERT(!route->ksnr_deleted); - LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0); + LASSERT(route->ksnr_connected & (1 << conn->ksnc_type)); conn2 = NULL; list_for_each(tmp, &peer->ksnp_conns) { @@ -1424,7 +1467,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) conn2 = NULL; } - if (conn2 == NULL) + if (!conn2) route->ksnr_connected &= ~(1 << conn->ksnc_type); conn->ksnc_route = NULL; @@ -1445,15 +1488,17 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); - /* throw them to the last connection..., - * these TXs will be send to /dev/null by scheduler */ + /* + * throw them to the last connection..., + * these TXs will be send to /dev/null by scheduler + */ list_for_each_entry(tx, &peer->ksnp_tx_queue, - tx_list) + tx_list) ksocknal_tx_prep(conn, tx); spin_lock_bh(&conn->ksnc_scheduler->kss_lock); list_splice_init(&peer->ksnp_tx_queue, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); spin_unlock_bh(&conn->ksnc_scheduler->kss_lock); } @@ -1461,8 +1506,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) peer->ksnp_error = error; /* stash last conn close reason */ if (list_empty(&peer->ksnp_routes)) { - /* I've just closed last conn belonging to a - * peer with no routes to it */ + /* + * I've just closed last conn belonging to a + * peer with no routes to it + */ ksocknal_unlink_peer_locked(peer); } } @@ -1470,7 +1517,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); list_add_tail(&conn->ksnc_list, - &ksocknal_data.ksnd_deathrow_conns); + &ksocknal_data.ksnd_deathrow_conns); wake_up(&ksocknal_data.ksnd_reaper_waitq); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -1482,16 +1529,17 @@ ksocknal_peer_failed(ksock_peer_t *peer) int notify = 0; unsigned long last_alive = 0; - /* There has been a connection failure or comms error; but I'll only + /* + * There has been a connection failure or comms error; but I'll only * tell LNET I think the peer is dead if it's to another kernel and - * there are no connections or connection attempts in existence. */ - + * there are no connections or connection attempts in existence. 
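ksnr_connected is a per-route bitmask with one bit per connection type: ksocknal_associate_route_conn_locked() earlier in this file sets 1 << type on success, and the close path above clears the bit only after the conn2 search proves no other connection of that type survives. The bookkeeping in isolation (the enum values are illustrative; socklnd defines its own SOCKLND_CONN_* constants):

enum conn_type { CONN_CONTROL, CONN_BULK_IN, CONN_BULK_OUT };

static unsigned connected;              /* models route->ksnr_connected */

static void conn_established(enum conn_type t)
{
        connected |= 1U << t;           /* as in associate_route_conn */
}

static void conn_closed(enum conn_type t, int another_of_type_lives)
{
        if (!another_of_type_lives)     /* the conn2 search above */
                connected &= ~(1U << t);
}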
+ */ read_lock(&ksocknal_data.ksnd_global_lock); - if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 && + if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) && list_empty(&peer->ksnp_conns) && - peer->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer) == NULL) { + !peer->ksnp_accepting && + !ksocknal_find_connecting_route_locked(peer)) { notify = 1; last_alive = peer->ksnp_last_alive; } @@ -1500,7 +1548,7 @@ ksocknal_peer_failed(ksock_peer_t *peer) if (notify) lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0, - last_alive); + last_alive); } void @@ -1508,12 +1556,15 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) { ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; + ksock_tx_t *temp; ksock_tx_t *tmp; LIST_HEAD(zlist); - /* NB safe to finalize TXs because closing of socket will - * abort all buffered data */ - LASSERT(conn->ksnc_sock == NULL); + /* + * NB safe to finalize TXs because closing of socket will + * abort all buffered data + */ + LASSERT(!conn->ksnc_sock); spin_lock(&peer->ksnp_lock); @@ -1521,7 +1572,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) if (tx->tx_conn != conn) continue; - LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0); + LASSERT(tx->tx_msg.ksm_zc_cookies[0]); tx->tx_msg.ksm_zc_cookies[0] = 0; tx->tx_zc_aborted = 1; /* mark it as not-acked */ @@ -1531,9 +1582,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) spin_unlock(&peer->ksnp_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); - + list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) { list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } @@ -1542,10 +1591,12 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn) void ksocknal_terminate_conn(ksock_conn_t *conn) { - /* This gets called by the reaper (guaranteed thread context) to + /* + * This gets called by the reaper (guaranteed thread context) to * disengage the socket from its callbacks and close it. * ksnc_refcount will eventually hit zero, and then the reaper will - * destroy it. */ + * destroy it. + */ ksock_peer_t *peer = conn->ksnc_peer; ksock_sched_t *sched = conn->ksnc_scheduler; int failed = 0; @@ -1561,7 +1612,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn) if (!conn->ksnc_tx_scheduled && !list_empty(&conn->ksnc_tx_queue)) { list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); + &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); @@ -1576,11 +1627,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn) ksocknal_lib_reset_callback(conn->ksnc_sock, conn); - /* OK, so this conn may not be completely disengaged from its - * scheduler yet, but it _has_ committed to terminate... */ + /* + * OK, so this conn may not be completely disengaged from its + * scheduler yet, but it _has_ committed to terminate... + */ conn->ksnc_scheduler->kss_nconns--; - if (peer->ksnp_error != 0) { + if (peer->ksnp_error) { /* peer's last conn closed in error */ LASSERT(list_empty(&peer->ksnp_conns)); failed = 1; @@ -1592,11 +1645,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn) if (failed) ksocknal_peer_failed(peer); - /* The socket is closed on the final put; either here, or in + /* + * The socket is closed on the final put; either here, or in * ksocknal_{send,recv}msg(). Since we set up the linger2 option * when the connection was established, this will close the socket * immediately, aborting anything buffered in it. Any hung - * zero-copy transmits will therefore complete in finite time. */ + * zero-copy transmits will therefore complete in finite time. 
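ksocknal_finalize_zcreq() above moves every pending zero-copy tx for the dying conn onto a private zlist while ksnp_lock is held, then releases the references outside the lock; list_for_each_entry_safe() is required because each entry is unlinked mid-walk. The detach-then-process idiom with a plain singly linked list (single-threaded sketch, lock elided):

#include <stddef.h>

struct tx {
        struct tx *next;
        int zc_cookie;                  /* 0 means "no ACK expected" */
        int aborted;
};

/* detach the whole list "under the lock"; it is then private */
static struct tx *detach_all(struct tx **head)
{
        struct tx *zlist = *head;

        *head = NULL;
        return zlist;
}

static void abort_zc_requests(struct tx **pending)
{
        struct tx *tx, *next;

        for (tx = detach_all(pending); tx; tx = next) {
                next = tx->next;        /* read before the entry is
                                         * finalized, as the _safe
                                         * iterator does */
                tx->zc_cookie = 0;      /* no longer awaiting an ACK */
                tx->aborted = 1;        /* mark it as not-acked */
        }
}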
+ */ ksocknal_connsock_decref(conn); } @@ -1605,7 +1660,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn) { /* Queue the conn for the reaper to destroy */ - LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0); + LASSERT(!atomic_read(&conn->ksnc_conn_refcount)); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns); @@ -1622,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn) /* Final coup-de-grace of the reaper */ CDEBUG(D_NET, "connection %p\n", conn); - LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0); - LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0); - LASSERT(conn->ksnc_sock == NULL); - LASSERT(conn->ksnc_route == NULL); + LASSERT(!atomic_read(&conn->ksnc_conn_refcount)); + LASSERT(!atomic_read(&conn->ksnc_sock_refcount)); + LASSERT(!conn->ksnc_sock); + LASSERT(!conn->ksnc_route); LASSERT(!conn->ksnc_tx_scheduled); LASSERT(!conn->ksnc_rx_scheduled); LASSERT(list_empty(&conn->ksnc_tx_queue)); @@ -1642,7 +1697,7 @@ ksocknal_destroy_conn(ksock_conn_t *conn) cfs_duration_sec(cfs_time_sub(cfs_time_current(), last_rcv))); lnet_finalize(conn->ksnc_peer->ksnp_ni, - conn->ksnc_cookie, -EIO); + conn->ksnc_cookie, -EIO); break; case SOCKNAL_RX_LNET_HEADER: if (conn->ksnc_rx_started) @@ -1685,8 +1740,7 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); - if (ipaddr == 0 || - conn->ksnc_ipaddr == ipaddr) { + if (!ipaddr || conn->ksnc_ipaddr == ipaddr) { count++; ksocknal_close_conn_locked(conn, why); } @@ -1724,17 +1778,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) write_lock_bh(&ksocknal_data.ksnd_global_lock); - if (id.nid != LNET_NID_ANY) - lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); - else { + if (id.nid != LNET_NID_ANY) { + lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers); + } else { lo = 0; hi = ksocknal_data.ksnd_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, - &ksocknal_data.ksnd_peers[i]) { - + &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, ksnp_list); if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) && @@ -1748,10 +1802,10 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) write_unlock_bh(&ksocknal_data.ksnd_global_lock); /* wildcards always succeed */ - if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0) + if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr) return 0; - if (count == 0) + if (!count) return -ENOENT; else return 0; @@ -1760,15 +1814,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) { - /* The router is telling me she's been notified of a change in - * gateway state.... */ + /* + * The router is telling me she's been notified of a change in + * gateway state.... + */ lnet_process_id_t id = {0}; id.nid = gw_nid; id.pid = LNET_PID_ANY; CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid), - alive ? "up" : "down"); + alive ? "up" : "down"); if (!alive) { /* If the gateway crashed, close all open connections... */ @@ -1776,8 +1832,10 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) return; } - /* ...otherwise do nothing. We can only establish new connections - * if we have autroutes, and these connect on demand. 
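Teardown here is deliberately two-phase: the close paths only park the conn on ksnd_deathrow_conns (or ksnd_zombie_conns once its refcount hits zero) and wake the reaper, which performs the blocking work in guaranteed thread context. A minimal POSIX model of that hand-off, assuming pthreads in place of the kernel's spinlock and waitqueue:

#include <pthread.h>
#include <stddef.h>

struct conn { struct conn *next; int error; };

static struct conn *deathrow;           /* models ksnd_deathrow_conns */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reaper_wake = PTHREAD_COND_INITIALIZER;

/* fast path: record the error and hand the conn to the reaper */
static void close_conn(struct conn *c, int error)
{
        c->error = error;
        pthread_mutex_lock(&lock);
        c->next = deathrow;
        deathrow = c;
        pthread_cond_signal(&reaper_wake);      /* models wake_up() */
        pthread_mutex_unlock(&lock);
}

/* reaper side: block until work arrives, then take one conn */
static struct conn *reaper_next(void)
{
        struct conn *c;

        pthread_mutex_lock(&lock);
        while (!deathrow)
                pthread_cond_wait(&reaper_wake, &lock);
        c = deathrow;
        deathrow = c->next;
        pthread_mutex_unlock(&lock);
        return c;
}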
*/ + /* + * ...otherwise do nothing. We can only establish new connections + * if we have autroutes, and these connect on demand. + */ } void @@ -1788,12 +1846,15 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) unsigned long now = cfs_time_current(); ksock_peer_t *peer = NULL; rwlock_t *glock = &ksocknal_data.ksnd_global_lock; - lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; + lnet_process_id_t id = { + .nid = nid, + .pid = LNET_PID_LUSTRE, + }; read_lock(glock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) { + if (peer) { struct list_head *tmp; ksock_conn_t *conn; int bufnob; @@ -1812,13 +1873,13 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) } last_alive = peer->ksnp_last_alive; - if (ksocknal_find_connectable_route_locked(peer) == NULL) + if (!ksocknal_find_connectable_route_locked(peer)) connect = 0; } read_unlock(glock); - if (last_alive != 0) + if (last_alive) *when = last_alive; CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n", @@ -1834,7 +1895,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) write_lock_bh(glock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) + if (peer) ksocknal_launch_all_connections_locked(peer); write_unlock_bh(glock); @@ -1857,7 +1918,7 @@ ksocknal_push_peer(ksock_peer_t *peer) list_for_each(tmp, &peer->ksnp_conns) { if (i++ == index) { conn = list_entry(tmp, ksock_conn_t, - ksnc_list); + ksnc_list); ksocknal_conn_addref(conn); break; } @@ -1865,7 +1926,7 @@ ksocknal_push_peer(ksock_peer_t *peer) read_unlock(&ksocknal_data.ksnd_global_lock); - if (conn == NULL) + if (!conn) break; ksocknal_lib_push_conn(conn); @@ -1885,7 +1946,8 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) start = &ksocknal_data.ksnd_peers[0]; end = &ksocknal_data.ksnd_peers[hsize - 1]; } else { - start = end = ksocknal_nid2peerlist(id.nid); + start = ksocknal_nid2peerlist(id.nid); + end = ksocknal_nid2peerlist(id.nid); } for (tmp = start; tmp <= end; tmp++) { @@ -1910,7 +1972,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) } read_unlock(&ksocknal_data.ksnd_global_lock); - if (i == 0) /* no match */ + if (!i) /* no match */ break; rc = 0; @@ -1934,14 +1996,13 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) struct list_head *rtmp; ksock_route_t *route; - if (ipaddress == 0 || - netmask == 0) + if (!ipaddress || !netmask) return -EINVAL; write_lock_bh(&ksocknal_data.ksnd_global_lock); iface = ksocknal_ip2iface(ni, ipaddress); - if (iface != NULL) { + if (iface) { /* silently ignore dups */ rc = 0; } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) { @@ -1957,16 +2018,15 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) { peer = list_entry(ptmp, ksock_peer_t, - ksnp_list); + ksnp_list); for (j = 0; j < peer->ksnp_n_passive_ips; j++) if (peer->ksnp_passive_ips[j] == ipaddress) iface->ksni_npeers++; list_for_each(rtmp, &peer->ksnp_routes) { - route = list_entry(rtmp, - ksock_route_t, - ksnr_list); + route = list_entry(rtmp, ksock_route_t, + ksnr_list); if (route->ksnr_myipaddr == ipaddress) iface->ksni_nroutes++; @@ -1995,8 +2055,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) for (i = 0; i < peer->ksnp_n_passive_ips; i++) if (peer->ksnp_passive_ips[i] == ipaddr) { - for (j = i+1; j < peer->ksnp_n_passive_ips; j++) - peer->ksnp_passive_ips[j-1] = + 
for (j = i + 1; j < peer->ksnp_n_passive_ips; j++) + peer->ksnp_passive_ips[j - 1] = peer->ksnp_passive_ips[j]; peer->ksnp_n_passive_ips--; break; @@ -2008,7 +2068,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) if (route->ksnr_myipaddr != ipaddr) continue; - if (route->ksnr_share_count != 0) { + if (route->ksnr_share_count) { /* Manually created; keep, but unbind */ route->ksnr_myipaddr = 0; } else { @@ -2041,23 +2101,21 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) for (i = 0; i < net->ksnn_ninterfaces; i++) { this_ip = net->ksnn_interfaces[i].ksni_ipaddr; - if (!(ipaddress == 0 || - ipaddress == this_ip)) + if (!(!ipaddress || ipaddress == this_ip)) continue; rc = 0; - for (j = i+1; j < net->ksnn_ninterfaces; j++) - net->ksnn_interfaces[j-1] = + for (j = i + 1; j < net->ksnn_ninterfaces; j++) + net->ksnn_interfaces[j - 1] = net->ksnn_interfaces[j]; net->ksnn_ninterfaces--; for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) { list_for_each_safe(tmp, nxt, - &ksocknal_data.ksnd_peers[j]) { - peer = list_entry(tmp, ksock_peer_t, - ksnp_list); + &ksocknal_data.ksnd_peers[j]) { + peer = list_entry(tmp, ksock_peer_t, ksnp_list); if (peer->ksnp_ni != ni) continue; @@ -2121,7 +2179,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) rc = ksocknal_get_peer_info(ni, data->ioc_count, &id, &myip, &ip, &port, &conn_count, &share_count); - if (rc != 0) + if (rc) return rc; data->ioc_nid = id.nid; @@ -2136,7 +2194,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) case IOC_LIBCFS_ADD_PEER: id.nid = data->ioc_nid; - id.pid = LUSTRE_SRV_LNET_PID; + id.pid = LNET_PID_LUSTRE; return ksocknal_add_peer(ni, id, data->ioc_u32[0], /* IP */ data->ioc_u32[1]); /* port */ @@ -2153,7 +2211,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) int nagle; ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); - if (conn == NULL) + if (!conn) return -ENOENT; ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle); @@ -2202,14 +2260,14 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) static void ksocknal_free_buffers(void) { - LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0); + LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs)); - if (ksocknal_data.ksnd_sched_info != NULL) { + if (ksocknal_data.ksnd_sched_info) { struct ksock_sched_info *info; int i; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - if (info->ksi_scheds != NULL) { + if (info->ksi_scheds) { LIBCFS_FREE(info->ksi_scheds, info->ksi_nthreads_max * sizeof(info->ksi_scheds[0])); @@ -2219,21 +2277,21 @@ ksocknal_free_buffers(void) } LIBCFS_FREE(ksocknal_data.ksnd_peers, - sizeof(struct list_head) * - ksocknal_data.ksnd_peer_hash_size); + sizeof(struct list_head) * + ksocknal_data.ksnd_peer_hash_size); spin_lock(&ksocknal_data.ksnd_tx_lock); if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { struct list_head zlist; ksock_tx_t *tx; + ksock_tx_t *temp; list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); list_del_init(&ksocknal_data.ksnd_idle_noop_txs); spin_unlock(&ksocknal_data.ksnd_tx_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, ksock_tx_t, tx_list); + list_for_each_entry_safe(tx, temp, &zlist, tx_list) { list_del(&tx->tx_list); LIBCFS_FREE(tx, tx->tx_desc_size); } @@ -2250,7 +2308,7 @@ ksocknal_base_shutdown(void) int i; int j; - LASSERT(ksocknal_data.ksnd_nnets == 0); + LASSERT(!ksocknal_data.ksnd_nnets); switch (ksocknal_data.ksnd_init) { default: @@ -2258,7 +2316,7 @@ ksocknal_base_shutdown(void) case 
SOCKNAL_INIT_ALL: case SOCKNAL_INIT_DATA: - LASSERT(ksocknal_data.ksnd_peers != NULL); + LASSERT(ksocknal_data.ksnd_peers); for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) LASSERT(list_empty(&ksocknal_data.ksnd_peers[i])); @@ -2268,14 +2326,13 @@ ksocknal_base_shutdown(void) LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs)); LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes)); - if (ksocknal_data.ksnd_sched_info != NULL) { + if (ksocknal_data.ksnd_sched_info) { cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - if (info->ksi_scheds == NULL) + if (!info->ksi_scheds) continue; for (j = 0; j < info->ksi_nthreads_max; j++) { - sched = &info->ksi_scheds[j]; LASSERT(list_empty( &sched->kss_tx_conns)); @@ -2283,7 +2340,7 @@ ksocknal_base_shutdown(void) &sched->kss_rx_conns)); LASSERT(list_empty( &sched->kss_zombie_noop_txs)); - LASSERT(sched->kss_nconns == 0); + LASSERT(!sched->kss_nconns); } } } @@ -2293,10 +2350,10 @@ ksocknal_base_shutdown(void) wake_up_all(&ksocknal_data.ksnd_connd_waitq); wake_up_all(&ksocknal_data.ksnd_reaper_waitq); - if (ksocknal_data.ksnd_sched_info != NULL) { + if (ksocknal_data.ksnd_sched_info) { cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - if (info->ksi_scheds == NULL) + if (!info->ksi_scheds) continue; for (j = 0; j < info->ksi_nthreads_max; j++) { @@ -2308,7 +2365,7 @@ ksocknal_base_shutdown(void) i = 4; read_lock(&ksocknal_data.ksnd_global_lock); - while (ksocknal_data.ksnd_nthreads != 0) { + while (ksocknal_data.ksnd_nthreads) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */ "waiting for %d threads to terminate\n", @@ -2332,7 +2389,6 @@ ksocknal_base_shutdown(void) static __u64 ksocknal_new_incarnation(void) { - /* The incarnation number is the time this module loaded and it * identifies this particular instance of the socknal. 
*/ @@ -2347,15 +2403,15 @@ ksocknal_base_startup(void) int i; LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); - LASSERT(ksocknal_data.ksnd_nnets == 0); + LASSERT(!ksocknal_data.ksnd_nnets); memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */ ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE; LIBCFS_ALLOC(ksocknal_data.ksnd_peers, - sizeof(struct list_head) * - ksocknal_data.ksnd_peer_hash_size); - if (ksocknal_data.ksnd_peers == NULL) + sizeof(struct list_head) * + ksocknal_data.ksnd_peer_hash_size); + if (!ksocknal_data.ksnd_peers) return -ENOMEM; for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) @@ -2386,7 +2442,7 @@ ksocknal_base_startup(void) ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*info)); - if (ksocknal_data.ksnd_sched_info == NULL) + if (!ksocknal_data.ksnd_sched_info) goto failed; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { @@ -2397,8 +2453,10 @@ ksocknal_base_startup(void) if (*ksocknal_tunables.ksnd_nscheds > 0) { nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds); } else { - /* max to half of CPUs, assume another half should be - * reserved for upper layer modules */ + /* + * max to half of CPUs, assume another half should be + * reserved for upper layer modules + */ nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs); } @@ -2407,7 +2465,7 @@ ksocknal_base_startup(void) LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i, info->ksi_nthreads_max * sizeof(*sched)); - if (info->ksi_scheds == NULL) + if (!info->ksi_scheds) goto failed; for (; nthrs > 0; nthrs--) { @@ -2425,8 +2483,10 @@ ksocknal_base_startup(void) ksocknal_data.ksnd_connd_starting = 0; ksocknal_data.ksnd_connd_failed_stamp = 0; ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds(); - /* must have at least 2 connds to remain responsive to accepts while - * connecting */ + /* + * must have at least 2 connds to remain responsive to accepts while + * connecting + */ if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1) *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1; @@ -2446,7 +2506,7 @@ ksocknal_base_startup(void) snprintf(name, sizeof(name), "socknal_cd%02d", i); rc = ksocknal_thread_start(ksocknal_connd, (void *)((ulong_ptr_t)i), name); - if (rc != 0) { + if (rc) { spin_lock_bh(&ksocknal_data.ksnd_connd_lock); ksocknal_data.ksnd_connd_starting--; spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); @@ -2456,7 +2516,7 @@ ksocknal_base_startup(void) } rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn socknal reaper: %d\n", rc); goto failed; } @@ -2491,7 +2551,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) } } - if (peer != NULL) { + if (peer) { ksock_route_t *route; ksock_conn_t *conn; @@ -2515,9 +2575,9 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); CWARN("Conn: ref %d, sref %d, t %d, c %d\n", - atomic_read(&conn->ksnc_conn_refcount), - atomic_read(&conn->ksnc_sock_refcount), - conn->ksnc_type, conn->ksnc_closing); + atomic_read(&conn->ksnc_conn_refcount), + atomic_read(&conn->ksnc_sock_refcount), + conn->ksnc_type, conn->ksnc_closing); } } @@ -2548,7 +2608,7 @@ ksocknal_shutdown(lnet_ni_t *ni) /* Wait for all peer state to clean up */ i = 2; spin_lock_bh(&net->ksnn_lock); - while (net->ksnn_npeers != 0) { + while (net->ksnn_npeers) { spin_unlock_bh(&net->ksnn_lock); i++; @@ -2565,15 +2625,15 @@ ksocknal_shutdown(lnet_ni_t *ni) 
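ksocknal_base_startup() sizes each partition's scheduler pool with min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs): a positive ksnd_nscheds tunable is honored (capped at the CPU count), otherwise half the partition's CPUs are used, clamped below by the SOCKNAL_NSCHEDS floor. The arithmetic on its own, assuming a floor of 3:

#define NSCHEDS_FLOOR 3                 /* assumed SOCKNAL_NSCHEDS value */

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

/* ncpus: CPUs in this partition; tunable: requested count, 0 = auto */
static int sched_threads(int ncpus, int tunable)
{
        if (tunable > 0)
                return min_i(ncpus, tunable);
        /* auto: half the CPUs, kept within [NSCHEDS_FLOOR, ncpus] */
        return min_i(max_i(NSCHEDS_FLOOR, ncpus >> 1), ncpus);
}

/* sched_threads(8, 0) == 4, sched_threads(2, 0) == 2,
 * sched_threads(8, 2) == 2 */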
spin_unlock_bh(&net->ksnn_lock); for (i = 0; i < net->ksnn_ninterfaces; i++) { - LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0); - LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0); + LASSERT(!net->ksnn_interfaces[i].ksni_npeers); + LASSERT(!net->ksnn_interfaces[i].ksni_nroutes); } list_del(&net->ksnn_list); LIBCFS_FREE(net, sizeof(*net)); ksocknal_data.ksnd_nnets--; - if (ksocknal_data.ksnd_nnets == 0) + if (!ksocknal_data.ksnd_nnets) ksocknal_base_shutdown(); } @@ -2601,7 +2661,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) continue; rc = lnet_ipif_query(names[i], &up, &ip, &mask); - if (rc != 0) { + if (rc) { CWARN("Can't get interface %s info: %d\n", names[i], rc); continue; @@ -2628,7 +2688,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) lnet_ipif_free_enumeration(names, n); - if (j == 0) + if (!j) CERROR("Can't find any usable interfaces\n"); return j; @@ -2647,21 +2707,20 @@ ksocknal_search_new_ipif(ksock_net_t *net) ksock_net_t *tmp; int j; - if (colon != NULL) /* ignore alias device */ + if (colon) /* ignore alias device */ *colon = 0; - list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, - ksnn_list) { + list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) { for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) { char *ifnam2 = &tmp->ksnn_interfaces[j].ksni_name[0]; char *colon2 = strchr(ifnam2, ':'); - if (colon2 != NULL) + if (colon2) *colon2 = 0; - found = strcmp(ifnam, ifnam2) == 0; - if (colon2 != NULL) + found = !strcmp(ifnam, ifnam2); + if (colon2) *colon2 = ':'; } if (found) @@ -2669,7 +2728,7 @@ ksocknal_search_new_ipif(ksock_net_t *net) } new_ipif += !found; - if (colon != NULL) + if (colon) *colon = ':'; } @@ -2683,7 +2742,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) int rc = 0; int i; - if (info->ksi_nthreads == 0) { + if (!info->ksi_nthreads) { if (*ksocknal_tunables.ksnd_nscheds > 0) { nthrs = info->ksi_nthreads_max; } else { @@ -2711,7 +2770,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) rc = ksocknal_thread_start(ksocknal_scheduler, (void *)id, name); - if (rc == 0) + if (!rc) continue; CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", @@ -2734,7 +2793,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) for (i = 0; i < ncpts; i++) { struct ksock_sched_info *info; - int cpt = (cpts == NULL) ? i : cpts[i]; + int cpt = !cpts ? 
i : cpts[i]; LASSERT(cpt < cfs_cpt_number(lnet_cpt_table())); info = ksocknal_data.ksnd_sched_info[cpt]; @@ -2743,7 +2802,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) continue; rc = ksocknal_start_schedulers(info); - if (rc != 0) + if (rc) return rc; } return 0; @@ -2760,12 +2819,12 @@ ksocknal_startup(lnet_ni_t *ni) if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) { rc = ksocknal_base_startup(); - if (rc != 0) + if (rc) return rc; } LIBCFS_ALLOC(net, sizeof(*net)); - if (net == NULL) + if (!net) goto fail_0; spin_lock_init(&net->ksnn_lock); @@ -2776,7 +2835,7 @@ ksocknal_startup(lnet_ni_t *ni) ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits; ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits; - if (ni->ni_interfaces[0] == NULL) { + if (!ni->ni_interfaces[0]) { rc = ksocknal_enumerate_interfaces(net); if (rc <= 0) goto fail_1; @@ -2786,14 +2845,14 @@ ksocknal_startup(lnet_ni_t *ni) for (i = 0; i < LNET_MAX_INTERFACES; i++) { int up; - if (ni->ni_interfaces[i] == NULL) + if (!ni->ni_interfaces[i]) break; rc = lnet_ipif_query(ni->ni_interfaces[i], &up, - &net->ksnn_interfaces[i].ksni_ipaddr, - &net->ksnn_interfaces[i].ksni_netmask); + &net->ksnn_interfaces[i].ksni_ipaddr, + &net->ksnn_interfaces[i].ksni_netmask); - if (rc != 0) { + if (rc) { CERROR("Can't get interface %s info: %d\n", ni->ni_interfaces[i], rc); goto fail_1; @@ -2814,7 +2873,7 @@ ksocknal_startup(lnet_ni_t *ni) /* call it before add it to ksocknal_data.ksnd_nets */ rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) + if (rc) goto fail_1; ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), @@ -2828,20 +2887,18 @@ ksocknal_startup(lnet_ni_t *ni) fail_1: LIBCFS_FREE(net, sizeof(*net)); fail_0: - if (ksocknal_data.ksnd_nnets == 0) + if (!ksocknal_data.ksnd_nnets) ksocknal_base_shutdown(); return -ENETDOWN; } -static void __exit -ksocknal_module_fini(void) +static void __exit ksocklnd_exit(void) { lnet_unregister_lnd(&the_ksocklnd); } -static int __init -ksocknal_module_init(void) +static int __init ksocklnd_init(void) { int rc; @@ -2861,7 +2918,7 @@ ksocknal_module_init(void) the_ksocklnd.lnd_accept = ksocknal_accept; rc = ksocknal_tunables_init(); - if (rc != 0) + if (rc) return rc; lnet_register_lnd(&the_ksocklnd); @@ -2870,9 +2927,9 @@ ksocknal_module_init(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0"); +MODULE_DESCRIPTION("TCP Socket LNet Network Driver"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -MODULE_VERSION("3.0.0"); -module_init(ksocknal_module_init); -module_exit(ksocknal_module_fini); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index f4fa72550..a60d72f94 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -19,10 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #ifndef _SOCKLND_SOCKLND_H_ @@ -69,8 +65,10 @@ #define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ -/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). 
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */ +/* + * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). + * no risk if we're not running on a CONFIG_HIGHMEM platform. + */ #ifdef CONFIG_HIGHMEM # define SOCKNAL_RISK_KMAP_DEADLOCK 0 #else @@ -237,15 +235,16 @@ typedef struct { #define SOCKNAL_INIT_DATA 1 #define SOCKNAL_INIT_ALL 2 -/* A packet just assembled for transmission is represented by 1 or more +/* + * A packet just assembled for transmission is represented by 1 or more * struct iovec fragments (the first frag contains the portals header), * followed by 0 or more lnet_kiov_t fragments. * * On the receive side, initially 1 struct iovec fragment is posted for * receive (the header). Once the header has been received, the payload is * received into either struct iovec or lnet_kiov_t fragments, depending on - * what the header matched or whether the message needs forwarding. */ - + * what the header matched or whether the message needs forwarding. + */ struct ksock_conn; /* forward ref */ struct ksock_peer; /* forward ref */ struct ksock_route; /* forward ref */ @@ -284,12 +283,14 @@ typedef struct /* transmit packet */ } tx_frags; } ksock_tx_t; -#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) +#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) /* network zero copy callback descriptor embedded in ksock_tx_t */ -/* space for the rx frag descriptors; we either read a single contiguous - * header, or up to LNET_MAX_IOV frags of payload of either type. */ +/* + * space for the rx frag descriptors; we either read a single contiguous + * header, or up to LNET_MAX_IOV frags of payload of either type. + */ typedef union { struct kvec iov[LNET_MAX_IOV]; lnet_kiov_t kiov[LNET_MAX_IOV]; @@ -463,11 +464,13 @@ typedef struct ksock_proto { /* handle ZC ACK */ int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); - /* msg type matches the connection type: + /* + * msg type matches the connection type: * return value: * return MATCH_NO : no * return MATCH_YES : matching type - * return MATCH_MAY : can be backup */ + * return MATCH_MAY : can be backup + */ int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); } ksock_proto_t; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index 477b385f1..976fd7892 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
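KSOCK_NOOP_TX_SIZE above shows why the fragment vectors sit at the tail of the tx descriptor: offsetof(ksock_tx_t, tx_frags.paged.kiov[0]) is exactly the size a payload-free NOOP needs, so NOOPs are allocated short and later freed using the recorded tx_desc_size. A self-contained illustration of that truncated-allocation pattern:

#include <stddef.h>
#include <stdlib.h>

struct frag { void *addr; size_t len; };

struct tx {
        int refcount;
        int desc_size;                  /* bytes allocated, as tx_desc_size */
        struct frag frags[256];         /* payload descriptors; NOOPs have none */
};

/* a NOOP needs everything before the fragment array and nothing after */
#define NOOP_TX_SIZE offsetof(struct tx, frags[0])

static struct tx *tx_alloc(int has_payload)
{
        size_t size = has_payload ? sizeof(struct tx) : NOOP_TX_SIZE;
        struct tx *tx = malloc(size);

        if (tx)
                tx->desc_size = (int)size;      /* for the matching free */
        return tx;
}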
*/ #include "socklnd.h" @@ -47,10 +44,10 @@ ksocknal_alloc_tx(int type, int size) spin_unlock(&ksocknal_data.ksnd_tx_lock); } - if (tx == NULL) + if (!tx) LIBCFS_ALLOC(tx, size); - if (tx == NULL) + if (!tx) return NULL; atomic_set(&tx->tx_refcount, 1); @@ -70,7 +67,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) ksock_tx_t *tx; tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate noop tx desc\n"); return NULL; } @@ -90,11 +87,11 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) } void -ksocknal_free_tx (ksock_tx_t *tx) +ksocknal_free_tx(ksock_tx_t *tx) { atomic_dec(&ksocknal_data.ksnd_nactive_txs); - if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { + if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { /* it's a noop tx */ spin_lock(&ksocknal_data.ksnd_tx_lock); @@ -107,7 +104,7 @@ ksocknal_free_tx (ksock_tx_t *tx) } static int -ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) { struct kvec *iov = tx->tx_iov; int nob; @@ -122,7 +119,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) return rc; nob = rc; - LASSERT (nob <= tx->tx_resid); + LASSERT(nob <= tx->tx_resid); tx->tx_resid -= nob; /* "consume" iov */ @@ -138,19 +135,19 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) nob -= iov->iov_len; tx->tx_iov = ++iov; tx->tx_niov--; - } while (nob != 0); + } while (nob); return rc; } static int -ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) { lnet_kiov_t *kiov = tx->tx_kiov; int nob; int rc; - LASSERT(tx->tx_niov == 0); + LASSERT(!tx->tx_niov); LASSERT(tx->tx_nkiov > 0); /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ @@ -160,7 +157,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) return rc; nob = rc; - LASSERT (nob <= tx->tx_resid); + LASSERT(nob <= tx->tx_resid); tx->tx_resid -= nob; /* "consume" kiov */ @@ -176,27 +173,27 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) nob -= (int)kiov->kiov_len; tx->tx_kiov = ++kiov; tx->tx_nkiov--; - } while (nob != 0); + } while (nob); return rc; } static int -ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx) { int rc; int bufnob; - if (ksocknal_data.ksnd_stall_tx != 0) { + if (ksocknal_data.ksnd_stall_tx) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); } - LASSERT(tx->tx_resid != 0); + LASSERT(tx->tx_resid); rc = ksocknal_connsock_addref(conn); - if (rc != 0) { - LASSERT (conn->ksnc_closing); + if (rc) { + LASSERT(conn->ksnc_closing); return -ESHUTDOWN; } @@ -205,10 +202,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) /* testing... 
*/ ksocknal_data.ksnd_enomem_tx--; rc = -EAGAIN; - } else if (tx->tx_niov != 0) { - rc = ksocknal_send_iov (conn, tx); + } else if (tx->tx_niov) { + rc = ksocknal_send_iov(conn, tx); } else { - rc = ksocknal_send_kiov (conn, tx); + rc = ksocknal_send_kiov(conn, tx); } bufnob = conn->ksnc_sock->sk->sk_wmem_queued; @@ -216,8 +213,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) conn->ksnc_tx_bufnob += rc; /* account it */ if (bufnob < conn->ksnc_tx_bufnob) { - /* allocated send buffer bytes < computed; infer - * something got ACKed */ + /* + * allocated send buffer bytes < computed; infer + * something got ACKed + */ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); conn->ksnc_peer->ksnp_last_alive = cfs_time_current(); @@ -227,7 +226,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) if (rc <= 0) { /* Didn't write anything? */ - if (rc == 0) /* some stacks return 0 instead of -EAGAIN */ + if (!rc) /* some stacks return 0 instead of -EAGAIN */ rc = -EAGAIN; /* Check if EAGAIN is due to memory pressure */ @@ -238,17 +237,17 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) } /* socket's wmem_queued now includes 'rc' bytes */ - atomic_sub (rc, &conn->ksnc_tx_nob); + atomic_sub(rc, &conn->ksnc_tx_nob); rc = 0; - } while (tx->tx_resid != 0); + } while (tx->tx_resid); ksocknal_connsock_decref(conn); return rc; } static int -ksocknal_recv_iov (ksock_conn_t *conn) +ksocknal_recv_iov(ksock_conn_t *conn) { struct kvec *iov = conn->ksnc_rx_iov; int nob; @@ -256,8 +255,10 @@ ksocknal_recv_iov (ksock_conn_t *conn) LASSERT(conn->ksnc_rx_niov > 0); - /* Never touch conn->ksnc_rx_iov or change connection - * status inside ksocknal_lib_recv_iov */ + /* + * Never touch conn->ksnc_rx_iov or change connection + * status inside ksocknal_lib_recv_iov + */ rc = ksocknal_lib_recv_iov(conn); if (rc <= 0) @@ -287,13 +288,13 @@ ksocknal_recv_iov (ksock_conn_t *conn) nob -= iov->iov_len; conn->ksnc_rx_iov = ++iov; conn->ksnc_rx_niov--; - } while (nob != 0); + } while (nob); return rc; } static int -ksocknal_recv_kiov (ksock_conn_t *conn) +ksocknal_recv_kiov(ksock_conn_t *conn) { lnet_kiov_t *kiov = conn->ksnc_rx_kiov; int nob; @@ -301,8 +302,10 @@ ksocknal_recv_kiov (ksock_conn_t *conn) LASSERT(conn->ksnc_rx_nkiov > 0); - /* Never touch conn->ksnc_rx_kiov or change connection - * status inside ksocknal_lib_recv_iov */ + /* + * Never touch conn->ksnc_rx_kiov or change connection + * status inside ksocknal_lib_recv_iov + */ rc = ksocknal_lib_recv_kiov(conn); if (rc <= 0) @@ -332,41 +335,43 @@ ksocknal_recv_kiov (ksock_conn_t *conn) nob -= kiov->kiov_len; conn->ksnc_rx_kiov = ++kiov; conn->ksnc_rx_nkiov--; - } while (nob != 0); + } while (nob); return 1; } static int -ksocknal_receive (ksock_conn_t *conn) +ksocknal_receive(ksock_conn_t *conn) { - /* Return 1 on success, 0 on EOF, < 0 on error. + /* + * Return 1 on success, 0 on EOF, < 0 on error. * Caller checks ksnc_rx_nob_wanted to determine - * progress/completion. */ + * progress/completion. 
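ksocknal_send_iov()/ksocknal_send_kiov() must survive short writes: each pass "consumes" however many bytes the socket accepted, trimming a partially sent fragment and stepping past complete ones, while the caller loops until tx_resid drains or the socket pushes back. The consume step as a standalone helper over a plain iovec:

#include <stddef.h>
#include <sys/uio.h>

/* Drop 'sent' bytes from the front of an iovec array after a short
 * write: whole fragments are skipped, the first partial one is
 * trimmed.  Returns the remaining fragment count; mirrors the
 * do/while loops in the hunks above. */
static int consume_iov(struct iovec **piov, int niov, size_t sent)
{
        struct iovec *iov = *piov;

        while (sent) {
                if (sent < iov->iov_len) {              /* partial */
                        iov->iov_base = (char *)iov->iov_base + sent;
                        iov->iov_len -= sent;
                        break;
                }
                sent -= iov->iov_len;                   /* fully sent */
                iov++;
                niov--;
        }
        *piov = iov;
        return niov;
}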
+ */ int rc; - if (ksocknal_data.ksnd_stall_rx != 0) { + if (ksocknal_data.ksnd_stall_rx) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx)); } rc = ksocknal_connsock_addref(conn); - if (rc != 0) { - LASSERT (conn->ksnc_closing); + if (rc) { + LASSERT(conn->ksnc_closing); return -ESHUTDOWN; } for (;;) { - if (conn->ksnc_rx_niov != 0) - rc = ksocknal_recv_iov (conn); + if (conn->ksnc_rx_niov) + rc = ksocknal_recv_iov(conn); else - rc = ksocknal_recv_kiov (conn); + rc = ksocknal_recv_kiov(conn); if (rc <= 0) { /* error/EOF or partial receive */ if (rc == -EAGAIN) { rc = 1; - } else if (rc == 0 && conn->ksnc_rx_started) { + } else if (!rc && conn->ksnc_rx_started) { /* EOF in the middle of a message */ rc = -EPROTO; } @@ -375,7 +380,7 @@ ksocknal_receive (ksock_conn_t *conn) /* Completed a fragment */ - if (conn->ksnc_rx_nob_wanted == 0) { + if (!conn->ksnc_rx_nob_wanted) { rc = 1; break; } @@ -386,36 +391,36 @@ ksocknal_receive (ksock_conn_t *conn) } void -ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx) +ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx) { lnet_msg_t *lnetmsg = tx->tx_lnetmsg; - int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO; + int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO; - LASSERT(ni != NULL || tx->tx_conn != NULL); + LASSERT(ni || tx->tx_conn); - if (tx->tx_conn != NULL) + if (tx->tx_conn) ksocknal_conn_decref(tx->tx_conn); - if (ni == NULL && tx->tx_conn != NULL) + if (!ni && tx->tx_conn) ni = tx->tx_conn->ksnc_peer->ksnp_ni; - ksocknal_free_tx (tx); - if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */ - lnet_finalize (ni, lnetmsg, rc); + ksocknal_free_tx(tx); + if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */ + lnet_finalize(ni, lnetmsg, rc); } void -ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) +ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error) { ksock_tx_t *tx; - while (!list_empty (txlist)) { + while (!list_empty(txlist)) { tx = list_entry(txlist->next, ksock_tx_t, tx_list); - if (error && tx->tx_lnetmsg != NULL) { + if (error && tx->tx_lnetmsg) { CNETERR("Deleting packet type %d len %d %s->%s\n", - le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type), - le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length), + le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type), + le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length), libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)), libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid))); } else if (error) { @@ -435,12 +440,14 @@ ksocknal_check_zc_req(ksock_tx_t *tx) ksock_conn_t *conn = tx->tx_conn; ksock_peer_t *peer = conn->ksnc_peer; - /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx + /* + * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx * to ksnp_zc_req_list if some fragment of this message should be sent * zero-copy. Our peer will send an ACK containing this cookie when * she has received this message to tell us we can signal completion. * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on - * ksnp_zc_req_list. */ + * ksnp_zc_req_list. + */ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); LASSERT(tx->tx_zc_capable); @@ -450,9 +457,10 @@ ksocknal_check_zc_req(ksock_tx_t *tx) !conn->ksnc_zc_capable) return; - /* assign cookie and queue tx to pending list, it will be released when - * a matching ack is received. 
See ksocknal_handle_zcack() */ - + /* + * assign cookie and queue tx to pending list, it will be released when + * a matching ack is received. See ksocknal_handle_zcack() + */ ksocknal_tx_addref(tx); spin_lock(&peer->ksnp_lock); @@ -461,11 +469,11 @@ ksocknal_check_zc_req(ksock_tx_t *tx) tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(!tx->tx_msg.ksm_zc_cookies[0]); tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; - if (peer->ksnp_zc_next_cookie == 0) + if (!peer->ksnp_zc_next_cookie) peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1; list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); @@ -485,7 +493,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) spin_lock(&peer->ksnp_lock); - if (tx->tx_msg.ksm_zc_cookies[0] == 0) { + if (!tx->tx_msg.ksm_zc_cookies[0]) { /* Not waiting for an ACK */ spin_unlock(&peer->ksnp_lock); return; @@ -500,20 +508,20 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) } static int -ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) +ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx) { int rc; if (tx->tx_zc_capable && !tx->tx_zc_checked) ksocknal_check_zc_req(tx); - rc = ksocknal_transmit (conn, tx); + rc = ksocknal_transmit(conn, tx); CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc); - if (tx->tx_resid == 0) { + if (!tx->tx_resid) { /* Sent everything OK */ - LASSERT (rc == 0); + LASSERT(!rc); return 0; } @@ -532,13 +540,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); /* enomem list takes over scheduler's ref... */ - LASSERT (conn->ksnc_tx_scheduled); + LASSERT(conn->ksnc_tx_scheduled); list_add_tail(&conn->ksnc_tx_list, - &ksocknal_data.ksnd_enomem_conns); + &ksocknal_data.ksnd_enomem_conns); if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(), SOCKNAL_ENOMEM_RETRY), ksocknal_data.ksnd_reaper_waketime)) - wake_up (&ksocknal_data.ksnd_reaper_waitq); + wake_up(&ksocknal_data.ksnd_reaper_waitq); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); return rc; @@ -569,21 +577,19 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_uncheck_zc_req(tx); /* it's not an error if conn is being closed */ - ksocknal_close_conn_and_siblings (conn, - (conn->ksnc_closing) ? 0 : rc); + ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 
0 : rc); return rc; } static void -ksocknal_launch_connection_locked (ksock_route_t *route) +ksocknal_launch_connection_locked(ksock_route_t *route) { - /* called holding write lock on ksnd_global_lock */ LASSERT(!route->ksnr_scheduled); LASSERT(!route->ksnr_connecting); - LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0); + LASSERT(ksocknal_route_mask() & ~route->ksnr_connected); route->ksnr_scheduled = 1; /* scheduling conn for connd */ ksocknal_route_addref(route); /* extra ref for connd */ @@ -591,14 +597,14 @@ ksocknal_launch_connection_locked (ksock_route_t *route) spin_lock_bh(&ksocknal_data.ksnd_connd_lock); list_add_tail(&route->ksnr_connd_list, - &ksocknal_data.ksnd_connd_routes); + &ksocknal_data.ksnd_connd_routes); wake_up(&ksocknal_data.ksnd_connd_waitq); spin_unlock_bh(&ksocknal_data.ksnd_connd_lock); } void -ksocknal_launch_all_connections_locked (ksock_peer_t *peer) +ksocknal_launch_all_connections_locked(ksock_peer_t *peer) { ksock_route_t *route; @@ -606,7 +612,7 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer) for (;;) { /* launch any/all connections that need it */ route = ksocknal_find_connectable_route_locked(peer); - if (route == NULL) + if (!route) return; ksocknal_launch_connection_locked(route); @@ -623,15 +629,15 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) int tnob = 0; int fnob = 0; - list_for_each (tmp, &peer->ksnp_conns) { + list_for_each(tmp, &peer->ksnp_conns) { ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); int nob = atomic_read(&c->ksnc_tx_nob) + c->ksnc_sock->sk->sk_wmem_queued; int rc; LASSERT(!c->ksnc_closing); - LASSERT(c->ksnc_proto != NULL && - c->ksnc_proto->pro_match_tx != NULL); + LASSERT(c->ksnc_proto && + c->ksnc_proto->pro_match_tx); rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); @@ -642,7 +648,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) continue; case SOCKNAL_MATCH_YES: /* typed connection */ - if (typed == NULL || tnob > nob || + if (!typed || tnob > nob || (tnob == nob && *ksocknal_tunables.ksnd_round_robin && cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) { typed = c; @@ -651,7 +657,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) break; case SOCKNAL_MATCH_MAY: /* fallback connection */ - if (fallback == NULL || fnob > nob || + if (!fallback || fnob > nob || (fnob == nob && *ksocknal_tunables.ksnd_round_robin && cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) { fallback = c; @@ -662,9 +668,9 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) } /* prefer the typed selection */ - conn = (typed != NULL) ? typed : fallback; + conn = (typed) ? 
typed : fallback; - if (conn != NULL) + if (conn) conn->ksnc_tx_last_post = cfs_time_current(); return conn; @@ -675,48 +681,51 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx) { conn->ksnc_proto->pro_pack(tx); - atomic_add (tx->tx_nob, &conn->ksnc_tx_nob); + atomic_add(tx->tx_nob, &conn->ksnc_tx_nob); ksocknal_conn_addref(conn); /* +1 ref for tx */ tx->tx_conn = conn; } void -ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) +ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn) { ksock_sched_t *sched = conn->ksnc_scheduler; ksock_msg_t *msg = &tx->tx_msg; ksock_tx_t *ztx = NULL; int bufnob = 0; - /* called holding global lock (read or irq-write) and caller may + /* + * called holding global lock (read or irq-write) and caller may * not have dropped this lock between finding conn and calling me, * so we don't need the {get,put}connsock dance to deref - * ksnc_sock... */ + * ksnc_sock... + */ LASSERT(!conn->ksnc_closing); CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n", - libcfs_id2str(conn->ksnc_peer->ksnp_id), - &conn->ksnc_ipaddr, - conn->ksnc_port); + libcfs_id2str(conn->ksnc_peer->ksnp_id), + &conn->ksnc_ipaddr, conn->ksnc_port); ksocknal_tx_prep(conn, tx); - /* Ensure the frags we've been given EXACTLY match the number of + /* + * Ensure the frags we've been given EXACTLY match the number of * bytes we want to send. Many TCP/IP stacks disregard any total * size parameters passed to them and just look at the frags. * * We always expect at least 1 mapped fragment containing the - * complete ksocknal message header. */ - LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) + + * complete ksocknal message header. + */ + LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) + lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == (unsigned int)tx->tx_nob); LASSERT(tx->tx_niov >= 1); LASSERT(tx->tx_resid == tx->tx_nob); - CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", - tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type : - KSOCK_MSG_NOOP, - tx->tx_nob, tx->tx_niov, tx->tx_nkiov); + CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", + tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type : + KSOCK_MSG_NOOP, + tx->tx_nob, tx->tx_niov, tx->tx_nkiov); /* * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__ @@ -725,7 +734,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) bufnob = conn->ksnc_sock->sk->sk_wmem_queued; spin_lock_bh(&sched->kss_lock); - if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) { + if (list_empty(&conn->ksnc_tx_queue) && !bufnob) { /* First packet starts the timeout */ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); @@ -736,26 +745,30 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) } if (msg->ksm_type == KSOCK_MSG_NOOP) { - /* The packet is noop ZC ACK, try to piggyback the ack_cookie - * on a normal packet so I don't need to send it */ - LASSERT(msg->ksm_zc_cookies[1] != 0); - LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); + /* + * The packet is noop ZC ACK, try to piggyback the ack_cookie + * on a normal packet so I don't need to send it + */ + LASSERT(msg->ksm_zc_cookies[1]); + LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ } else { - /* It's a normal packet - can it piggback a noop zc-ack that - * has been queued already? 
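pro_queue_tx_msg() can fold a queued standalone ZC-ACK into an outgoing packet so the NOOP never hits the wire. A simplified sketch of that piggyback step (the types and the single pending_noop slot are hypothetical; the driver tracks a per-conn carrier instead):

#include <stddef.h>
#include <stdint.h>

struct sketch_msg {
	uint64_t zc_cookies[2];		/* [0] = ZC-REQ, [1] = ZC-ACK */
};

/* Fold a queued standalone ACK into msg; return the retired NOOP. */
static struct sketch_msg *
piggyback_zcack(struct sketch_msg *msg, struct sketch_msg **pending_noop)
{
	struct sketch_msg *noop = *pending_noop;

	if (!noop)
		return NULL;
	msg->zc_cookies[1] = noop->zc_cookies[1];	/* carry the ACK */
	*pending_noop = NULL;
	return noop;			/* caller frees it (a "zombie" tx) */
}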
*/ - LASSERT(msg->ksm_zc_cookies[1] == 0); - LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL); + /* + * It's a normal packet - can it piggback a noop zc-ack that + * has been queued already? + */ + LASSERT(!msg->ksm_zc_cookies[1]); + LASSERT(conn->ksnc_proto->pro_queue_tx_msg); ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); /* ztx will be released later */ } - if (ztx != NULL) { - atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob); + if (ztx) { + atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob); list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); } @@ -763,24 +776,23 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) !conn->ksnc_tx_scheduled) { /* not scheduled to send */ /* +1 ref for scheduler */ ksocknal_conn_addref(conn); - list_add_tail (&conn->ksnc_tx_list, - &sched->kss_tx_conns); + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); } ksock_route_t * -ksocknal_find_connectable_route_locked (ksock_peer_t *peer) +ksocknal_find_connectable_route_locked(ksock_peer_t *peer) { unsigned long now = cfs_time_current(); struct list_head *tmp; ksock_route_t *route; - list_for_each (tmp, &peer->ksnp_routes) { - route = list_entry (tmp, ksock_route_t, ksnr_list); + list_for_each(tmp, &peer->ksnp_routes) { + route = list_entry(tmp, ksock_route_t, ksnr_list); LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); @@ -788,10 +800,10 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) continue; /* all route types connected ? */ - if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0) + if (!(ksocknal_route_mask() & ~route->ksnr_connected)) continue; - if (!(route->ksnr_retry_interval == 0 || /* first attempt */ + if (!(!route->ksnr_retry_interval || /* first attempt */ cfs_time_aftereq(now, route->ksnr_timeout))) { CDEBUG(D_NET, "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n", @@ -809,13 +821,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) } ksock_route_t * -ksocknal_find_connecting_route_locked (ksock_peer_t *peer) +ksocknal_find_connecting_route_locked(ksock_peer_t *peer) { struct list_head *tmp; ksock_route_t *route; - list_for_each (tmp, &peer->ksnp_routes) { - route = list_entry (tmp, ksock_route_t, ksnr_list); + list_for_each(tmp, &peer->ksnp_routes) { + route = list_entry(tmp, ksock_route_t, ksnr_list); LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); @@ -827,7 +839,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) } int -ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) +ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) { ksock_peer_t *peer; ksock_conn_t *conn; @@ -835,21 +847,23 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) int retry; int rc; - LASSERT(tx->tx_conn == NULL); + LASSERT(!tx->tx_conn); g_lock = &ksocknal_data.ksnd_global_lock; for (retry = 0;; retry = 1) { read_lock(g_lock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) { - if (ksocknal_find_connectable_route_locked(peer) == NULL) { + if (peer) { + if (!ksocknal_find_connectable_route_locked(peer)) { conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); - if (conn != NULL) { - /* I've got no routes that need to be + if (conn) { + /* + * I've got no routes that need to be * connecting and I do have an actual - * connection... */ - ksocknal_queue_tx_locked (tx, conn); + * connection... 
+ */ + ksocknal_queue_tx_locked(tx, conn); read_unlock(g_lock); return 0; } @@ -862,12 +876,12 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) write_lock_bh(g_lock); peer = ksocknal_find_peer_locked(ni, id); - if (peer != NULL) + if (peer) break; write_unlock_bh(g_lock); - if ((id.pid & LNET_PID_USERFLAG) != 0) { + if (id.pid & LNET_PID_USERFLAG) { CERROR("Refusing to create a connection to userspace process %s\n", libcfs_id2str(id)); return -EHOSTUNREACH; @@ -881,7 +895,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) rc = ksocknal_add_peer(ni, id, LNET_NIDADDR(id.nid), lnet_acceptor_port()); - if (rc != 0) { + if (rc) { CERROR("Can't add peer %s: %d\n", libcfs_id2str(id), rc); return rc; @@ -891,21 +905,21 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) ksocknal_launch_all_connections_locked(peer); conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); - if (conn != NULL) { + if (conn) { /* Connection exists; queue message on it */ - ksocknal_queue_tx_locked (tx, conn); + ksocknal_queue_tx_locked(tx, conn); write_unlock_bh(g_lock); return 0; } if (peer->ksnp_accepting > 0 || - ksocknal_find_connecting_route_locked (peer) != NULL) { + ksocknal_find_connecting_route_locked(peer)) { /* the message is going to be pinned to the peer */ tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); /* Queue the message until a connection is established */ - list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); + list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue); write_unlock_bh(g_lock); return 0; } @@ -932,19 +946,20 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) int desc_size; int rc; - /* NB 'private' is different depending on what we're sending. - * Just ignore it... */ - + /* + * NB 'private' is different depending on what we're sending. + * Just ignore it... 
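ksocknal_send() below sizes one allocation to hold the header iov plus every payload fragment, using offsetof() into the descriptor's tail array. A userspace sketch of the same trick (sketch_tx is a stand-in; the non-constant offsetof subscript relies on __builtin_offsetof, as the kernel does):

#include <stddef.h>
#include <stdlib.h>
#include <sys/uio.h>

struct sketch_tx {
	int tx_nob;
	struct iovec tx_iov[];		/* [0] = header, [1..] = payload */
};

static struct sketch_tx *sketch_alloc_tx(unsigned int payload_niov)
{
	/* one allocation sized for header + payload fragments */
	size_t desc_size = offsetof(struct sketch_tx,
				    tx_iov[1 + payload_niov]);

	return calloc(1, desc_size);
}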
+ */ CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(!payload_nob || payload_niov > 0); LASSERT(payload_niov <= LNET_MAX_IOV); /* payload is either all vaddrs or all pages */ - LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); - LASSERT (!in_interrupt ()); + LASSERT(!(payload_kiov && payload_iov)); + LASSERT(!in_interrupt()); - if (payload_iov != NULL) + if (payload_iov) desc_size = offsetof(ksock_tx_t, tx_frags.virt.iov[1 + payload_niov]); else @@ -954,7 +969,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (lntmsg->msg_vmflush) mpflag = cfs_memory_pressure_get_and_set(); tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); - if (tx == NULL) { + if (!tx) { CERROR("Can't allocate tx desc type %d size %d\n", type, desc_size); if (lntmsg->msg_vmflush) @@ -965,7 +980,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx->tx_conn = NULL; /* set when assigned a conn */ tx->tx_lnetmsg = lntmsg; - if (payload_iov != NULL) { + if (payload_iov) { tx->tx_kiov = NULL; tx->tx_nkiov = 0; tx->tx_iov = tx->tx_frags.virt.iov; @@ -992,7 +1007,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (!mpflag) cfs_memory_pressure_restore(mpflag); - if (rc == 0) + if (!rc) return 0; ksocknal_free_tx(tx); @@ -1014,7 +1029,7 @@ ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name) } void -ksocknal_thread_fini (void) +ksocknal_thread_fini(void) { write_lock_bh(&ksocknal_data.ksnd_global_lock); ksocknal_data.ksnd_nthreads--; @@ -1022,7 +1037,7 @@ ksocknal_thread_fini (void) } int -ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) +ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip) { static char ksocknal_slop_buffer[4096]; @@ -1030,14 +1045,14 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) unsigned int niov; int skipped; - LASSERT(conn->ksnc_proto != NULL); + LASSERT(conn->ksnc_proto); - if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) { + if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) { /* Remind the socket to ack eagerly... */ ksocknal_lib_eager_ack(conn); } - if (nob_to_skip == 0) { /* right at next packet boundary now */ + if (!nob_to_skip) { /* right at next packet boundary now */ conn->ksnc_rx_started = 0; mb(); /* racing with timeout thread */ @@ -1061,11 +1076,11 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg; - conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); + conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t); break; default: - LBUG (); + LBUG(); } conn->ksnc_rx_niov = 1; @@ -1075,9 +1090,10 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) return 1; } - /* Set up to skip as much as possible now. If there's more left - * (ran out of iov entries) we'll get called again */ - + /* + * Set up to skip as much as possible now. 
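ksocknal_new_packet() discards slop bytes by pointing several iov entries at one static scratch buffer, and simply returns for another pass if the iov table fills first. A compact sketch of that setup (table size is a hypothetical constant):

#include <stddef.h>
#include <sys/uio.h>

#define SLOP_SIZE	4096
#define MAX_SKIP_IOV	8		/* hypothetical iov-table size */

static char slop[SLOP_SIZE];

static unsigned int setup_skip(struct iovec *iov, size_t nob_to_skip)
{
	unsigned int niov = 0;

	do {
		size_t nob = nob_to_skip < SLOP_SIZE ? nob_to_skip
						     : SLOP_SIZE;

		iov[niov].iov_base = slop;
		iov[niov].iov_len = nob;
		niov++;
		nob_to_skip -= nob;
	} while (nob_to_skip && niov < MAX_SKIP_IOV);

	return niov;	/* remainder, if any, is skipped on the next call */
}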
If there's more left + * (ran out of iov entries) we'll get called again + */ conn->ksnc_rx_state = SOCKNAL_RX_SLOP; conn->ksnc_rx_nob_left = nob_to_skip; conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; @@ -1093,8 +1109,8 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) skipped += nob; nob_to_skip -= nob; - } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */ - niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec)); + } while (nob_to_skip && /* mustn't overflow conn's rx iov */ + niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec)); conn->ksnc_rx_niov = niov; conn->ksnc_rx_kiov = NULL; @@ -1104,13 +1120,13 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) } static int -ksocknal_process_receive (ksock_conn_t *conn) +ksocknal_process_receive(ksock_conn_t *conn) { lnet_hdr_t *lhdr; lnet_process_id_t *id; int rc; - LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); + LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0); /* NB: sched lock NOT held */ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */ @@ -1119,13 +1135,13 @@ ksocknal_process_receive (ksock_conn_t *conn) conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || conn->ksnc_rx_state == SOCKNAL_RX_SLOP); again: - if (conn->ksnc_rx_nob_wanted != 0) { + if (conn->ksnc_rx_nob_wanted) { rc = ksocknal_receive(conn); if (rc <= 0) { - LASSERT (rc != -EAGAIN); + LASSERT(rc != -EAGAIN); - if (rc == 0) + if (!rc) CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n", conn, libcfs_id2str(conn->ksnc_peer->ksnp_id), @@ -1139,12 +1155,12 @@ ksocknal_process_receive (ksock_conn_t *conn) conn->ksnc_port); /* it's not an error if conn is being closed */ - ksocknal_close_conn_and_siblings (conn, - (conn->ksnc_closing) ? 0 : rc); - return (rc == 0 ? -ESHUTDOWN : rc); + ksocknal_close_conn_and_siblings(conn, + (conn->ksnc_closing) ? 0 : rc); + return (!rc ? 
-ESHUTDOWN : rc); } - if (conn->ksnc_rx_nob_wanted != 0) { + if (conn->ksnc_rx_nob_wanted) { /* short read */ return -EAGAIN; } @@ -1169,7 +1185,7 @@ ksocknal_process_receive (ksock_conn_t *conn) } if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP && - conn->ksnc_msg.ksm_csum != 0 && /* has checksum */ + conn->ksnc_msg.ksm_csum && /* has checksum */ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { /* NOOP Checksum error */ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", @@ -1180,10 +1196,10 @@ ksocknal_process_receive (ksock_conn_t *conn) return -EIO; } - if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) { + if (conn->ksnc_msg.ksm_zc_cookies[1]) { __u64 cookie = 0; - LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x); + LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) cookie = conn->ksnc_msg.ksm_zc_cookies[0]; @@ -1191,7 +1207,7 @@ ksocknal_process_receive (ksock_conn_t *conn) rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie, conn->ksnc_msg.ksm_zc_cookies[1]); - if (rc != 0) { + if (rc) { CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie, conn->ksnc_msg.ksm_zc_cookies[1]); @@ -1202,7 +1218,7 @@ ksocknal_process_receive (ksock_conn_t *conn) } if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) { - ksocknal_new_packet (conn, 0); + ksocknal_new_packet(conn, 0); return 0; /* NOOP is done and just return */ } @@ -1224,7 +1240,7 @@ ksocknal_process_receive (ksock_conn_t *conn) /* unpack message header */ conn->ksnc_proto->pro_unpack(&conn->ksnc_msg); - if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { + if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) { /* Userspace peer */ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; id = &conn->ksnc_peer->ksnp_id; @@ -1243,14 +1259,14 @@ ksocknal_process_receive (ksock_conn_t *conn) if (rc < 0) { /* I just received garbage: give up on this conn */ ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_close_conn_and_siblings(conn, rc); ksocknal_conn_decref(conn); return -EPROTO; } /* I'm racing with ksocknal_recv() */ - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD); + LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD); if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD) return 0; @@ -1262,8 +1278,8 @@ ksocknal_process_receive (ksock_conn_t *conn) /* payload all received */ rc = 0; - if (conn->ksnc_rx_nob_left == 0 && /* not truncating */ - conn->ksnc_msg.ksm_csum != 0 && /* has checksum */ + if (!conn->ksnc_rx_nob_left && /* not truncating */ + conn->ksnc_msg.ksm_csum && /* has checksum */ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) { CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n", libcfs_id2str(conn->ksnc_peer->ksnp_id), @@ -1271,7 +1287,7 @@ ksocknal_process_receive (ksock_conn_t *conn) rc = -EIO; } - if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) { + if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) { LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; @@ -1285,16 +1301,16 @@ ksocknal_process_receive (ksock_conn_t *conn) lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc); - if (rc != 0) { + if (rc) { ksocknal_new_packet(conn, 0); - ksocknal_close_conn_and_siblings (conn, rc); + ksocknal_close_conn_and_siblings(conn, rc); return -EPROTO; } /* Fall through */ case SOCKNAL_RX_SLOP: /* starting new packet? 
*/ - if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left)) + if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left)) return 0; /* come back later */ goto again; /* try to finish reading slop now */ @@ -1308,9 +1324,9 @@ ksocknal_process_receive (ksock_conn_t *conn) } int -ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, - unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) +ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { ksock_conn_t *conn = private; ksock_sched_t *sched = conn->ksnc_scheduler; @@ -1322,7 +1338,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, conn->ksnc_rx_nob_wanted = mlen; conn->ksnc_rx_nob_left = rlen; - if (mlen == 0 || iov != NULL) { + if (!mlen || iov) { conn->ksnc_rx_nkiov = 0; conn->ksnc_rx_kiov = NULL; conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov; @@ -1349,8 +1365,8 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, switch (conn->ksnc_rx_state) { case SOCKNAL_RX_PARSE_WAIT: list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); - wake_up (&sched->kss_waitq); - LASSERT (conn->ksnc_rx_ready); + wake_up(&sched->kss_waitq); + LASSERT(conn->ksnc_rx_ready); break; case SOCKNAL_RX_PARSE: @@ -1396,7 +1412,7 @@ int ksocknal_scheduler(void *arg) cfs_block_allsigs(); rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt); - if (rc != 0) { + if (rc) { CERROR("Can't set CPT affinity to %d: %d\n", info->ksi_cpt, rc); } @@ -1408,18 +1424,20 @@ int ksocknal_scheduler(void *arg) /* Ensure I progress everything semi-fairly */ - if (!list_empty (&sched->kss_rx_conns)) { + if (!list_empty(&sched->kss_rx_conns)) { conn = list_entry(sched->kss_rx_conns.next, - ksock_conn_t, ksnc_rx_list); + ksock_conn_t, ksnc_rx_list); list_del(&conn->ksnc_rx_list); LASSERT(conn->ksnc_rx_scheduled); LASSERT(conn->ksnc_rx_ready); - /* clear rx_ready in case receive isn't complete. + /* + * clear rx_ready in case receive isn't complete. * Do it BEFORE we call process_recv, since * data_ready can set it any time after we release - * kss_lock. */ + * kss_lock. + */ conn->ksnc_rx_ready = 0; spin_unlock_bh(&sched->kss_lock); @@ -1431,18 +1449,20 @@ int ksocknal_scheduler(void *arg) LASSERT(conn->ksnc_rx_scheduled); /* Did process_receive get everything it wanted? 
*/ - if (rc == 0) + if (!rc) conn->ksnc_rx_ready = 1; if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) { - /* Conn blocked waiting for ksocknal_recv() + /* + * Conn blocked waiting for ksocknal_recv() * I change its state (under lock) to signal - * it can be rescheduled */ + * it can be rescheduled + */ conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT; } else if (conn->ksnc_rx_ready) { /* reschedule for rx */ - list_add_tail (&conn->ksnc_rx_list, - &sched->kss_rx_conns); + list_add_tail(&conn->ksnc_rx_list, + &sched->kss_rx_conns); } else { conn->ksnc_rx_scheduled = 0; /* drop my ref */ @@ -1452,25 +1472,24 @@ int ksocknal_scheduler(void *arg) did_something = 1; } - if (!list_empty (&sched->kss_tx_conns)) { + if (!list_empty(&sched->kss_tx_conns)) { LIST_HEAD(zlist); if (!list_empty(&sched->kss_zombie_noop_txs)) { - list_add(&zlist, - &sched->kss_zombie_noop_txs); + list_add(&zlist, &sched->kss_zombie_noop_txs); list_del_init(&sched->kss_zombie_noop_txs); } conn = list_entry(sched->kss_tx_conns.next, - ksock_conn_t, ksnc_tx_list); - list_del (&conn->ksnc_tx_list); + ksock_conn_t, ksnc_tx_list); + list_del(&conn->ksnc_tx_list); LASSERT(conn->ksnc_tx_scheduled); LASSERT(conn->ksnc_tx_ready); LASSERT(!list_empty(&conn->ksnc_tx_queue)); tx = list_entry(conn->ksnc_tx_queue.next, - ksock_tx_t, tx_list); + ksock_tx_t, tx_list); if (conn->ksnc_tx_carrier == tx) ksocknal_next_tx_carrier(conn); @@ -1478,16 +1497,20 @@ int ksocknal_scheduler(void *arg) /* dequeue now so empty list => more to send */ list_del(&tx->tx_list); - /* Clear tx_ready in case send isn't complete. Do + /* + * Clear tx_ready in case send isn't complete. Do * it BEFORE we call process_transmit, since * write_space can set it any time after we release - * kss_lock. */ + * kss_lock. + */ conn->ksnc_tx_ready = 0; spin_unlock_bh(&sched->kss_lock); if (!list_empty(&zlist)) { - /* free zombie noop txs, it's fast because - * noop txs are just put in freelist */ + /* + * free zombie noop txs, it's fast because + * noop txs are just put in freelist + */ ksocknal_txlist_done(NULL, &zlist, 0); } @@ -1496,8 +1519,7 @@ int ksocknal_scheduler(void *arg) if (rc == -ENOMEM || rc == -EAGAIN) { /* Incomplete send: replace tx on HEAD of tx_queue */ spin_lock_bh(&sched->kss_lock); - list_add(&tx->tx_list, - &conn->ksnc_tx_queue); + list_add(&tx->tx_list, &conn->ksnc_tx_queue); } else { /* Complete send; tx -ref */ ksocknal_tx_decref(tx); @@ -1508,13 +1530,15 @@ int ksocknal_scheduler(void *arg) } if (rc == -ENOMEM) { - /* Do nothing; after a short timeout, this - * conn will be reposted on kss_tx_conns. */ + /* + * Do nothing; after a short timeout, this + * conn will be reposted on kss_tx_conns. + */ } else if (conn->ksnc_tx_ready && !list_empty(&conn->ksnc_tx_queue)) { /* reschedule for tx */ list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); + &sched->kss_tx_conns); } else { conn->ksnc_tx_scheduled = 0; /* drop my ref */ @@ -1533,7 +1557,7 @@ int ksocknal_scheduler(void *arg) rc = wait_event_interruptible_exclusive( sched->kss_waitq, !ksocknal_sched_cansleep(sched)); - LASSERT (rc == 0); + LASSERT(!rc); } else { cond_resched(); } @@ -1551,7 +1575,7 @@ int ksocknal_scheduler(void *arg) * Add connection to kss_rx_conns of scheduler * and wakeup the scheduler. 
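The scheduler clears ksnc_rx_ready before processing so a data_ready callback firing mid-processing re-marks the conn instead of being lost. A stripped-down sketch of that pattern using C11 atomics (sketch_process is a stub standing in for the real receive path):

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_conn {
	atomic_bool ready;
};

static bool sketch_process(struct sketch_conn *c)
{
	(void)c;
	return true;		/* stub: pretend the receive completed */
}

static void sketch_service(struct sketch_conn *c)
{
	do {
		/* clear BEFORE processing: a callback now re-arms us */
		atomic_store(&c->ready, false);
		if (!sketch_process(c))
			return;	/* blocked in parse: rescheduled elsewhere */
	} while (atomic_load(&c->ready));
}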
*/ -void ksocknal_read_callback (ksock_conn_t *conn) +void ksocknal_read_callback(ksock_conn_t *conn) { ksock_sched_t *sched; @@ -1562,13 +1586,12 @@ void ksocknal_read_callback (ksock_conn_t *conn) conn->ksnc_rx_ready = 1; if (!conn->ksnc_rx_scheduled) { /* not being progressed */ - list_add_tail(&conn->ksnc_rx_list, - &sched->kss_rx_conns); + list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); conn->ksnc_rx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); } @@ -1577,7 +1600,7 @@ void ksocknal_read_callback (ksock_conn_t *conn) * Add connection to kss_tx_conns of scheduler * and wakeup the scheduler. */ -void ksocknal_write_callback (ksock_conn_t *conn) +void ksocknal_write_callback(ksock_conn_t *conn) { ksock_sched_t *sched; @@ -1589,20 +1612,19 @@ void ksocknal_write_callback (ksock_conn_t *conn) if (!conn->ksnc_tx_scheduled && /* not being progressed */ !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */ - list_add_tail (&conn->ksnc_tx_list, - &sched->kss_tx_conns); + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); conn->ksnc_tx_scheduled = 1; /* extra ref for scheduler */ ksocknal_conn_addref(conn); - wake_up (&sched->kss_waitq); + wake_up(&sched->kss_waitq); } spin_unlock_bh(&sched->kss_lock); } static ksock_proto_t * -ksocknal_parse_proto_version (ksock_hello_msg_t *hello) +ksocknal_parse_proto_version(ksock_hello_msg_t *hello) { __u32 version = 0; @@ -1611,7 +1633,7 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC)) version = __swab32(hello->kshm_version); - if (version != 0) { + if (version) { #if SOCKNAL_VERSION_DEBUG if (*ksocknal_tunables.ksnd_protocol == 1) return NULL; @@ -1632,11 +1654,11 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello; - CLASSERT(sizeof (lnet_magicversion_t) == - offsetof (ksock_hello_msg_t, kshm_src_nid)); + CLASSERT(sizeof(lnet_magicversion_t) == + offsetof(ksock_hello_msg_t, kshm_src_nid)); - if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) && - hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR)) + if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) && + hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR)) return &ksocknal_protocol_v1x; } @@ -1644,8 +1666,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) } int -ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, - lnet_nid_t peer_nid, ksock_hello_msg_t *hello) +ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn, + lnet_nid_t peer_nid, ksock_hello_msg_t *hello) { /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ ksock_net_t *net = (ksock_net_t *)ni->ni_data; @@ -1653,7 +1675,7 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES); /* rely on caller to hold a ref on socket so it wouldn't disappear */ - LASSERT(conn->ksnc_proto != NULL); + LASSERT(conn->ksnc_proto); hello->kshm_src_nid = ni->ni_nid; hello->kshm_dst_nid = peer_nid; @@ -1682,9 +1704,9 @@ ksocknal_invert_type(int type) } int -ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, - ksock_hello_msg_t *hello, lnet_process_id_t *peerid, - __u64 *incarnation) +ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn, + ksock_hello_msg_t *hello, lnet_process_id_t *peerid, + __u64 *incarnation) { /* Return < 0 fatal error 
* 0 success @@ -1692,7 +1714,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, * EPROTO protocol version mismatch */ struct socket *sock = conn->ksnc_sock; - int active = (conn->ksnc_proto != NULL); + int active = !!conn->ksnc_proto; int timeout; int proto_match; int rc; @@ -1705,20 +1727,20 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, timeout = active ? *ksocknal_tunables.ksnd_timeout : lnet_acceptor_timeout(); - rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout); - if (rc != 0) { + rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout); + if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); + rc, &conn->ksnc_ipaddr); + LASSERT(rc < 0); return rc; } if (hello->kshm_magic != LNET_PROTO_MAGIC && hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) && - hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) { + hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { /* Unexpected magic! */ CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n", - __cpu_to_le32 (hello->kshm_magic), + __cpu_to_le32(hello->kshm_magic), LNET_PROTO_TCP_MAGIC, &conn->ksnc_ipaddr); return -EPROTO; @@ -1726,15 +1748,15 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, rc = lnet_sock_read(sock, &hello->kshm_version, sizeof(hello->kshm_version), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0); return rc; } proto = ksocknal_parse_proto_version(hello); - if (proto == NULL) { + if (!proto) { if (!active) { /* unknown protocol from peer, tell peer my protocol */ conn->ksnc_proto = &ksocknal_protocol_v3x; @@ -1760,7 +1782,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, /* receive the rest of hello message anyway */ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading or checking hello from from %pI4h\n", rc, &conn->ksnc_ipaddr); LASSERT(rc < 0); @@ -1792,8 +1814,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype); if (conn->ksnc_type == SOCKLND_CONN_NONE) { CERROR("Unexpected type %d from %s ip %pI4h\n", - hello->kshm_ctype, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr); + hello->kshm_ctype, libcfs_id2str(*peerid), + &conn->ksnc_ipaddr); return -EPROTO; } @@ -1816,9 +1838,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) { CERROR("Mismatched types: me %d, %s ip %pI4h %d\n", - conn->ksnc_type, libcfs_id2str(*peerid), - &conn->ksnc_ipaddr, - hello->kshm_ctype); + conn->ksnc_type, libcfs_id2str(*peerid), + &conn->ksnc_ipaddr, hello->kshm_ctype); return -EPROTO; } @@ -1826,7 +1847,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, } static int -ksocknal_connect (ksock_route_t *route) +ksocknal_connect(ksock_route_t *route) { LIST_HEAD(zombies); ksock_peer_t *peer = route->ksnr_peer; @@ -1850,10 +1871,12 @@ ksocknal_connect (ksock_route_t *route) for (;;) { wanted = ksocknal_route_mask() & ~route->ksnr_connected; - /* stop connecting if peer/route got closed under me, or - * route got connected while queued */ + /* + * stop connecting if peer/route got closed under me, or + * route got connected while queued + */ if (peer->ksnp_closing || route->ksnr_deleted || - wanted == 0) { + !wanted) { retry_later = 0; break; } @@ -1869,14 +1892,14 @@ ksocknal_connect 
(ksock_route_t *route) if (retry_later) /* needs reschedule */ break; - if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) { + if (wanted & (1 << SOCKLND_CONN_ANY)) { type = SOCKLND_CONN_ANY; - } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) { + } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) { type = SOCKLND_CONN_CONTROL; - } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) { + } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) { type = SOCKLND_CONN_BULK_IN; } else { - LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0); + LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT)); type = SOCKLND_CONN_BULK_OUT; } @@ -1893,7 +1916,7 @@ ksocknal_connect (ksock_route_t *route) rc = lnet_connect(&sock, peer->ksnp_id.nid, route->ksnr_myipaddr, route->ksnr_ipaddr, route->ksnr_port); - if (rc != 0) + if (rc) goto failed; rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type); @@ -1904,9 +1927,11 @@ ksocknal_connect (ksock_route_t *route) goto failed; } - /* A +ve RC means I have to retry because I lost the connection - * race or I have to renegotiate protocol version */ - retry_later = (rc != 0); + /* + * A +ve RC means I have to retry because I lost the connection + * race or I have to renegotiate protocol version + */ + retry_later = (rc); if (retry_later) CDEBUG(D_NET, "peer %s: conn race, retry later.\n", libcfs_nid2str(peer->ksnp_id.nid)); @@ -1918,17 +1943,20 @@ ksocknal_connect (ksock_route_t *route) route->ksnr_connecting = 0; if (retry_later) { - /* re-queue for attention; this frees me up to handle - * the peer's incoming connection request */ - + /* + * re-queue for attention; this frees me up to handle + * the peer's incoming connection request + */ if (rc == EALREADY || - (rc == 0 && peer->ksnp_accepting > 0)) { - /* We want to introduce a delay before next + (!rc && peer->ksnp_accepting > 0)) { + /* + * We want to introduce a delay before next * attempt to connect if we lost conn race, * but the race is resolved quickly usually, - * so min_reconnectms should be good heuristic */ + * so min_reconnectms should be good heuristic + */ route->ksnr_retry_interval = - cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000; + cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000; route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_retry_interval); } @@ -1949,30 +1977,34 @@ ksocknal_connect (ksock_route_t *route) route->ksnr_retry_interval *= 2; route->ksnr_retry_interval = max(route->ksnr_retry_interval, - cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000); + cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000); route->ksnr_retry_interval = min(route->ksnr_retry_interval, - cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000); + cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000); - LASSERT (route->ksnr_retry_interval != 0); + LASSERT(route->ksnr_retry_interval); route->ksnr_timeout = cfs_time_add(cfs_time_current(), route->ksnr_retry_interval); if (!list_empty(&peer->ksnp_tx_queue) && - peer->ksnp_accepting == 0 && - ksocknal_find_connecting_route_locked(peer) == NULL) { + !peer->ksnp_accepting && + !ksocknal_find_connecting_route_locked(peer)) { ksock_conn_t *conn; - /* ksnp_tx_queue is queued on a conn on successful - * connection for V1.x and V2.x */ - if (!list_empty (&peer->ksnp_conns)) { + /* + * ksnp_tx_queue is queued on a conn on successful + * connection for V1.x and V2.x + */ + if (!list_empty(&peer->ksnp_conns)) { conn = list_entry(peer->ksnp_conns.next, - ksock_conn_t, ksnc_list); - LASSERT 
(conn->ksnc_proto == &ksocknal_protocol_v3x); + ksock_conn_t, ksnc_list); + LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x); } - /* take all the blocked packets while I've got the lock and - * complete below... */ + /* + * take all the blocked packets while I've got the lock and + * complete below... + */ list_splice_init(&peer->ksnp_tx_queue, &zombies); } @@ -2011,8 +2043,10 @@ ksocknal_connd_check_start(time64_t sec, long *timeout) if (total >= *ksocknal_tunables.ksnd_nconnds_max || total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) { - /* can't create more connd, or still have enough - * threads to handle more connecting */ + /* + * can't create more connd, or still have enough + * threads to handle more connecting + */ return 0; } @@ -2041,7 +2075,7 @@ ksocknal_connd_check_start(time64_t sec, long *timeout) rc = ksocknal_thread_start(ksocknal_connd, NULL, name); spin_lock_bh(&ksocknal_data.ksnd_connd_lock); - if (rc == 0) + if (!rc) return 1; /* we tried ... */ @@ -2093,8 +2127,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout) ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV; } -/* Go through connd_routes queue looking for a route that we can process - * right now, @timeout_p can be updated if we need to come back later */ +/* + * Go through connd_routes queue looking for a route that we can process + * right now, @timeout_p can be updated if we need to come back later + */ static ksock_route_t * ksocknal_connd_get_route_locked(signed long *timeout_p) { @@ -2104,10 +2140,9 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) now = cfs_time_current(); /* connd_routes can contain both pending and ordinary routes */ - list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes, - ksnr_connd_list) { - - if (route->ksnr_retry_interval == 0 || + list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes, + ksnr_connd_list) { + if (!route->ksnr_retry_interval || cfs_time_aftereq(now, route->ksnr_timeout)) return route; @@ -2120,7 +2155,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) } int -ksocknal_connd (void *arg) +ksocknal_connd(void *arg) { spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; ksock_connreq_t *cr; @@ -2172,15 +2207,17 @@ ksocknal_connd (void *arg) spin_lock_bh(connd_lock); } - /* Only handle an outgoing connection request if there + /* + * Only handle an outgoing connection request if there * is a thread left to handle incoming connections and - * create new connd */ + * create new connd + */ if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV < ksocknal_data.ksnd_connd_running) { route = ksocknal_connd_get_route_locked(&timeout); } - if (route != NULL) { - list_del (&route->ksnr_connd_list); + if (route) { + list_del(&route->ksnr_connd_list); ksocknal_data.ksnd_connd_connecting++; spin_unlock_bh(connd_lock); dropped_lock = 1; @@ -2231,24 +2268,26 @@ ksocknal_connd (void *arg) } static ksock_conn_t * -ksocknal_find_timed_out_conn (ksock_peer_t *peer) +ksocknal_find_timed_out_conn(ksock_peer_t *peer) { /* We're called with a shared lock on ksnd_global_lock */ ksock_conn_t *conn; struct list_head *ctmp; - list_for_each (ctmp, &peer->ksnp_conns) { + list_for_each(ctmp, &peer->ksnp_conns) { int error; - conn = list_entry (ctmp, ksock_conn_t, ksnc_list); + conn = list_entry(ctmp, ksock_conn_t, ksnc_list); /* Don't need the {get,put}connsock dance to deref ksnc_sock */ LASSERT(!conn->ksnc_closing); - /* SOCK_ERROR will reset error code of socket in - * some platform (like Darwin8.x) */ + /* + * SOCK_ERROR 
will reset error code of socket in + * some platform (like Darwin8.x) + */ error = conn->ksnc_sock->sk->sk_err; - if (error != 0) { + if (error) { ksocknal_conn_addref(conn); switch (error) { @@ -2292,11 +2331,13 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) } if ((!list_empty(&conn->ksnc_tx_queue) || - conn->ksnc_sock->sk->sk_wmem_queued != 0) && + conn->ksnc_sock->sk->sk_wmem_queued) && cfs_time_aftereq(cfs_time_current(), conn->ksnc_tx_deadline)) { - /* Timed out messages queued for sending or - * buffered in the socket's send buffer */ + /* + * Timed out messages queued for sending or + * buffered in the socket's send buffer + */ ksocknal_conn_addref(conn); CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n", libcfs_id2str(peer->ksnp_id), @@ -2313,20 +2354,18 @@ static inline void ksocknal_flush_stale_txs(ksock_peer_t *peer) { ksock_tx_t *tx; + ksock_tx_t *tmp; LIST_HEAD(stale_txs); write_lock_bh(&ksocknal_data.ksnd_global_lock); - while (!list_empty (&peer->ksnp_tx_queue)) { - tx = list_entry (peer->ksnp_tx_queue.next, - ksock_tx_t, tx_list); - + list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) { if (!cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) break; - list_del (&tx->tx_list); - list_add_tail (&tx->tx_list, &stale_txs); + list_del(&tx->tx_list); + list_add_tail(&tx->tx_list, &stale_txs); } write_unlock_bh(&ksocknal_data.ksnd_global_lock); @@ -2336,6 +2375,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) static int ksocknal_send_keepalive_locked(ksock_peer_t *peer) + __must_hold(&ksocknal_data.ksnd_global_lock) { ksock_sched_t *sched; ksock_conn_t *conn; @@ -2356,12 +2396,14 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) if (time_before(cfs_time_current(), peer->ksnp_send_keepalive)) return 0; - /* retry 10 secs later, so we wouldn't put pressure - * on this peer if we failed to send keepalive this time */ + /* + * retry 10 secs later, so we wouldn't put pressure + * on this peer if we failed to send keepalive this time + */ peer->ksnp_send_keepalive = cfs_time_shift(10); conn = ksocknal_find_conn_locked(peer, NULL, 1); - if (conn != NULL) { + if (conn) { sched = conn->ksnc_scheduler; spin_lock_bh(&sched->kss_lock); @@ -2378,12 +2420,12 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) /* cookie = 1 is reserved for keepalive PING */ tx = ksocknal_alloc_tx_noop(1, 1); - if (tx == NULL) { + if (!tx) { read_lock(&ksocknal_data.ksnd_global_lock); return -ENOMEM; } - if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) { + if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) { read_lock(&ksocknal_data.ksnd_global_lock); return 1; } @@ -2395,7 +2437,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) } static void -ksocknal_check_peer_timeouts (int idx) +ksocknal_check_peer_timeouts(int idx) { struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; ksock_peer_t *peer; @@ -2403,9 +2445,11 @@ ksocknal_check_peer_timeouts (int idx) ksock_tx_t *tx; again: - /* NB. We expect to have a look at all the peers and not find any + /* + * NB. We expect to have a look at all the peers and not find any * connections to time out, so we just use a shared lock while we - * take a look... */ + * take a look... 
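ksocknal_flush_stale_txs() below now walks ksnp_tx_queue with list_for_each_entry_safe() because entries are unlinked mid-walk. The same idea on a plain singly-linked list, assuming the queue is kept in deadline order:

#include <stddef.h>

struct sketch_tx {
	long deadline;
	struct sketch_tx *next;
};

/* Unlink every expired tx from *headp; return them as a private list. */
static struct sketch_tx *flush_stale(struct sketch_tx **headp, long now)
{
	struct sketch_tx *stale = NULL, **tail = &stale;

	while (*headp && now >= (*headp)->deadline) {
		struct sketch_tx *tx = *headp;

		*headp = tx->next;	/* unlink before reusing tx->next */
		tx->next = NULL;
		*tail = tx;
		tail = &tx->next;
	}
	return stale;	/* caller completes these with -ETIMEDOUT */
}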
+ */ read_lock(&ksocknal_data.ksnd_global_lock); list_for_each_entry(peer, peers, ksnp_list) { @@ -2413,35 +2457,37 @@ ksocknal_check_peer_timeouts (int idx) int resid = 0; int n = 0; - if (ksocknal_send_keepalive_locked(peer) != 0) { + if (ksocknal_send_keepalive_locked(peer)) { read_unlock(&ksocknal_data.ksnd_global_lock); goto again; } - conn = ksocknal_find_timed_out_conn (peer); + conn = ksocknal_find_timed_out_conn(peer); - if (conn != NULL) { + if (conn) { read_unlock(&ksocknal_data.ksnd_global_lock); - ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); - /* NB we won't find this one again, but we can't + /* + * NB we won't find this one again, but we can't * just proceed with the next peer, since we dropped - * ksnd_global_lock and it might be dead already! */ + * ksnd_global_lock and it might be dead already! + */ ksocknal_conn_decref(conn); goto again; } - /* we can't process stale txs right here because we're - * holding only shared lock */ - if (!list_empty (&peer->ksnp_tx_queue)) { - ksock_tx_t *tx = - list_entry (peer->ksnp_tx_queue.next, - ksock_tx_t, tx_list); + /* + * we can't process stale txs right here because we're + * holding only shared lock + */ + if (!list_empty(&peer->ksnp_tx_queue)) { + ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next, + ksock_tx_t, tx_list); if (cfs_time_aftereq(cfs_time_current(), tx->tx_deadline)) { - ksocknal_peer_addref(peer); read_unlock(&ksocknal_data.ksnd_global_lock); @@ -2466,13 +2512,13 @@ ksocknal_check_peer_timeouts (int idx) n++; } - if (n == 0) { + if (!n) { spin_unlock(&peer->ksnp_lock); continue; } tx = list_entry(peer->ksnp_zc_req_list.next, - ksock_tx_t, tx_zc_list); + ksock_tx_t, tx_zc_list); deadline = tx->tx_deadline; resid = tx->tx_resid; conn = tx->tx_conn; @@ -2486,7 +2532,7 @@ ksocknal_check_peer_timeouts (int idx) cfs_duration_sec(cfs_time_current() - deadline), resid, conn->ksnc_sock->sk->sk_wmem_queued); - ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT); + ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT); ksocknal_conn_decref(conn); goto again; } @@ -2495,7 +2541,7 @@ ksocknal_check_peer_timeouts (int idx) } int -ksocknal_reaper (void *arg) +ksocknal_reaper(void *arg) { wait_queue_t wait; ksock_conn_t *conn; @@ -2515,12 +2561,10 @@ ksocknal_reaper (void *arg) spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); while (!ksocknal_data.ksnd_shuttingdown) { - - if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) { - conn = list_entry (ksocknal_data. 
\ - ksnd_deathrow_conns.next, - ksock_conn_t, ksnc_list); - list_del (&conn->ksnc_list); + if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) { + conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next, + ksock_conn_t, ksnc_list); + list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2531,10 +2575,10 @@ ksocknal_reaper (void *arg) continue; } - if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) { - conn = list_entry (ksocknal_data.ksnd_zombie_conns.\ - next, ksock_conn_t, ksnc_list); - list_del (&conn->ksnc_list); + if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) { + conn = list_entry(ksocknal_data.ksnd_zombie_conns.next, + ksock_conn_t, ksnc_list); + list_del(&conn->ksnc_list); spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock); @@ -2544,9 +2588,9 @@ ksocknal_reaper (void *arg) continue; } - if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) { + if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) { list_add(&enomem_conns, - &ksocknal_data.ksnd_enomem_conns); + &ksocknal_data.ksnd_enomem_conns); list_del_init(&ksocknal_data.ksnd_enomem_conns); } @@ -2554,10 +2598,10 @@ ksocknal_reaper (void *arg) /* reschedule all the connections that stalled with ENOMEM... */ nenomem_conns = 0; - while (!list_empty (&enomem_conns)) { - conn = list_entry (enomem_conns.next, - ksock_conn_t, ksnc_tx_list); - list_del (&conn->ksnc_tx_list); + while (!list_empty(&enomem_conns)) { + conn = list_entry(enomem_conns.next, ksock_conn_t, + ksnc_tx_list); + list_del(&conn->ksnc_tx_list); sched = conn->ksnc_scheduler; @@ -2566,7 +2610,7 @@ ksocknal_reaper (void *arg) LASSERT(conn->ksnc_tx_scheduled); conn->ksnc_tx_ready = 1; list_add_tail(&conn->ksnc_tx_list, - &sched->kss_tx_conns); + &sched->kss_tx_conns); wake_up(&sched->kss_waitq); spin_unlock_bh(&sched->kss_lock); @@ -2580,21 +2624,22 @@ ksocknal_reaper (void *arg) const int p = 1; int chunk = ksocknal_data.ksnd_peer_hash_size; - /* Time to check for timeouts on a few more peers: I do + /* + * Time to check for timeouts on a few more peers: I do * checks every 'p' seconds on a proportion of the peer * table and I need to check every connection 'n' times * within a timeout interval, to ensure I detect a * timeout on any connection within (n+1)/n times the - * timeout interval. */ - + * timeout interval. + */ if (*ksocknal_tunables.ksnd_timeout > n * p) chunk = (chunk * n * p) / *ksocknal_tunables.ksnd_timeout; - if (chunk == 0) + if (!chunk) chunk = 1; for (i = 0; i < chunk; i++) { - ksocknal_check_peer_timeouts (peer_index); + ksocknal_check_peer_timeouts(peer_index); peer_index = (peer_index + 1) % ksocknal_data.ksnd_peer_hash_size; } @@ -2602,25 +2647,27 @@ ksocknal_reaper (void *arg) deadline = cfs_time_add(deadline, cfs_time_seconds(p)); } - if (nenomem_conns != 0) { - /* Reduce my timeout if I rescheduled ENOMEM conns. + if (nenomem_conns) { + /* + * Reduce my timeout if I rescheduled ENOMEM conns. * This also prevents me getting woken immediately - * if any go back on my enomem list. */ + * if any go back on my enomem list. 
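The reaper scans only a slice of the peer hash each pass so that every connection still gets checked about n times per timeout interval. The chunk computation in isolation (n = 4 and p = 1 match the constants used here; treat them as illustrative):

static int timeout_scan_chunk(int hash_size, int timeout_secs)
{
	const int n = 4;	/* checks per timeout interval */
	const int p = 1;	/* seconds between passes */
	int chunk = hash_size;

	if (timeout_secs > n * p)
		chunk = (chunk * n * p) / timeout_secs;
	if (!chunk)
		chunk = 1;
	return chunk;		/* peers to scan this pass */
}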
+ */ timeout = SOCKNAL_ENOMEM_RETRY; } ksocknal_data.ksnd_reaper_waketime = cfs_time_add(cfs_time_current(), timeout); - set_current_state (TASK_INTERRUPTIBLE); - add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); if (!ksocknal_data.ksnd_shuttingdown && - list_empty (&ksocknal_data.ksnd_deathrow_conns) && - list_empty (&ksocknal_data.ksnd_zombie_conns)) + list_empty(&ksocknal_data.ksnd_deathrow_conns) && + list_empty(&ksocknal_data.ksnd_zombie_conns)) schedule_timeout(timeout); - set_current_state (TASK_RUNNING); - remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait); + set_current_state(TASK_RUNNING); + remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait); spin_lock_bh(&ksocknal_data.ksnd_reaper_lock); } diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index cf8e43bd3..d4ce06d0a 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -45,13 +45,13 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */ LASSERT(!conn->ksnc_closing); - if (rc != 0) { + if (rc) { CERROR("Error %d getting sock peer IP\n", rc); return rc; } rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL); - if (rc != 0) { + if (rc) { CERROR("Error %d getting sock local IP\n", rc); return rc; } @@ -67,9 +67,11 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn) if (conn->ksnc_proto == &ksocknal_protocol_v1x) return 0; - /* ZC if the socket supports scatter/gather and doesn't need software - * checksums */ - return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0); + /* + * ZC if the socket supports scatter/gather and doesn't need software + * checksums + */ + return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK)); } int @@ -82,12 +84,13 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */ conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */ tx->tx_nob == tx->tx_resid && /* frist sending */ - tx->tx_msg.ksm_csum == 0) /* not checksummed */ + !tx->tx_msg.ksm_csum) /* not checksummed */ ksocknal_lib_csum_tx(tx); - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. + */ { #if SOCKNAL_SINGLE_FRAG_TX struct kvec scratch; @@ -123,11 +126,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) int nob; /* Not NOOP message */ - LASSERT(tx->tx_lnetmsg != NULL); + LASSERT(tx->tx_lnetmsg); - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - if (tx->tx_msg.ksm_zc_cookies[0] != 0) { + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. 
+ */ + if (tx->tx_msg.ksm_zc_cookies[0]) { /* Zero copy is enabled */ struct sock *sk = sock->sk; struct page *page = kiov->kiov_page; @@ -136,13 +141,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) int msgflg = MSG_DONTWAIT; CDEBUG(D_NET, "page %p + offset %x for %d\n", - page, offset, kiov->kiov_len); + page, offset, kiov->kiov_len); if (!list_empty(&conn->ksnc_tx_queue) || fragsize < tx->tx_resid) msgflg |= MSG_MORE; - if (sk->sk_prot->sendpage != NULL) { + if (sk->sk_prot->sendpage) { rc = sk->sk_prot->sendpage(sk, page, offset, fragsize, msgflg); } else { @@ -187,13 +192,14 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn) int opt = 1; struct socket *sock = conn->ksnc_sock; - /* Remind the socket to ACK eagerly. If I don't, the socket might + /* + * Remind the socket to ACK eagerly. If I don't, the socket might * think I'm about to send something it could piggy-back the ACK * on, introducing delay in completing zero-copy sends in my - * peer. */ - - kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, - (char *)&opt, sizeof(opt)); + * peer. + */ + kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt, + sizeof(opt)); } int @@ -218,8 +224,10 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) int sum; __u32 saved_csum; - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. + */ LASSERT(niov > 0); for (nob = i = 0; i < niov; i++) { @@ -228,8 +236,8 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) } LASSERT(nob <= conn->ksnc_rx_nob_wanted); - rc = kernel_recvmsg(conn->ksnc_sock, &msg, - scratchiov, niov, nob, MSG_DONTWAIT); + rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob, + MSG_DONTWAIT); saved_csum = 0; if (conn->ksnc_proto == &ksocknal_protocol_v2x) { @@ -237,7 +245,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) conn->ksnc_msg.ksm_csum = 0; } - if (saved_csum != 0) { + if (saved_csum) { /* accumulate checksum */ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { LASSERT(i < niov); @@ -258,7 +266,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn) static void ksocknal_lib_kiov_vunmap(void *addr) { - if (addr == NULL) + if (!addr) return; vunmap(addr); @@ -272,7 +280,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, int nob; int i; - if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL) + if (!*ksocknal_tunables.ksnd_zc_recv || !pages) return NULL; LASSERT(niov <= LNET_MAX_IOV); @@ -282,8 +290,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, return NULL; for (nob = i = 0; i < niov; i++) { - if ((kiov[i].kiov_offset != 0 && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) + if ((kiov[i].kiov_offset && i > 0) || + (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) return NULL; pages[i] = kiov[i].kiov_page; @@ -291,7 +299,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, } addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL); - if (addr == NULL) + if (!addr) return NULL; iov->iov_base = addr + kiov[0].kiov_offset; @@ -329,10 +337,12 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) int fragnob; int n; - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ + /* + * NB we can't trust socket ops to either consume our iovs + * or leave them alone. 
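ksocknal_lib_recv_iov() re-walks the fragments after a bulk receive to fold exactly the received bytes into the running checksum. A userspace analogue (csum_fold is a toy mix standing in for ksocknal_csum; fragment clamping mirrors the fragnob logic above):

#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

/* toy mix, NOT the driver's adler32-based ksocknal_csum() */
static uint32_t csum_fold(uint32_t sum, const void *p, size_t len)
{
	const uint8_t *b = p;

	while (len--)
		sum = (sum << 1 | sum >> 31) ^ *b++;
	return sum;
}

static uint32_t csum_frags(const struct iovec *iov, unsigned int niov,
			   size_t rc, uint32_t sum)
{
	unsigned int i;

	for (i = 0; i < niov && rc > 0; i++) {
		size_t fragnob = iov[i].iov_len < rc ? iov[i].iov_len : rc;

		sum = csum_fold(sum, iov[i].iov_base, fragnob);
		rc -= fragnob;
	}
	return sum;
}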
+ */ addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages); - if (addr != NULL) { + if (addr) { nob = scratchiov[0].iov_len; n = 1; @@ -347,17 +357,19 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) LASSERT(nob <= conn->ksnc_rx_nob_wanted); - rc = kernel_recvmsg(conn->ksnc_sock, &msg, - (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT); + rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov, + n, nob, MSG_DONTWAIT); - if (conn->ksnc_msg.ksm_csum != 0) { + if (conn->ksnc_msg.ksm_csum) { for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { LASSERT(i < niov); - /* Dang! have to kmap again because I have nowhere to + /* + * Dang! have to kmap again because I have nowhere to * stash the mapped address. But by doing it while the * page is still mapped, the kernel just bumps the map - * count and returns me the address it stashed. */ + * count and returns me the address it stashed. + */ base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; fragnob = kiov[i].kiov_len; if (fragnob > sum) @@ -370,7 +382,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) } } - if (addr != NULL) { + if (addr) { ksocknal_lib_kiov_vunmap(addr); } else { for (i = 0; i < niov; i++) @@ -388,7 +400,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx) void *base; LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); - LASSERT(tx->tx_conn != NULL); + LASSERT(tx->tx_conn); LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x); tx->tx_msg.ksm_csum = 0; @@ -396,7 +408,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx) csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base, tx->tx_iov[0].iov_len); - if (tx->tx_kiov != NULL) { + if (tx->tx_kiov) { for (i = 0; i < tx->tx_nkiov; i++) { base = kmap(tx->tx_kiov[i].kiov_page) + tx->tx_kiov[i].kiov_offset; @@ -427,22 +439,22 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int * int rc; rc = ksocknal_connsock_addref(conn); - if (rc != 0) { + if (rc) { LASSERT(conn->ksnc_closing); *txmem = *rxmem = *nagle = 0; return -ESHUTDOWN; } rc = lnet_sock_getbuf(sock, txmem, rxmem); - if (rc == 0) { + if (!rc) { len = sizeof(*nagle); rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)nagle, &len); + (char *)nagle, &len); } ksocknal_connsock_decref(conn); - if (rc == 0) + if (!rc) *nagle = !*nagle; else *txmem = *rxmem = *nagle = 0; @@ -463,23 +475,24 @@ ksocknal_lib_setup_sock(struct socket *sock) sock->sk->sk_allocation = GFP_NOFS; - /* Ensure this socket aborts active sends immediately when we close - * it. */ - + /* + * Ensure this socket aborts active sends immediately when we close + * it. 
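For reference, the SO_LINGER setup in ksocknal_lib_setup_sock() looks like this from userspace; a small sketch (error handling elided): with l_onoff = 0, close() returns immediately and never blocks on unsent data.

#include <string.h>
#include <sys/socket.h>

static int set_linger_off(int fd)
{
	struct linger l;

	memset(&l, 0, sizeof(l)); /* l_onoff = 0, l_linger = 0 */
	return setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	return fd < 0 ? 1 : set_linger_off(fd);
}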
+ */ linger.l_onoff = 0; linger.l_linger = 0; - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, - (char *)&linger, sizeof(linger)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger, + sizeof(linger)); + if (rc) { CERROR("Can't set SO_LINGER: %d\n", rc); return rc; } option = -1; - rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, - (char *)&option, sizeof(option)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option, + sizeof(option)); + if (rc) { CERROR("Can't set SO_LINGER2: %d\n", rc); return rc; } @@ -488,8 +501,8 @@ ksocknal_lib_setup_sock(struct socket *sock) option = 1; rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)&option, sizeof(option)); - if (rc != 0) { + (char *)&option, sizeof(option)); + if (rc) { CERROR("Can't disable nagle: %d\n", rc); return rc; } @@ -497,10 +510,10 @@ ksocknal_lib_setup_sock(struct socket *sock) rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size, *ksocknal_tunables.ksnd_rx_buffer_size); - if (rc != 0) { + if (rc) { CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", - *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size, rc); + *ksocknal_tunables.ksnd_tx_buffer_size, + *ksocknal_tunables.ksnd_rx_buffer_size, rc); return rc; } @@ -514,9 +527,9 @@ ksocknal_lib_setup_sock(struct socket *sock) do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0); option = (do_keepalive ? 1 : 0); - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&option, sizeof(option)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option, + sizeof(option)); + if (rc) { CERROR("Can't set SO_KEEPALIVE: %d\n", rc); return rc; } @@ -524,23 +537,23 @@ ksocknal_lib_setup_sock(struct socket *sock) if (!do_keepalive) return 0; - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, - (char *)&keep_idle, sizeof(keep_idle)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle, + sizeof(keep_idle)); + if (rc) { CERROR("Can't set TCP_KEEPIDLE: %d\n", rc); return rc; } rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keep_intvl, sizeof(keep_intvl)); - if (rc != 0) { + (char *)&keep_intvl, sizeof(keep_intvl)); + if (rc) { CERROR("Can't set TCP_KEEPINTVL: %d\n", rc); return rc; } - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, - (char *)&keep_count, sizeof(keep_count)); - if (rc != 0) { + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count, + sizeof(keep_count)); + if (rc) { CERROR("Can't set TCP_KEEPCNT: %d\n", rc); return rc; } @@ -558,7 +571,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn) int rc; rc = ksocknal_connsock_addref(conn); - if (rc != 0) /* being shut down */ + if (rc) /* being shut down */ return; sk = conn->ksnc_sock->sk; @@ -570,8 +583,8 @@ ksocknal_lib_push_conn(ksock_conn_t *conn) release_sock(sk); rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - LASSERT(rc == 0); + (char *)&val, sizeof(val)); + LASSERT(!rc); lock_sock(sk); tp->nonagle = nonagle; @@ -593,11 +606,12 @@ ksocknal_data_ready(struct sock *sk) read_lock(&ksocknal_data.ksnd_global_lock); conn = sk->sk_user_data; - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ + if (!conn) { /* raced with ksocknal_terminate_conn */ LASSERT(sk->sk_data_ready != &ksocknal_data_ready); sk->sk_data_ready(sk); - } else + } else { ksocknal_read_callback(conn); + } read_unlock(&ksocknal_data.ksnd_global_lock); } @@ 
-619,14 +633,14 @@ ksocknal_write_space(struct sock *sk) CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", sk, wspace, min_wpace, conn, - (conn == NULL) ? "" : (conn->ksnc_tx_ready ? + !conn ? "" : (conn->ksnc_tx_ready ? " ready" : " blocked"), - (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ? + !conn ? "" : (conn->ksnc_tx_scheduled ? " scheduled" : " idle"), - (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ? + !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ? " empty" : " queued")); - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ + if (!conn) { /* raced with ksocknal_terminate_conn */ LASSERT(sk->sk_write_space != &ksocknal_write_space); sk->sk_write_space(sk); @@ -637,10 +651,11 @@ ksocknal_write_space(struct sock *sk) if (wspace >= min_wpace) { /* got enough space */ ksocknal_write_callback(conn); - /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the + /* + * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the * ENOMEM check in ksocknal_transmit is race-free (think about - * it). */ - + * it). + */ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); } @@ -666,15 +681,19 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn) void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) { - /* Remove conn's network callbacks. + /* + * Remove conn's network callbacks. * NB I _have_ to restore the callback, rather than storing a noop, - * since the socket could survive past this module being unloaded!! */ + * since the socket could survive past this module being unloaded!! + */ sock->sk->sk_data_ready = conn->ksnc_saved_data_ready; sock->sk->sk_write_space = conn->ksnc_saved_write_space; - /* A callback could be in progress already; they hold a read lock + /* + * A callback could be in progress already; they hold a read lock * on ksnd_global_lock (to serialise with me) and NOOP if - * sk_user_data is NULL. */ + * sk_user_data is NULL. + */ sock->sk->sk_user_data = NULL; return ; @@ -691,14 +710,16 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn) if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) && !conn->ksnc_tx_ready) { - /* SOCK_NOSPACE is set when the socket fills + /* + * SOCK_NOSPACE is set when the socket fills * and cleared in the write_space callback * (which also sets ksnc_tx_ready). If * SOCK_NOSPACE and ksnc_tx_ready are BOTH * zero, I didn't fill the socket and * write_space won't reschedule me, so I * return -ENOMEM to get my caller to retry - * after a timeout */ + * after a timeout + */ rc = -ENOMEM; } diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c index fdb2b23e2..6329cbe66 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c @@ -14,9 +14,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "socklnd.h" @@ -41,8 +38,10 @@ static int peer_timeout = 180; module_param(peer_timeout, int, 0444); MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)"); -/* Number of daemons in each thread pool which is percpt, - * we will estimate reasonable value based on CPUs if it's not set. 
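The nscheds parameter documented here is sized from the CPU count when left at zero. A toy model of that policy (the cap of four threads below is purely illustrative, not the driver's actual heuristic):

#include <stdio.h>

static int sched_threads(int nscheds, int cpus_in_partition)
{
	if (nscheds > 0)
		return nscheds; /* explicit module-parameter override */
	return cpus_in_partition < 4 ? cpus_in_partition : 4;
}

int main(void)
{
	printf("%d\n", sched_threads(0, 8)); /* 4: estimated from CPUs */
	printf("%d\n", sched_threads(2, 8)); /* 2: user override       */
	return 0;
}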
*/ +/* + * Number of daemons in each thread pool which is percpt, + * we will estimate reasonable value based on CPUs if it's not set. + */ static unsigned int nscheds; module_param(nscheds, int, 0444); MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting"); @@ -72,7 +71,7 @@ static int typed_conns = 1; module_param(typed_conns, int, 0444); MODULE_PARM_DESC(typed_conns, "use different sockets for bulk"); -static int min_bulk = 1<<10; +static int min_bulk = 1 << 10; module_param(min_bulk, int, 0644); MODULE_PARM_DESC(min_bulk, "smallest 'large' message"); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index 986bce4c9..32cc31e4c 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -19,9 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "socklnd.h" @@ -56,15 +53,14 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn) /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ LASSERT(!list_empty(&conn->ksnc_tx_queue)); - LASSERT(tx != NULL); + LASSERT(tx); /* Next TX that can carry ZC-ACK or LNet message */ if (tx->tx_list.next == &conn->ksnc_tx_queue) { /* no more packets queued */ conn->ksnc_tx_carrier = NULL; } else { - conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, - ksock_tx_t, tx_list); + conn->ksnc_tx_carrier = list_next_entry(tx, tx_list); LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); } } @@ -75,8 +71,8 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, { ksock_tx_t *tx = conn->ksnc_tx_carrier; - LASSERT(tx_ack == NULL || - tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); + LASSERT(!tx_ack || + tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); /* * Enqueue or piggyback tx_ack / cookie @@ -85,10 +81,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, * . There is tx can piggyback cookie of tx_ack (or cookie), * piggyback the cookie and return the tx. */ - if (tx == NULL) { - if (tx_ack != NULL) { + if (!tx) { + if (tx_ack) { list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_ack; } return 0; @@ -96,16 +92,16 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { /* tx is noop zc-ack, can't piggyback zc-ack cookie */ - if (tx_ack != NULL) + if (tx_ack) list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); return 0; } LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); - LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0); + LASSERT(!tx->tx_msg.ksm_zc_cookies[1]); - if (tx_ack != NULL) + if (tx_ack) cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; /* piggyback the zc-ack cookie */ @@ -128,7 +124,7 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) * . If there is NOOP on the connection, piggyback the cookie * and replace the NOOP tx, and return the NOOP tx. 
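The payoff of the carrier scheme spelled out in these queueing rules is that a pending LNET message with a free cookie slot can carry a ZC-ACK for free, and only otherwise does a standalone NOOP get queued. A compilable model of that V2 decision (names and values are illustrative, not the driver's):

#include <stdio.h>

#define KSOCK_MSG_NOOP 0
#define KSOCK_MSG_LNET 1

struct tx {
	int type;
	unsigned long long zc_cookies[2]; /* [1] is the piggyback slot */
};

/* Return 1 if the ack rode along, 0 if a NOOP tx must be queued. */
static int piggyback_zcack(struct tx *carrier, unsigned long long cookie)
{
	if (!carrier || carrier->type == KSOCK_MSG_NOOP ||
	    carrier->zc_cookies[1])
		return 0;
	carrier->zc_cookies[1] = cookie;
	return 1;
}

int main(void)
{
	struct tx t = { .type = KSOCK_MSG_LNET };

	printf("%d\n", piggyback_zcack(&t, 7)); /* 1: slot was free  */
	printf("%d\n", piggyback_zcack(&t, 8)); /* 0: slot now taken */
	return 0;
}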
*/ - if (tx == NULL) { /* nothing on queue */ + if (!tx) { /* nothing on queue */ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_msg; return NULL; @@ -162,22 +158,22 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie); /* non-blocking ZC-ACK (to router) */ - LASSERT(tx_ack == NULL || - tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); + LASSERT(!tx_ack || + tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); tx = conn->ksnc_tx_carrier; - if (tx == NULL) { - if (tx_ack != NULL) { + if (!tx) { + if (tx_ack) { list_add_tail(&tx_ack->tx_list, - &conn->ksnc_tx_queue); + &conn->ksnc_tx_queue); conn->ksnc_tx_carrier = tx_ack; } return 0; } - /* conn->ksnc_tx_carrier != NULL */ + /* conn->ksnc_tx_carrier */ - if (tx_ack != NULL) + if (tx_ack) cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */ @@ -185,7 +181,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { /* replace the keepalive PING with a real ACK */ - LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(!tx->tx_msg.ksm_zc_cookies[0]); tx->tx_msg.ksm_zc_cookies[1] = cookie; return 1; } @@ -197,7 +193,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, return 1; /* XXX return error in the future */ } - if (tx->tx_msg.ksm_zc_cookies[0] == 0) { + if (!tx->tx_msg.ksm_zc_cookies[0]) { /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */ if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; @@ -233,7 +229,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, tmp = tx->tx_msg.ksm_zc_cookies[0]; } - if (tmp != 0) { + if (tmp) { /* range of cookies */ tx->tx_msg.ksm_zc_cookies[0] = tmp - 1; tx->tx_msg.ksm_zc_cookies[1] = tmp + 1; @@ -261,7 +257,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn, } /* failed to piggyback ZC-ACK */ - if (tx_ack != NULL) { + if (tx_ack) { list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue); /* the next tx can piggyback at least 1 ACK */ ksocknal_next_tx_carrier(conn); @@ -280,7 +276,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) return SOCKNAL_MATCH_YES; #endif - if (tx == NULL || tx->tx_lnetmsg == NULL) { + if (!tx || !tx->tx_lnetmsg) { /* noop packet */ nob = offsetof(ksock_msg_t, ksm_u); } else { @@ -319,7 +315,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) { int nob; - if (tx == NULL || tx->tx_lnetmsg == NULL) + if (!tx || !tx->tx_lnetmsg) nob = offsetof(ksock_msg_t, ksm_u); else nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t); @@ -334,7 +330,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) case SOCKLND_CONN_ACK: if (nonblk) return SOCKNAL_MATCH_YES; - else if (tx == NULL || tx->tx_lnetmsg == NULL) + else if (!tx || !tx->tx_lnetmsg) return SOCKNAL_MATCH_MAY; else return SOCKNAL_MATCH_NO; @@ -369,10 +365,10 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) read_lock(&ksocknal_data.ksnd_global_lock); conn = ksocknal_find_conn_locked(peer, NULL, !!remote); - if (conn != NULL) { + if (conn) { ksock_sched_t *sched = conn->ksnc_scheduler; - LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); + LASSERT(conn->ksnc_proto->pro_queue_tx_zcack); spin_lock_bh(&sched->kss_lock); @@ -390,11 +386,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) /* ACK connection is not ready, or can't piggyback the ACK */ tx = ksocknal_alloc_tx_noop(cookie, !!remote); 
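ksocknal_queue_tx_zcack_v3() and ksocknal_handle_zcack() share a compact convention for packing multiple ZC-ACKs into one message's two cookie fields. A standalone sketch of the receiver-side match, modeled on the comparison in ksocknal_handle_zcack():

#include <stdio.h>

/*
 * With c1 < c2 the pair acks the inclusive range [c1, c2]; with
 * c1 > c2 it acks exactly the two cookies; c1 == 0 degenerates to
 * the single cookie c2.
 */
static int zcack_matches(unsigned long long c1, unsigned long long c2,
			 unsigned long long c)
{
	if (!c1)
		c1 = c2;
	return c == c1 || c == c2 || (c1 < c && c < c2);
}

int main(void)
{
	printf("%d\n", zcack_matches(3, 7, 5)); /* 1: inside range 3..7 */
	printf("%d\n", zcack_matches(7, 3, 5)); /* 0: only {7, 3} match */
	return 0;
}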
- if (tx == NULL) + if (!tx) return -ENOMEM; rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id); - if (rc == 0) + if (!rc) return 0; ksocknal_free_tx(tx); @@ -407,11 +403,12 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) { ksock_peer_t *peer = conn->ksnc_peer; ksock_tx_t *tx; + ksock_tx_t *temp; ksock_tx_t *tmp; LIST_HEAD(zlist); int count; - if (cookie1 == 0) + if (!cookie1) cookie1 = cookie2; count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1); @@ -424,8 +421,8 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) spin_lock(&peer->ksnp_lock); - list_for_each_entry_safe(tx, tmp, - &peer->ksnp_zc_req_list, tx_zc_list) { + list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, + tx_zc_list) { __u64 c = tx->tx_msg.ksm_zc_cookies[0]; if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) { @@ -433,20 +430,19 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) list_del(&tx->tx_zc_list); list_add(&tx->tx_zc_list, &zlist); - if (--count == 0) + if (!--count) break; } } spin_unlock(&peer->ksnp_lock); - while (!list_empty(&zlist)) { - tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); + list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) { list_del(&tx->tx_zc_list); ksocknal_tx_decref(tx); } - return count == 0 ? 0 : -EPROTO; + return !count ? 0 : -EPROTO; } static int @@ -461,58 +457,59 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); LIBCFS_ALLOC(hdr, sizeof(*hdr)); - if (hdr == NULL) { + if (!hdr) { CERROR("Can't allocate lnet_hdr_t\n"); return -ENOMEM; } hmv = (lnet_magicversion_t *)&hdr->dest_nid; - /* Re-organize V2.x message header to V1.x (lnet_hdr_t) - * header and send out */ - hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); - hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); - hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); + /* + * Re-organize V2.x message header to V1.x (lnet_hdr_t) + * header and send out + */ + hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC); + hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR); + hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR); - if (the_lnet.ln_testprotocompat != 0) { + if (the_lnet.ln_testprotocompat) { /* single-shot proto check */ LNET_LOCK(); - if ((the_lnet.ln_testprotocompat & 1) != 0) { + if (the_lnet.ln_testprotocompat & 1) { hmv->version_major++; /* just different! 
*/ the_lnet.ln_testprotocompat &= ~1; } - if ((the_lnet.ln_testprotocompat & 2) != 0) { + if (the_lnet.ln_testprotocompat & 2) { hmv->magic = LNET_PROTO_MAGIC; the_lnet.ln_testprotocompat &= ~2; } LNET_UNLOCK(); } - hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); - hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); - hdr->type = cpu_to_le32 (LNET_MSG_HELLO); - hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); - hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); - hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); + hdr->src_nid = cpu_to_le64(hello->kshm_src_nid); + hdr->src_pid = cpu_to_le32(hello->kshm_src_pid); + hdr->type = cpu_to_le32(LNET_MSG_HELLO); + hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32)); + hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype); + hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation); rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); goto out; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) goto out; - for (i = 0; i < (int) hello->kshm_nips; i++) { - hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); - } + for (i = 0; i < (int) hello->kshm_nips; i++) + hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]); rc = lnet_sock_write(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -532,10 +529,10 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) hello->kshm_magic = LNET_PROTO_MAGIC; hello->kshm_version = conn->ksnc_proto->pro_version; - if (the_lnet.ln_testprotocompat != 0) { + if (the_lnet.ln_testprotocompat) { /* single-shot proto check */ LNET_LOCK(); - if ((the_lnet.ln_testprotocompat & 1) != 0) { + if (the_lnet.ln_testprotocompat & 1) { hello->kshm_version++; /* just different! 
*/ the_lnet.ln_testprotocompat &= ~1; } @@ -544,19 +541,19 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); return rc; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) return 0; rc = lnet_sock_write(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), lnet_acceptor_timeout()); - if (rc != 0) { + if (rc) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -575,7 +572,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, int i; LIBCFS_ALLOC(hdr, sizeof(*hdr)); - if (hdr == NULL) { + if (!hdr) { CERROR("Can't allocate lnet_hdr_t\n"); return -ENOMEM; } @@ -583,15 +580,15 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, rc = lnet_sock_read(sock, &hdr->src_nid, sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading rest of HELLO hdr from %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); goto out; } /* ...and check we got what we expected */ - if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) { + if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) { CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n", le32_to_cpu(hdr->type), &conn->ksnc_ipaddr); @@ -613,14 +610,14 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, goto out; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) goto out; rc = lnet_sock_read(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading IPs from ip %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); goto out; } @@ -628,7 +625,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, for (i = 0; i < (int) hello->kshm_nips; i++) { hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]); - if (hello->kshm_ips[i] == 0) { + if (!hello->kshm_ips[i]) { CERROR("Zero IP[%d] from ip %pI4h\n", i, &conn->ksnc_ipaddr); rc = -EPROTO; @@ -657,9 +654,9 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout offsetof(ksock_hello_msg_t, kshm_ips) - offsetof(ksock_hello_msg_t, kshm_src_nid), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading HELLO from %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); return rc; } @@ -681,14 +678,14 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout return -EPROTO; } - if (hello->kshm_nips == 0) + if (!hello->kshm_nips) return 0; rc = lnet_sock_read(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading IPs from ip %pI4h\n", - rc, &conn->ksnc_ipaddr); + rc, &conn->ksnc_ipaddr); LASSERT(rc < 0 && rc != -EALREADY); return rc; } @@ -697,7 +694,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout if (conn->ksnc_flip) __swab32s(&hello->kshm_ips[i]); - if (hello->kshm_ips[i] == 0) { + if (!hello->kshm_ips[i]) { CERROR("Zero IP[%d] from ip %pI4h\n", i, &conn->ksnc_ipaddr); return -EPROTO; @@ -712,12 +709,13 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx) { /* V1.x has no KSOCK_MSG_NOOP */ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); 
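The pack helpers that follow initialize two byte counters per transmit: tx_nob, the fixed total, and tx_resid, the remainder that counts down as fragments go out (their equality is what the earlier "first sending" checksum test keys on). A simplified standalone model:

#include <stdio.h>

struct tx {
	int nob;   /* total bytes, fixed when the tx is packed */
	int resid; /* bytes still to send, counts down to zero */
};

static void pack(struct tx *tx, int hdr, int payload)
{
	tx->nob = hdr + payload; /* nob == resid marks an untouched tx */
	tx->resid = tx->nob;
}

static void sent(struct tx *tx, int n)
{
	tx->resid -= n; /* the tx completes when resid hits zero */
}

int main(void)
{
	struct tx t;

	pack(&t, 96, 4096);
	sent(&t, 1500);
	printf("%d of %d left\n", t.resid, t.nob); /* 2692 of 4192 left */
	return 0;
}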
- LASSERT(tx->tx_lnetmsg != NULL); + LASSERT(tx->tx_lnetmsg); tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr; tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t); - tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); + tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); + tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); } static void @@ -725,17 +723,19 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx) { tx->tx_iov[0].iov_base = &tx->tx_msg; - if (tx->tx_lnetmsg != NULL) { + if (tx->tx_lnetmsg) { LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; tx->tx_iov[0].iov_len = sizeof(ksock_msg_t); - tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; + tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; + tx->tx_resid = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; } else { LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); - tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); + tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); + tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); } /* Don't checksum before start sending, because packet can be piggybacked with ACK */ } @@ -745,7 +745,8 @@ ksocknal_unpack_msg_v1(ksock_msg_t *msg) { msg->ksm_csum = 0; msg->ksm_type = KSOCK_MSG_LNET; - msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; + msg->ksm_zc_cookies[0] = 0; + msg->ksm_zc_cookies[1] = 0; } static void diff --git a/drivers/staging/lustre/lnet/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile new file mode 100644 index 000000000..8c8945545 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/Makefile @@ -0,0 +1,17 @@ +obj-$(CONFIG_LNET) += libcfs.o + +libcfs-linux-objs := linux-tracefile.o linux-debug.o +libcfs-linux-objs += linux-prim.o linux-cpu.o +libcfs-linux-objs += linux-curproc.o +libcfs-linux-objs += linux-module.o +libcfs-linux-objs += linux-crypto.o +libcfs-linux-objs += linux-crypto-adler.o +libcfs-linux-objs += linux-mem.o + +libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs)) + +libcfs-all-objs := debug.o fail.o module.o tracefile.o \ + libcfs_string.o hash.o prng.o workitem.o \ + libcfs_cpu.o libcfs_mem.o libcfs_lock.o + +libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs) diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c new file mode 100644 index 000000000..c3d628bac --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/debug.c @@ -0,0 +1,560 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * libcfs/libcfs/debug.c + * + * Author: Phil Schwan + * + */ + +# define DEBUG_SUBSYSTEM S_LNET + +#include "../../include/linux/libcfs/libcfs.h" +#include "tracefile.h" + +static char debug_file_name[1024]; + +unsigned int libcfs_subsystem_debug = ~0; +EXPORT_SYMBOL(libcfs_subsystem_debug); +module_param(libcfs_subsystem_debug, int, 0644); +MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask"); + +unsigned int libcfs_debug = (D_CANTMASK | + D_NETERROR | D_HA | D_CONFIG | D_IOCTL); +EXPORT_SYMBOL(libcfs_debug); +module_param(libcfs_debug, int, 0644); +MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask"); + +static int libcfs_param_debug_mb_set(const char *val, + const struct kernel_param *kp) +{ + int rc; + unsigned num; + + rc = kstrtouint(val, 0, &num); + if (rc < 0) + return rc; + + if (!*((unsigned int *)kp->arg)) { + *((unsigned int *)kp->arg) = num; + return 0; + } + + rc = cfs_trace_set_debug_mb(num); + + if (!rc) + *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb(); + + return rc; +} + +/* While debug_mb setting look like unsigned int, in fact + * it needs quite a bunch of extra processing, so we define special + * debugmb parameter type with corresponding methods to handle this case + */ +static struct kernel_param_ops param_ops_debugmb = { + .set = libcfs_param_debug_mb_set, + .get = param_get_uint, +}; + +#define param_check_debugmb(name, p) \ + __param_check(name, p, unsigned int) + +static unsigned int libcfs_debug_mb; +module_param(libcfs_debug_mb, debugmb, 0644); +MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size."); + +unsigned int libcfs_printk = D_CANTMASK; +module_param(libcfs_printk, uint, 0644); +MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask"); + +unsigned int libcfs_console_ratelimit = 1; +module_param(libcfs_console_ratelimit, uint, 0644); +MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)"); + +static int param_set_delay_minmax(const char *val, + const struct kernel_param *kp, + long min, long max) +{ + long d; + int sec; + int rc; + + rc = kstrtoint(val, 0, &sec); + if (rc) + return -EINVAL; + + d = cfs_time_seconds(sec) / 100; + if (d < min || d > max) + return -EINVAL; + + *((unsigned int *)kp->arg) = d; + + return 0; +} + +static int param_get_delay(char *buffer, const struct kernel_param *kp) +{ + unsigned int d = *(unsigned int *)kp->arg; + + return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100)); +} + +unsigned int libcfs_console_max_delay; +unsigned int libcfs_console_min_delay; + +static int param_set_console_max_delay(const char *val, + const struct kernel_param *kp) +{ + return param_set_delay_minmax(val, kp, + libcfs_console_min_delay, INT_MAX); +} + +static struct kernel_param_ops param_ops_console_max_delay = { + .set = param_set_console_max_delay, 
+ .get = param_get_delay, +}; + +#define param_check_console_max_delay(name, p) \ + __param_check(name, p, unsigned int) + +module_param(libcfs_console_max_delay, console_max_delay, 0644); +MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)"); + +static int param_set_console_min_delay(const char *val, + const struct kernel_param *kp) +{ + return param_set_delay_minmax(val, kp, + 1, libcfs_console_max_delay); +} + +static struct kernel_param_ops param_ops_console_min_delay = { + .set = param_set_console_min_delay, + .get = param_get_delay, +}; + +#define param_check_console_min_delay(name, p) \ + __param_check(name, p, unsigned int) + +module_param(libcfs_console_min_delay, console_min_delay, 0644); +MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)"); + +static int param_set_uint_minmax(const char *val, + const struct kernel_param *kp, + unsigned int min, unsigned int max) +{ + unsigned int num; + int ret; + + if (!val) + return -EINVAL; + ret = kstrtouint(val, 0, &num); + if (ret < 0 || num < min || num > max) + return -EINVAL; + *((unsigned int *)kp->arg) = num; + return 0; +} + +static int param_set_uintpos(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, 1, -1); +} + +static struct kernel_param_ops param_ops_uintpos = { + .set = param_set_uintpos, + .get = param_get_uint, +}; + +#define param_check_uintpos(name, p) \ + __param_check(name, p, unsigned int) + +unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF; +module_param(libcfs_console_backoff, uintpos, 0644); +MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor"); + +unsigned int libcfs_debug_binary = 1; + +unsigned int libcfs_stack = 3 * THREAD_SIZE / 4; +EXPORT_SYMBOL(libcfs_stack); + +unsigned int libcfs_catastrophe; +EXPORT_SYMBOL(libcfs_catastrophe); + +unsigned int libcfs_panic_on_lbug = 1; +module_param(libcfs_panic_on_lbug, uint, 0644); +MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG"); + +static wait_queue_head_t debug_ctlwq; + +char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT; + +/* We need to pass a pointer here, but elsewhere this must be a const */ +static char *libcfs_debug_file_path; +module_param(libcfs_debug_file_path, charp, 0644); +MODULE_PARM_DESC(libcfs_debug_file_path, + "Path for dumping debug logs, set 'NONE' to prevent log dumping"); + +int libcfs_panic_in_progress; + +/* libcfs_debug_token2mask() expects the returned string in lower-case */ +static const char * +libcfs_debug_subsys2str(int subsys) +{ + switch (1 << subsys) { + default: + return NULL; + case S_UNDEFINED: + return "undefined"; + case S_MDC: + return "mdc"; + case S_MDS: + return "mds"; + case S_OSC: + return "osc"; + case S_OST: + return "ost"; + case S_CLASS: + return "class"; + case S_LOG: + return "log"; + case S_LLITE: + return "llite"; + case S_RPC: + return "rpc"; + case S_LNET: + return "lnet"; + case S_LND: + return "lnd"; + case S_PINGER: + return "pinger"; + case S_FILTER: + return "filter"; + case S_ECHO: + return "echo"; + case S_LDLM: + return "ldlm"; + case S_LOV: + return "lov"; + case S_LQUOTA: + return "lquota"; + case S_OSD: + return "osd"; + case S_LFSCK: + return "lfsck"; + case S_LMV: + return "lmv"; + case S_SEC: + return "sec"; + case S_GSS: + return "gss"; + case S_MGC: + return "mgc"; + case S_MGS: + return "mgs"; + case S_FID: + return "fid"; + case S_FLD: + return "fld"; + } +} + +/* 
libcfs_debug_token2mask() expects the returned string in lower-case */ +static const char * +libcfs_debug_dbg2str(int debug) +{ + switch (1 << debug) { + default: + return NULL; + case D_TRACE: + return "trace"; + case D_INODE: + return "inode"; + case D_SUPER: + return "super"; + case D_EXT2: + return "ext2"; + case D_MALLOC: + return "malloc"; + case D_CACHE: + return "cache"; + case D_INFO: + return "info"; + case D_IOCTL: + return "ioctl"; + case D_NETERROR: + return "neterror"; + case D_NET: + return "net"; + case D_WARNING: + return "warning"; + case D_BUFFS: + return "buffs"; + case D_OTHER: + return "other"; + case D_DENTRY: + return "dentry"; + case D_NETTRACE: + return "nettrace"; + case D_PAGE: + return "page"; + case D_DLMTRACE: + return "dlmtrace"; + case D_ERROR: + return "error"; + case D_EMERG: + return "emerg"; + case D_HA: + return "ha"; + case D_RPCTRACE: + return "rpctrace"; + case D_VFSTRACE: + return "vfstrace"; + case D_READA: + return "reada"; + case D_MMAP: + return "mmap"; + case D_CONFIG: + return "config"; + case D_CONSOLE: + return "console"; + case D_QUOTA: + return "quota"; + case D_SEC: + return "sec"; + case D_LFSCK: + return "lfsck"; + } +} + +int +libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) +{ + const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : + libcfs_debug_dbg2str; + int len = 0; + const char *token; + int i; + + if (mask == 0) { /* "0" */ + if (size > 0) + str[0] = '0'; + len = 1; + } else { /* space-separated tokens */ + for (i = 0; i < 32; i++) { + if ((mask & (1 << i)) == 0) + continue; + + token = fn(i); + if (!token) /* unused bit */ + continue; + + if (len > 0) { /* separator? */ + if (len < size) + str[len] = ' '; + len++; + } + + while (*token != 0) { + if (len < size) + str[len] = *token; + token++; + len++; + } + } + } + + /* terminate 'str' */ + if (len < size) + str[len] = 0; + else + str[size - 1] = 0; + + return len; +} + +int +libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) +{ + const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : + libcfs_debug_dbg2str; + int m = 0; + int matched; + int n; + int t; + + /* Allow a number for backwards compatibility */ + + for (n = strlen(str); n > 0; n--) + if (!isspace(str[n - 1])) + break; + matched = n; + t = sscanf(str, "%i%n", &m, &matched); + if (t >= 1 && matched == n) { + /* don't print warning for lctl set_param debug=0 or -1 */ + if (m != 0 && m != -1) + CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n"); + *mask = m; + return 0; + } + + return cfs_str2mask(str, fn, mask, is_subsys ? 
0 : D_CANTMASK, + 0xffffffff); +} + +/** + * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages() + */ +void libcfs_debug_dumplog_internal(void *arg) +{ + void *journal_info; + + journal_info = current->journal_info; + current->journal_info = NULL; + + if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) { + snprintf(debug_file_name, sizeof(debug_file_name) - 1, + "%s.%lld.%ld", libcfs_debug_file_path_arr, + (s64)ktime_get_real_seconds(), (long_ptr_t)arg); + pr_alert("LustreError: dumping log to %s\n", debug_file_name); + cfs_tracefile_dump_all_pages(debug_file_name); + libcfs_run_debug_log_upcall(debug_file_name); + } + + current->journal_info = journal_info; +} + +static int libcfs_debug_dumplog_thread(void *arg) +{ + libcfs_debug_dumplog_internal(arg); + wake_up(&debug_ctlwq); + return 0; +} + +void libcfs_debug_dumplog(void) +{ + wait_queue_t wait; + struct task_struct *dumper; + + /* we're being careful to ensure that the kernel thread is + * able to set our state to running as it exits before we + * get to schedule() + */ + init_waitqueue_entry(&wait, current); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&debug_ctlwq, &wait); + + dumper = kthread_run(libcfs_debug_dumplog_thread, + (void *)(long)current_pid(), + "libcfs_debug_dumper"); + if (IS_ERR(dumper)) + pr_err("LustreError: cannot start log dump thread: %ld\n", + PTR_ERR(dumper)); + else + schedule(); + + /* be sure to teardown if cfs_create_thread() failed */ + remove_wait_queue(&debug_ctlwq, &wait); + set_current_state(TASK_RUNNING); +} +EXPORT_SYMBOL(libcfs_debug_dumplog); + +int libcfs_debug_init(unsigned long bufsize) +{ + int rc = 0; + unsigned int max = libcfs_debug_mb; + + init_waitqueue_head(&debug_ctlwq); + + if (libcfs_console_max_delay <= 0 || /* not set by user or */ + libcfs_console_min_delay <= 0 || /* set to invalid values */ + libcfs_console_min_delay >= libcfs_console_max_delay) { + libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY; + libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY; + } + + if (libcfs_debug_file_path) { + strlcpy(libcfs_debug_file_path_arr, + libcfs_debug_file_path, + sizeof(libcfs_debug_file_path_arr)); + } + + /* If libcfs_debug_mb is set to an invalid value or uninitialized + * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES + */ + if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) { + max = TCD_MAX_PAGES; + } else { + max = max / num_possible_cpus(); + max <<= (20 - PAGE_SHIFT); + } + rc = cfs_tracefile_init(max); + + if (rc == 0) { + libcfs_register_panic_notifier(); + libcfs_debug_mb = cfs_trace_get_debug_mb(); + } + + return rc; +} + +int libcfs_debug_cleanup(void) +{ + libcfs_unregister_panic_notifier(); + cfs_tracefile_exit(); + return 0; +} + +int libcfs_debug_clear_buffer(void) +{ + cfs_trace_flush_pages(); + return 0; +} + +/* Debug markers, although printed by S_LNET should not be be marked as such. 
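The buffer sizing in libcfs_debug_init() above splits the requested megabytes across CPUs and converts to pages, falling back to TCD_MAX_PAGES when the request is out of range. The arithmetic, as a standalone sketch (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12 /* assume 4 KiB pages for this example */

/* 1 MiB = 1 << (20 - PAGE_SHIFT) pages */
static unsigned pages_per_cpu(unsigned total_mb, unsigned ncpus)
{
	return (total_mb / ncpus) << (20 - PAGE_SHIFT);
}

int main(void)
{
	printf("%u\n", pages_per_cpu(64, 8)); /* 2048 pages per CPU */
	return 0;
}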
*/ +#undef DEBUG_SUBSYSTEM +#define DEBUG_SUBSYSTEM S_UNDEFINED +int libcfs_debug_mark_buffer(const char *text) +{ + CDEBUG(D_TRACE, + "***************************************************\n"); + LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text); + CDEBUG(D_TRACE, + "***************************************************\n"); + + return 0; +} + +#undef DEBUG_SUBSYSTEM +#define DEBUG_SUBSYSTEM S_LNET diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c new file mode 100644 index 000000000..dadaf7685 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/fail.c @@ -0,0 +1,139 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see http://www.gnu.org/licenses + * + * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores, + * CA 94065 USA or visit www.oracle.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2015, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Oracle Corporation, Inc. + */ + +#include "../../include/linux/libcfs/libcfs.h" + +unsigned long cfs_fail_loc; +EXPORT_SYMBOL(cfs_fail_loc); + +unsigned int cfs_fail_val; +EXPORT_SYMBOL(cfs_fail_val); + +DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq); +EXPORT_SYMBOL(cfs_race_waitq); + +int cfs_race_state; +EXPORT_SYMBOL(cfs_race_state); + +int __cfs_fail_check_set(__u32 id, __u32 value, int set) +{ + static atomic_t cfs_fail_count = ATOMIC_INIT(0); + + LASSERT(!(id & CFS_FAIL_ONCE)); + + if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) == + (CFS_FAILED | CFS_FAIL_ONCE)) { + atomic_set(&cfs_fail_count, 0); /* paranoia */ + return 0; + } + + /* Fail 1/cfs_fail_val times */ + if (cfs_fail_loc & CFS_FAIL_RAND) { + if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0) + return 0; + } + + /* Skip the first cfs_fail_val, then fail */ + if (cfs_fail_loc & CFS_FAIL_SKIP) { + if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val) + return 0; + } + + /* check cfs_fail_val... */ + if (set == CFS_FAIL_LOC_VALUE) { + if (cfs_fail_val != -1 && cfs_fail_val != value) + return 0; + } + + /* Fail cfs_fail_val times, overridden by FAIL_ONCE */ + if (cfs_fail_loc & CFS_FAIL_SOME && + (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) { + int count = atomic_inc_return(&cfs_fail_count); + + if (count >= cfs_fail_val) { + set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); + atomic_set(&cfs_fail_count, 0); + /* we are lost race to increase */ + if (count > cfs_fail_val) + return 0; + } + } + + if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) && + (value & CFS_FAIL_ONCE)) + set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); + /* Lost race to set CFS_FAILED_BIT. 
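The filters in __cfs_fail_check_set() above compose from cfs_fail_loc mode bits: CFS_FAIL_RAND fails roughly once per cfs_fail_val calls, while CFS_FAIL_SKIP passes the first cfs_fail_val calls and fails afterwards. A userspace model of those two rules (bit values illustrative, not libcfs's):

#include <stdio.h>
#include <stdlib.h>

#define FAIL_RAND 0x1u
#define FAIL_SKIP 0x2u

static int should_fail(unsigned loc, unsigned fail_val, unsigned *count)
{
	if (loc & FAIL_RAND) /* ~1-in-fail_val chance */
		return fail_val >= 2 && rand() % fail_val == 0;
	if (loc & FAIL_SKIP) /* skip the first fail_val calls */
		return ++*count > fail_val;
	return 1;
}

int main(void)
{
	unsigned n = 0;
	int i, fails = 0;

	for (i = 0; i < 10; i++)
		fails += should_fail(FAIL_SKIP, 3, &n);
	printf("%d\n", fails); /* 7: the first 3 calls were skipped */
	return 0;
}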
*/ + if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) { + /* If CFS_FAIL_ONCE is valid, only one process can fail, + * otherwise multi-process can fail at the same time. + */ + if (cfs_fail_loc & CFS_FAIL_ONCE) + return 0; + } + + switch (set) { + case CFS_FAIL_LOC_NOSET: + case CFS_FAIL_LOC_VALUE: + break; + case CFS_FAIL_LOC_ORSET: + cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE); + break; + case CFS_FAIL_LOC_RESET: + cfs_fail_loc = value; + break; + default: + LASSERTF(0, "called with bad set %u\n", set); + break; + } + + return 1; +} +EXPORT_SYMBOL(__cfs_fail_check_set); + +int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) +{ + int ret; + + ret = __cfs_fail_check_set(id, value, set); + if (ret && likely(ms > 0)) { + CERROR("cfs_fail_timeout id %x sleeping for %dms\n", + id, ms); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(ms) / 1000); + CERROR("cfs_fail_timeout id %x awake\n", id); + } + return ret; +} +EXPORT_SYMBOL(__cfs_fail_timeout_set); diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c new file mode 100644 index 000000000..f60feb3a3 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/hash.c @@ -0,0 +1,2085 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * libcfs/libcfs/hash.c + * + * Implement a hash class for hash process in lustre system. + * + * Author: YuZhangyong + * + * 2008-08-15: Brian Behlendorf + * - Simplified API and improved documentation + * - Added per-hash feature flags: + * * CFS_HASH_DEBUG additional validation + * * CFS_HASH_REHASH dynamic rehashing + * - Added per-hash statistics + * - General performance enhancements + * + * 2009-07-31: Liang Zhen + * - move all stuff to libcfs + * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH + * - ignore hs_rwlock if without CFS_HASH_REHASH setting + * - buckets are allocated one by one(instead of contiguous memory), + * to avoid unnecessary cacheline conflict + * + * 2010-03-01: Liang Zhen + * - "bucket" is a group of hlist_head now, user can specify bucket size + * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share + * one lock for reducing memory overhead. 
+ * + * - support lockless hash, caller will take care of locks: + * avoid lock overhead for hash tables that are already protected + * by locking in the caller for another reason + * + * - support both spin_lock/rwlock for bucket: + * overhead of spinlock contention is lower than read/write + * contention of rwlock, so using spinlock to serialize operations on + * bucket is more reasonable for those frequently changed hash tables + * + * - support one-single lock mode: + * one lock to protect all hash operations to avoid overhead of + * multiple locks if hash table is always small + * + * - removed a lot of unnecessary addref & decref on hash element: + * addref & decref are atomic operations in many use-cases which + * are expensive. + * + * - support non-blocking cfs_hash_add() and cfs_hash_findadd(): + * some lustre use-cases require these functions to be strictly + * non-blocking, we need to schedule required rehash on a different + * thread on those cases. + * + * - safer rehash on large hash table + * In old implementation, rehash function will exclusively lock the + * hash table and finish rehash in one batch, it's dangerous on SMP + * system because rehash millions of elements could take long time. + * New implemented rehash can release lock and relax CPU in middle + * of rehash, it's safe for another thread to search/change on the + * hash table even it's in rehasing. + * + * - support two different refcount modes + * . hash table has refcount on element + * . hash table doesn't change refcount on adding/removing element + * + * - support long name hash table (for param-tree) + * + * - fix a bug for cfs_hash_rehash_key: + * in old implementation, cfs_hash_rehash_key could screw up the + * hash-table because @key is overwritten without any protection. + * Now we need user to define hs_keycpy for those rehash enabled + * hash tables, cfs_hash_rehash_key will overwrite hash-key + * inside lock by calling hs_keycpy. + * + * - better hash iteration: + * Now we support both locked iteration & lockless iteration of hash + * table. Also, user can break the iteration by return 1 in callback. 
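The bucket layout described in this changelog splits a hash index into a bucket number and an offset within the bucket, so one lock covers 2^bkt_bits hlist heads. A worked standalone sketch of the split, mirroring the arithmetic of cfs_hash_bd_from_key() further down:

#include <stdio.h>

struct bd { unsigned bucket, offset; };

/* table of 2^bits heads, grouped into buckets of 2^bkt_bits heads */
static struct bd bd_from_index(unsigned index, unsigned bits,
			       unsigned bkt_bits)
{
	struct bd bd;

	bd.bucket = index & ((1U << (bits - bkt_bits)) - 1);
	bd.offset = index >> (bits - bkt_bits);
	return bd;
}

int main(void)
{
	/* bits = 10 (1024 heads), bkt_bits = 3 (8 heads per bucket) */
	struct bd bd = bd_from_index(709, 10, 3);

	printf("bucket %u offset %u\n", bd.bucket, bd.offset); /* 69, 5 */
	return 0;
}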
+ */ +#include +#include + +#include "../../include/linux/libcfs/libcfs.h" + +#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 +static unsigned int warn_on_depth = 8; +module_param(warn_on_depth, uint, 0644); +MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high."); +#endif + +struct cfs_wi_sched *cfs_sched_rehash; + +static inline void +cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {} + +static inline void +cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {} + +static inline void +cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive) + __acquires(&lock->spin) +{ + spin_lock(&lock->spin); +} + +static inline void +cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive) + __releases(&lock->spin) +{ + spin_unlock(&lock->spin); +} + +static inline void +cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive) + __acquires(&lock->rw) +{ + if (!exclusive) + read_lock(&lock->rw); + else + write_lock(&lock->rw); +} + +static inline void +cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive) + __releases(&lock->rw) +{ + if (!exclusive) + read_unlock(&lock->rw); + else + write_unlock(&lock->rw); +} + +/** No lock hash */ +static struct cfs_hash_lock_ops cfs_hash_nl_lops = { + .hs_lock = cfs_hash_nl_lock, + .hs_unlock = cfs_hash_nl_unlock, + .hs_bkt_lock = cfs_hash_nl_lock, + .hs_bkt_unlock = cfs_hash_nl_unlock, +}; + +/** no bucket lock, one spinlock to protect everything */ +static struct cfs_hash_lock_ops cfs_hash_nbl_lops = { + .hs_lock = cfs_hash_spin_lock, + .hs_unlock = cfs_hash_spin_unlock, + .hs_bkt_lock = cfs_hash_nl_lock, + .hs_bkt_unlock = cfs_hash_nl_unlock, +}; + +/** spin bucket lock, rehash is enabled */ +static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = { + .hs_lock = cfs_hash_rw_lock, + .hs_unlock = cfs_hash_rw_unlock, + .hs_bkt_lock = cfs_hash_spin_lock, + .hs_bkt_unlock = cfs_hash_spin_unlock, +}; + +/** rw bucket lock, rehash is enabled */ +static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = { + .hs_lock = cfs_hash_rw_lock, + .hs_unlock = cfs_hash_rw_unlock, + .hs_bkt_lock = cfs_hash_rw_lock, + .hs_bkt_unlock = cfs_hash_rw_unlock, +}; + +/** spin bucket lock, rehash is disabled */ +static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = { + .hs_lock = cfs_hash_nl_lock, + .hs_unlock = cfs_hash_nl_unlock, + .hs_bkt_lock = cfs_hash_spin_lock, + .hs_bkt_unlock = cfs_hash_spin_unlock, +}; + +/** rw bucket lock, rehash is disabled */ +static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = { + .hs_lock = cfs_hash_nl_lock, + .hs_unlock = cfs_hash_nl_unlock, + .hs_bkt_lock = cfs_hash_rw_lock, + .hs_bkt_unlock = cfs_hash_rw_unlock, +}; + +static void +cfs_hash_lock_setup(struct cfs_hash *hs) +{ + if (cfs_hash_with_no_lock(hs)) { + hs->hs_lops = &cfs_hash_nl_lops; + + } else if (cfs_hash_with_no_bktlock(hs)) { + hs->hs_lops = &cfs_hash_nbl_lops; + spin_lock_init(&hs->hs_lock.spin); + + } else if (cfs_hash_with_rehash(hs)) { + rwlock_init(&hs->hs_lock.rw); + + if (cfs_hash_with_rw_bktlock(hs)) + hs->hs_lops = &cfs_hash_bkt_rw_lops; + else if (cfs_hash_with_spin_bktlock(hs)) + hs->hs_lops = &cfs_hash_bkt_spin_lops; + else + LBUG(); + } else { + if (cfs_hash_with_rw_bktlock(hs)) + hs->hs_lops = &cfs_hash_nr_bkt_rw_lops; + else if (cfs_hash_with_spin_bktlock(hs)) + hs->hs_lops = &cfs_hash_nr_bkt_spin_lops; + else + LBUG(); + } +} + +/** + * Simple hash head without depth tracking + * new element is always added to head of hlist + */ +struct cfs_hash_head { + struct hlist_head hh_head; /**< entries list */ +}; + +static 
int +cfs_hash_hh_hhead_size(struct cfs_hash *hs) +{ + return sizeof(struct cfs_hash_head); +} + +static struct hlist_head * +cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) +{ + struct cfs_hash_head *head; + + head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].hh_head; +} + +static int +cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); + return -1; /* unknown depth */ +} + +static int +cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + hlist_del_init(hnode); + return -1; /* unknown depth */ +} + +/** + * Simple hash head with depth tracking + * new element is always added to head of hlist + */ +struct cfs_hash_head_dep { + struct hlist_head hd_head; /**< entries list */ + unsigned int hd_depth; /**< list length */ +}; + +static int +cfs_hash_hd_hhead_size(struct cfs_hash *hs) +{ + return sizeof(struct cfs_hash_head_dep); +} + +static struct hlist_head * +cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) +{ + struct cfs_hash_head_dep *head; + + head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].hd_head; +} + +static int +cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + struct cfs_hash_head_dep *hh; + + hh = container_of(cfs_hash_hd_hhead(hs, bd), + struct cfs_hash_head_dep, hd_head); + hlist_add_head(hnode, &hh->hd_head); + return ++hh->hd_depth; +} + +static int +cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + struct cfs_hash_head_dep *hh; + + hh = container_of(cfs_hash_hd_hhead(hs, bd), + struct cfs_hash_head_dep, hd_head); + hlist_del_init(hnode); + return --hh->hd_depth; +} + +/** + * double links hash head without depth tracking + * new element is always added to tail of hlist + */ +struct cfs_hash_dhead { + struct hlist_head dh_head; /**< entries list */ + struct hlist_node *dh_tail; /**< the last entry */ +}; + +static int +cfs_hash_dh_hhead_size(struct cfs_hash *hs) +{ + return sizeof(struct cfs_hash_dhead); +} + +static struct hlist_head * +cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) +{ + struct cfs_hash_dhead *head; + + head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].dh_head; +} + +static int +cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + struct cfs_hash_dhead *dh; + + dh = container_of(cfs_hash_dh_hhead(hs, bd), + struct cfs_hash_dhead, dh_head); + if (dh->dh_tail) /* not empty */ + hlist_add_behind(hnode, dh->dh_tail); + else /* empty list */ + hlist_add_head(hnode, &dh->dh_head); + dh->dh_tail = hnode; + return -1; /* unknown depth */ +} + +static int +cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnd) +{ + struct cfs_hash_dhead *dh; + + dh = container_of(cfs_hash_dh_hhead(hs, bd), + struct cfs_hash_dhead, dh_head); + if (!hnd->next) { /* it's the tail */ + dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? 
NULL : + container_of(hnd->pprev, struct hlist_node, next); + } + hlist_del_init(hnd); + return -1; /* unknown depth */ +} + +/** + * double links hash head with depth tracking + * new element is always added to tail of hlist + */ +struct cfs_hash_dhead_dep { + struct hlist_head dd_head; /**< entries list */ + struct hlist_node *dd_tail; /**< the last entry */ + unsigned int dd_depth; /**< list length */ +}; + +static int +cfs_hash_dd_hhead_size(struct cfs_hash *hs) +{ + return sizeof(struct cfs_hash_dhead_dep); +} + +static struct hlist_head * +cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) +{ + struct cfs_hash_dhead_dep *head; + + head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0]; + return &head[bd->bd_offset].dd_head; +} + +static int +cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + struct cfs_hash_dhead_dep *dh; + + dh = container_of(cfs_hash_dd_hhead(hs, bd), + struct cfs_hash_dhead_dep, dd_head); + if (dh->dd_tail) /* not empty */ + hlist_add_behind(hnode, dh->dd_tail); + else /* empty list */ + hlist_add_head(hnode, &dh->dd_head); + dh->dd_tail = hnode; + return ++dh->dd_depth; +} + +static int +cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnd) +{ + struct cfs_hash_dhead_dep *dh; + + dh = container_of(cfs_hash_dd_hhead(hs, bd), + struct cfs_hash_dhead_dep, dd_head); + if (!hnd->next) { /* it's the tail */ + dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL : + container_of(hnd->pprev, struct hlist_node, next); + } + hlist_del_init(hnd); + return --dh->dd_depth; +} + +static struct cfs_hash_hlist_ops cfs_hash_hh_hops = { + .hop_hhead = cfs_hash_hh_hhead, + .hop_hhead_size = cfs_hash_hh_hhead_size, + .hop_hnode_add = cfs_hash_hh_hnode_add, + .hop_hnode_del = cfs_hash_hh_hnode_del, +}; + +static struct cfs_hash_hlist_ops cfs_hash_hd_hops = { + .hop_hhead = cfs_hash_hd_hhead, + .hop_hhead_size = cfs_hash_hd_hhead_size, + .hop_hnode_add = cfs_hash_hd_hnode_add, + .hop_hnode_del = cfs_hash_hd_hnode_del, +}; + +static struct cfs_hash_hlist_ops cfs_hash_dh_hops = { + .hop_hhead = cfs_hash_dh_hhead, + .hop_hhead_size = cfs_hash_dh_hhead_size, + .hop_hnode_add = cfs_hash_dh_hnode_add, + .hop_hnode_del = cfs_hash_dh_hnode_del, +}; + +static struct cfs_hash_hlist_ops cfs_hash_dd_hops = { + .hop_hhead = cfs_hash_dd_hhead, + .hop_hhead_size = cfs_hash_dd_hhead_size, + .hop_hnode_add = cfs_hash_dd_hnode_add, + .hop_hnode_del = cfs_hash_dd_hnode_del, +}; + +static void +cfs_hash_hlist_setup(struct cfs_hash *hs) +{ + if (cfs_hash_with_add_tail(hs)) { + hs->hs_hops = cfs_hash_with_depth(hs) ? + &cfs_hash_dd_hops : &cfs_hash_dh_hops; + } else { + hs->hs_hops = cfs_hash_with_depth(hs) ? 
+ &cfs_hash_hd_hops : &cfs_hash_hh_hops; + } +} + +static void +cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts, + unsigned int bits, const void *key, struct cfs_hash_bd *bd) +{ + unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1); + + LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits); + + bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)]; + bd->bd_offset = index >> (bits - hs->hs_bkt_bits); +} + +void +cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) +{ + /* NB: caller should hold hs->hs_rwlock if REHASH is set */ + if (likely(!hs->hs_rehash_buckets)) { + cfs_hash_bd_from_key(hs, hs->hs_buckets, + hs->hs_cur_bits, key, bd); + } else { + LASSERT(hs->hs_rehash_bits != 0); + cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, + hs->hs_rehash_bits, key, bd); + } +} +EXPORT_SYMBOL(cfs_hash_bd_get); + +static inline void +cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur) +{ + if (likely(dep_cur <= bd->bd_bucket->hsb_depmax)) + return; + + bd->bd_bucket->hsb_depmax = dep_cur; +# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 + if (likely(warn_on_depth == 0 || + max(warn_on_depth, hs->hs_dep_max) >= dep_cur)) + return; + + spin_lock(&hs->hs_dep_lock); + hs->hs_dep_max = dep_cur; + hs->hs_dep_bkt = bd->bd_bucket->hsb_index; + hs->hs_dep_off = bd->bd_offset; + hs->hs_dep_bits = hs->hs_cur_bits; + spin_unlock(&hs->hs_dep_lock); + + cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi); +# endif +} + +void +cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + int rc; + + rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); + cfs_hash_bd_dep_record(hs, bd, rc); + bd->bd_bucket->hsb_version++; + if (unlikely(bd->bd_bucket->hsb_version == 0)) + bd->bd_bucket->hsb_version++; + bd->bd_bucket->hsb_count++; + + if (cfs_hash_with_counter(hs)) + atomic_inc(&hs->hs_count); + if (!cfs_hash_with_no_itemref(hs)) + cfs_hash_get(hs, hnode); +} +EXPORT_SYMBOL(cfs_hash_bd_add_locked); + +void +cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode) +{ + hs->hs_hops->hop_hnode_del(hs, bd, hnode); + + LASSERT(bd->bd_bucket->hsb_count > 0); + bd->bd_bucket->hsb_count--; + bd->bd_bucket->hsb_version++; + if (unlikely(bd->bd_bucket->hsb_version == 0)) + bd->bd_bucket->hsb_version++; + + if (cfs_hash_with_counter(hs)) { + LASSERT(atomic_read(&hs->hs_count) > 0); + atomic_dec(&hs->hs_count); + } + if (!cfs_hash_with_no_itemref(hs)) + cfs_hash_put_locked(hs, hnode); +} +EXPORT_SYMBOL(cfs_hash_bd_del_locked); + +void +cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, + struct cfs_hash_bd *bd_new, struct hlist_node *hnode) +{ + struct cfs_hash_bucket *obkt = bd_old->bd_bucket; + struct cfs_hash_bucket *nbkt = bd_new->bd_bucket; + int rc; + + if (cfs_hash_bd_compare(bd_old, bd_new) == 0) + return; + + /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops + * in cfs_hash_bd_del/add_locked + */ + hs->hs_hops->hop_hnode_del(hs, bd_old, hnode); + rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode); + cfs_hash_bd_dep_record(hs, bd_new, rc); + + LASSERT(obkt->hsb_count > 0); + obkt->hsb_count--; + obkt->hsb_version++; + if (unlikely(obkt->hsb_version == 0)) + obkt->hsb_version++; + nbkt->hsb_count++; + nbkt->hsb_version++; + if (unlikely(nbkt->hsb_version == 0)) + nbkt->hsb_version++; +} + +enum { + /** always set, for sanity (avoid ZERO intent) */ + CFS_HS_LOOKUP_MASK_FIND = BIT(0), + /** return entry with 
a ref */ + CFS_HS_LOOKUP_MASK_REF = BIT(1), + /** add entry if not existing */ + CFS_HS_LOOKUP_MASK_ADD = BIT(2), + /** delete entry, ignore other masks */ + CFS_HS_LOOKUP_MASK_DEL = BIT(3), +}; + +enum cfs_hash_lookup_intent { + /** return item w/o refcount */ + CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND, + /** return item with refcount */ + CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND | + CFS_HS_LOOKUP_MASK_REF), + /** return item w/o refcount if existed, otherwise add */ + CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND | + CFS_HS_LOOKUP_MASK_ADD), + /** return item with refcount if existed, otherwise add */ + CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND | + CFS_HS_LOOKUP_MASK_ADD), + /** delete if existed */ + CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND | + CFS_HS_LOOKUP_MASK_DEL) +}; + +static struct hlist_node * +cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, + const void *key, struct hlist_node *hnode, + enum cfs_hash_lookup_intent intent) + +{ + struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); + struct hlist_node *ehnode; + struct hlist_node *match; + int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; + + /* with this function, we can avoid a lot of useless refcount ops, + * which are expensive atomic operations most time. + */ + match = intent_add ? NULL : hnode; + hlist_for_each(ehnode, hhead) { + if (!cfs_hash_keycmp(hs, key, ehnode)) + continue; + + if (match && match != ehnode) /* can't match */ + continue; + + /* match and ... */ + if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) { + cfs_hash_bd_del_locked(hs, bd, ehnode); + return ehnode; + } + + /* caller wants refcount? */ + if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0) + cfs_hash_get(hs, ehnode); + return ehnode; + } + /* no match item */ + if (!intent_add) + return NULL; + + LASSERT(hnode); + cfs_hash_bd_add_locked(hs, bd, hnode); + return hnode; +} + +struct hlist_node * +cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, + const void *key) +{ + return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, + CFS_HS_LOOKUP_IT_FIND); +} +EXPORT_SYMBOL(cfs_hash_bd_lookup_locked); + +struct hlist_node * +cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, + const void *key) +{ + return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, + CFS_HS_LOOKUP_IT_PEEK); +} +EXPORT_SYMBOL(cfs_hash_bd_peek_locked); + +static void +cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, + unsigned n, int excl) +{ + struct cfs_hash_bucket *prev = NULL; + int i; + + /** + * bds must be ascendantly ordered by bd->bd_bucket->hsb_index. + * NB: it's possible that several bds point to the same bucket but + * have different bd::bd_offset, so need take care of deadlock. 
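+ * Always taking the bucket locks in ascending hsb_index order (as
+ * asserted below) means two threads locking overlapping sets of
+ * buckets can never each hold one lock while waiting for the other,
+ * which rules out the classic lock-ordering deadlock.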
+ */ + cfs_hash_for_each_bd(bds, n, i) { + if (prev == bds[i].bd_bucket) + continue; + + LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index); + cfs_hash_bd_lock(hs, &bds[i], excl); + prev = bds[i].bd_bucket; + } +} + +static void +cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, + unsigned n, int excl) +{ + struct cfs_hash_bucket *prev = NULL; + int i; + + cfs_hash_for_each_bd(bds, n, i) { + if (prev != bds[i].bd_bucket) { + cfs_hash_bd_unlock(hs, &bds[i], excl); + prev = bds[i].bd_bucket; + } + } +} + +static struct hlist_node * +cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, + unsigned n, const void *key) +{ + struct hlist_node *ehnode; + unsigned i; + + cfs_hash_for_each_bd(bds, n, i) { + ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, + CFS_HS_LOOKUP_IT_FIND); + if (ehnode) + return ehnode; + } + return NULL; +} + +static struct hlist_node * +cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, + unsigned n, const void *key, + struct hlist_node *hnode, int noref) +{ + struct hlist_node *ehnode; + int intent; + unsigned i; + + LASSERT(hnode); + intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; + + cfs_hash_for_each_bd(bds, n, i) { + ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, + NULL, intent); + if (ehnode) + return ehnode; + } + + if (i == 1) { /* only one bucket */ + cfs_hash_bd_add_locked(hs, &bds[0], hnode); + } else { + struct cfs_hash_bd mybd; + + cfs_hash_bd_get(hs, key, &mybd); + cfs_hash_bd_add_locked(hs, &mybd, hnode); + } + + return hnode; +} + +static struct hlist_node * +cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, + unsigned n, const void *key, + struct hlist_node *hnode) +{ + struct hlist_node *ehnode; + unsigned int i; + + cfs_hash_for_each_bd(bds, n, i) { + ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, + CFS_HS_LOOKUP_IT_FINDDEL); + if (ehnode) + return ehnode; + } + return NULL; +} + +static void +cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) +{ + int rc; + + if (!bd2->bd_bucket) + return; + + if (!bd1->bd_bucket) { + *bd1 = *bd2; + bd2->bd_bucket = NULL; + return; + } + + rc = cfs_hash_bd_compare(bd1, bd2); + if (!rc) + bd2->bd_bucket = NULL; + else if (rc > 0) + swap(*bd1, *bd2); /* swap bd1 and bd2 */ +} + +void +cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, + struct cfs_hash_bd *bds) +{ + /* NB: caller should hold hs_lock.rw if REHASH is set */ + cfs_hash_bd_from_key(hs, hs->hs_buckets, + hs->hs_cur_bits, key, &bds[0]); + if (likely(!hs->hs_rehash_buckets)) { + /* no rehash or not rehashing */ + bds[1].bd_bucket = NULL; + return; + } + + LASSERT(hs->hs_rehash_bits != 0); + cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, + hs->hs_rehash_bits, key, &bds[1]); + + cfs_hash_bd_order(&bds[0], &bds[1]); +} + +void +cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) +{ + cfs_hash_multi_bd_lock(hs, bds, 2, excl); +} + +void +cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) +{ + cfs_hash_multi_bd_unlock(hs, bds, 2, excl); +} + +struct hlist_node * +cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, + const void *key) +{ + return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key); +} + +struct hlist_node * +cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, + const void *key, struct hlist_node *hnode, + int noref) +{ + return cfs_hash_multi_bd_findadd_locked(hs, 
bds, 2, key, + hnode, noref); +} + +struct hlist_node * +cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, + const void *key, struct hlist_node *hnode) +{ + return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); +} + +static void +cfs_hash_buckets_free(struct cfs_hash_bucket **buckets, + int bkt_size, int prev_size, int size) +{ + int i; + + for (i = prev_size; i < size; i++) { + if (buckets[i]) + LIBCFS_FREE(buckets[i], bkt_size); + } + + LIBCFS_FREE(buckets, sizeof(buckets[0]) * size); +} + +/* + * Create or grow bucket memory. Return old_buckets if no allocation was + * needed, the newly allocated buckets if allocation was needed and + * successful, and NULL on error. + */ +static struct cfs_hash_bucket ** +cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, + unsigned int old_size, unsigned int new_size) +{ + struct cfs_hash_bucket **new_bkts; + int i; + + LASSERT(old_size == 0 || old_bkts); + + if (old_bkts && old_size == new_size) + return old_bkts; + + LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size); + if (!new_bkts) + return NULL; + + if (old_bkts) { + memcpy(new_bkts, old_bkts, + min(old_size, new_size) * sizeof(*old_bkts)); + } + + for (i = old_size; i < new_size; i++) { + struct hlist_head *hhead; + struct cfs_hash_bd bd; + + LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs)); + if (!new_bkts[i]) { + cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs), + old_size, new_size); + return NULL; + } + + new_bkts[i]->hsb_index = i; + new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ + new_bkts[i]->hsb_depmax = -1; /* unknown */ + bd.bd_bucket = new_bkts[i]; + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) + INIT_HLIST_HEAD(hhead); + + if (cfs_hash_with_no_lock(hs) || + cfs_hash_with_no_bktlock(hs)) + continue; + + if (cfs_hash_with_rw_bktlock(hs)) + rwlock_init(&new_bkts[i]->hsb_lock.rw); + else if (cfs_hash_with_spin_bktlock(hs)) + spin_lock_init(&new_bkts[i]->hsb_lock.spin); + else + LBUG(); /* invalid use-case */ + } + return new_bkts; +} + +/** + * Initialize new libcfs hash, where: + * @name - Descriptive hash name + * @cur_bits - Initial hash table size, in bits + * @max_bits - Maximum allowed hash table resize, in bits + * @ops - Registered hash table operations + * @flags - CFS_HASH_REHASH enable dynamic hash resizing + * - CFS_HASH_SORT enable chained hash sort + */ +static int cfs_hash_rehash_worker(cfs_workitem_t *wi); + +#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 +static int cfs_hash_dep_print(cfs_workitem_t *wi) +{ + struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi); + int dep; + int bkt; + int off; + int bits; + + spin_lock(&hs->hs_dep_lock); + dep = hs->hs_dep_max; + bkt = hs->hs_dep_bkt; + off = hs->hs_dep_off; + bits = hs->hs_dep_bits; + spin_unlock(&hs->hs_dep_lock); + + LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n", + hs->hs_name, bits, dep, bkt, off); + spin_lock(&hs->hs_dep_lock); + hs->hs_dep_bits = 0; /* mark as workitem done */ + spin_unlock(&hs->hs_dep_lock); + return 0; +} + +static void cfs_hash_depth_wi_init(struct cfs_hash *hs) +{ + spin_lock_init(&hs->hs_dep_lock); + cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print); +} + +static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) +{ + if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi)) + return; + + spin_lock(&hs->hs_dep_lock); + while (hs->hs_dep_bits != 0) { + spin_unlock(&hs->hs_dep_lock); + cond_resched(); + spin_lock(&hs->hs_dep_lock); + } + spin_unlock(&hs->hs_dep_lock); +} + 
+#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */ + +static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {} +static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {} + +#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ + +struct cfs_hash * +cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, + unsigned bkt_bits, unsigned extra_bytes, + unsigned min_theta, unsigned max_theta, + struct cfs_hash_ops *ops, unsigned flags) +{ + struct cfs_hash *hs; + int len; + + CLASSERT(CFS_HASH_THETA_BITS < 15); + + LASSERT(name); + LASSERT(ops->hs_key); + LASSERT(ops->hs_hash); + LASSERT(ops->hs_object); + LASSERT(ops->hs_keycmp); + LASSERT(ops->hs_get); + LASSERT(ops->hs_put_locked); + + if ((flags & CFS_HASH_REHASH) != 0) + flags |= CFS_HASH_COUNTER; /* must have counter */ + + LASSERT(cur_bits > 0); + LASSERT(cur_bits >= bkt_bits); + LASSERT(max_bits >= cur_bits && max_bits < 31); + LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits)); + LASSERT(ergo((flags & CFS_HASH_REHASH) != 0, + (flags & CFS_HASH_NO_LOCK) == 0)); + LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy)); + + len = (flags & CFS_HASH_BIGNAME) == 0 ? + CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; + LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); + if (!hs) + return NULL; + + strlcpy(hs->hs_name, name, len); + hs->hs_flags = flags; + + atomic_set(&hs->hs_refcount, 1); + atomic_set(&hs->hs_count, 0); + + cfs_hash_lock_setup(hs); + cfs_hash_hlist_setup(hs); + + hs->hs_cur_bits = (__u8)cur_bits; + hs->hs_min_bits = (__u8)cur_bits; + hs->hs_max_bits = (__u8)max_bits; + hs->hs_bkt_bits = (__u8)bkt_bits; + + hs->hs_ops = ops; + hs->hs_extra_bytes = extra_bytes; + hs->hs_rehash_bits = 0; + cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker); + cfs_hash_depth_wi_init(hs); + + if (cfs_hash_with_rehash(hs)) + __cfs_hash_set_theta(hs, min_theta, max_theta); + + hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0, + CFS_HASH_NBKT(hs)); + if (hs->hs_buckets) + return hs; + + LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len])); + return NULL; +} +EXPORT_SYMBOL(cfs_hash_create); + +/** + * Cleanup libcfs hash @hs. + */ +static void +cfs_hash_destroy(struct cfs_hash *hs) +{ + struct hlist_node *hnode; + struct hlist_node *pos; + struct cfs_hash_bd bd; + int i; + + LASSERT(hs); + LASSERT(!cfs_hash_is_exiting(hs) && + !cfs_hash_is_iterating(hs)); + + /** + * prohibit further rehashes, don't need any lock because + * I'm the only (last) one can change it. 
+ */ + hs->hs_exiting = 1; + if (cfs_hash_with_rehash(hs)) + cfs_hash_rehash_cancel(hs); + + cfs_hash_depth_wi_cancel(hs); + /* rehash should be done/canceled */ + LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets); + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct hlist_head *hhead; + + LASSERT(bd.bd_bucket); + /* no need to take this lock, just for consistent code */ + cfs_hash_bd_lock(hs, &bd, 1); + + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + hlist_for_each_safe(hnode, pos, hhead) { + LASSERTF(!cfs_hash_with_assert_empty(hs), + "hash %s bucket %u(%u) is not empty: %u items left\n", + hs->hs_name, bd.bd_bucket->hsb_index, + bd.bd_offset, bd.bd_bucket->hsb_count); + /* can't assert key validity, because we + * can interrupt rehash + */ + cfs_hash_bd_del_locked(hs, &bd, hnode); + cfs_hash_exit(hs, hnode); + } + } + LASSERT(bd.bd_bucket->hsb_count == 0); + cfs_hash_bd_unlock(hs, &bd, 1); + cond_resched(); + } + + LASSERT(atomic_read(&hs->hs_count) == 0); + + cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs), + 0, CFS_HASH_NBKT(hs)); + i = cfs_hash_with_bigname(hs) ? + CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN; + LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i])); +} + +struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs) +{ + if (atomic_inc_not_zero(&hs->hs_refcount)) + return hs; + return NULL; +} +EXPORT_SYMBOL(cfs_hash_getref); + +void cfs_hash_putref(struct cfs_hash *hs) +{ + if (atomic_dec_and_test(&hs->hs_refcount)) + cfs_hash_destroy(hs); +} +EXPORT_SYMBOL(cfs_hash_putref); + +static inline int +cfs_hash_rehash_bits(struct cfs_hash *hs) +{ + if (cfs_hash_with_no_lock(hs) || + !cfs_hash_with_rehash(hs)) + return -EOPNOTSUPP; + + if (unlikely(cfs_hash_is_exiting(hs))) + return -ESRCH; + + if (unlikely(cfs_hash_is_rehashing(hs))) + return -EALREADY; + + if (unlikely(cfs_hash_is_iterating(hs))) + return -EAGAIN; + + /* XXX: need to handle case with max_theta != 2.0 + * and the case with min_theta != 0.5 + */ + if ((hs->hs_cur_bits < hs->hs_max_bits) && + (__cfs_hash_theta(hs) > hs->hs_max_theta)) + return hs->hs_cur_bits + 1; + + if (!cfs_hash_with_shrink(hs)) + return 0; + + if ((hs->hs_cur_bits > hs->hs_min_bits) && + (__cfs_hash_theta(hs) < hs->hs_min_theta)) + return hs->hs_cur_bits - 1; + + return 0; +} + +/** + * don't allow inline rehash if: + * - user wants non-blocking change (add/del) on hash table + * - too many elements + */ +static inline int +cfs_hash_rehash_inline(struct cfs_hash *hs) +{ + return !cfs_hash_with_nblk_change(hs) && + atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG; +} + +/** + * Add item @hnode to libcfs hash @hs using @key. The registered + * ops->hs_get function will be called when the item is added.
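+ *
+ * A minimal usage sketch (illustrative only, not part of this patch;
+ * "my_obj", "my_ops" and the chosen bit sizes are hypothetical, and
+ * CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA and CFS_HASH_DEFAULT are the
+ * constants from libcfs_hash.h):
+ *
+ *	struct my_obj {
+ *		u64			mo_key;
+ *		struct hlist_node	mo_hnode;
+ *	};
+ *
+ *	hs = cfs_hash_create("example", 5, 10, 5, 0,
+ *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+ *			     &my_ops, CFS_HASH_DEFAULT);
+ *	if (hs)
+ *		cfs_hash_add(hs, &obj->mo_key, &obj->mo_hnode);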
+ */ +void +cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) +{ + struct cfs_hash_bd bd; + int bits; + + LASSERT(hlist_unhashed(hnode)); + + cfs_hash_lock(hs, 0); + cfs_hash_bd_get_and_lock(hs, key, &bd, 1); + + cfs_hash_key_validate(hs, key, hnode); + cfs_hash_bd_add_locked(hs, &bd, hnode); + + cfs_hash_bd_unlock(hs, &bd, 1); + + bits = cfs_hash_rehash_bits(hs); + cfs_hash_unlock(hs, 0); + if (bits > 0) + cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); +} +EXPORT_SYMBOL(cfs_hash_add); + +static struct hlist_node * +cfs_hash_find_or_add(struct cfs_hash *hs, const void *key, + struct hlist_node *hnode, int noref) +{ + struct hlist_node *ehnode; + struct cfs_hash_bd bds[2]; + int bits = 0; + + LASSERT(hlist_unhashed(hnode)); + + cfs_hash_lock(hs, 0); + cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); + + cfs_hash_key_validate(hs, key, hnode); + ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key, + hnode, noref); + cfs_hash_dual_bd_unlock(hs, bds, 1); + + if (ehnode == hnode) /* new item added */ + bits = cfs_hash_rehash_bits(hs); + cfs_hash_unlock(hs, 0); + if (bits > 0) + cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); + + return ehnode; +} + +/** + * Add item @hnode to libcfs hash @hs using @key. The registered + * ops->hs_get function will be called if the item was added. + * Returns 0 on success or -EALREADY on key collisions. + */ +int +cfs_hash_add_unique(struct cfs_hash *hs, const void *key, + struct hlist_node *hnode) +{ + return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? + -EALREADY : 0; +} +EXPORT_SYMBOL(cfs_hash_add_unique); + +/** + * Add item @hnode to libcfs hash @hs using @key. If this @key + * already exists in the hash then ops->hs_get will be called on the + * conflicting entry and that entry will be returned to the caller. + * Otherwise ops->hs_get is called on the item which was added. + */ +void * +cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, + struct hlist_node *hnode) +{ + hnode = cfs_hash_find_or_add(hs, key, hnode, 0); + + return cfs_hash_object(hs, hnode); +} +EXPORT_SYMBOL(cfs_hash_findadd_unique); + +/** + * Delete item @hnode from the libcfs hash @hs using @key. The @key + * is required to ensure the correct hash bucket is locked since there + * is no direct linkage from the item to the bucket. The object + * removed from the hash will be returned and ops->hs_put is called + * on the removed object. + */ +void * +cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) +{ + void *obj = NULL; + int bits = 0; + struct cfs_hash_bd bds[2]; + + cfs_hash_lock(hs, 0); + cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); + + /* NB: do nothing if @hnode is not in hash table */ + if (!hnode || !hlist_unhashed(hnode)) { + if (!bds[1].bd_bucket && hnode) { + cfs_hash_bd_del_locked(hs, &bds[0], hnode); + } else { + hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, + key, hnode); + } + } + + if (hnode) { + obj = cfs_hash_object(hs, hnode); + bits = cfs_hash_rehash_bits(hs); + } + + cfs_hash_dual_bd_unlock(hs, bds, 1); + cfs_hash_unlock(hs, 0); + if (bits > 0) + cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); + + return obj; +} +EXPORT_SYMBOL(cfs_hash_del); + +/** + * Delete item given @key in libcfs hash @hs. The first @key found in + * the hash will be removed; if the key exists multiple times in the hash + * @hs this function must be called once per key. The removed object + * will be returned and ops->hs_put is called on the removed object.
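+ *
+ * Illustrative only ("key" and my_obj_put() are hypothetical): because
+ * each call removes a single instance, draining a duplicated key could
+ * look like
+ *
+ *	while ((obj = cfs_hash_del_key(hs, &key)) != NULL)
+ *		my_obj_put(obj);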
+ */ +void * +cfs_hash_del_key(struct cfs_hash *hs, const void *key) +{ + return cfs_hash_del(hs, key, NULL); +} +EXPORT_SYMBOL(cfs_hash_del_key); + +/** + * Lookup an item using @key in the libcfs hash @hs and return it. + * If the @key is found in the hash, hs->hs_get() is called and the + * matching object is returned. It is the caller's responsibility + * to call the counterpart ops->hs_put using the cfs_hash_put() macro + * when finished with the object. If the @key was not found + * in the hash @hs NULL is returned. + */ +void * +cfs_hash_lookup(struct cfs_hash *hs, const void *key) +{ + void *obj = NULL; + struct hlist_node *hnode; + struct cfs_hash_bd bds[2]; + + cfs_hash_lock(hs, 0); + cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); + + hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key); + if (hnode) + obj = cfs_hash_object(hs, hnode); + + cfs_hash_dual_bd_unlock(hs, bds, 0); + cfs_hash_unlock(hs, 0); + + return obj; +} +EXPORT_SYMBOL(cfs_hash_lookup); + +static void +cfs_hash_for_each_enter(struct cfs_hash *hs) +{ + LASSERT(!cfs_hash_is_exiting(hs)); + + if (!cfs_hash_with_rehash(hs)) + return; + /* + * NB: it's a race on cfs_hash::hs_iterating, but it doesn't matter + * because it's just an unreliable signal to the rehash thread; the + * rehash thread will try to finish the rehash ASAP when it sees this. + */ + hs->hs_iterating = 1; + + cfs_hash_lock(hs, 1); + hs->hs_iterators++; + + /* NB: iteration is mostly called by a service thread; instead of + * blocking the service thread we tend to cancel the pending + * rehash request and relaunch it after the iteration + */ + if (cfs_hash_is_rehashing(hs)) + cfs_hash_rehash_cancel_locked(hs); + cfs_hash_unlock(hs, 1); +} + +static void +cfs_hash_for_each_exit(struct cfs_hash *hs) +{ + int remained; + int bits; + + if (!cfs_hash_with_rehash(hs)) + return; + cfs_hash_lock(hs, 1); + remained = --hs->hs_iterators; + bits = cfs_hash_rehash_bits(hs); + cfs_hash_unlock(hs, 1); + /* NB: it's a race on cfs_hash::hs_iterating, see above */ + if (remained == 0) + hs->hs_iterating = 0; + if (bits > 0) { + cfs_hash_rehash(hs, atomic_read(&hs->hs_count) < + CFS_HASH_LOOP_HOG); + } +} + +/** + * For each item in the libcfs hash @hs call the passed callback @func + * and pass to it as an argument each hash item and the private @data. + * + * a) the iteration function itself may sleep! + * b) during the callback: + * . the bucket lock is held so the callback must never sleep. + * . 
if @remove_safe is true, the user can remove the current item by + * calling cfs_hash_bd_del_locked + */ +static __u64 +cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, + void *data, int remove_safe) +{ + struct hlist_node *hnode; + struct hlist_node *pos; + struct cfs_hash_bd bd; + __u64 count = 0; + int excl = !!remove_safe; + int loop = 0; + int i; + + cfs_hash_for_each_enter(hs); + + cfs_hash_lock(hs, 0); + LASSERT(!cfs_hash_is_rehashing(hs)); + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct hlist_head *hhead; + + cfs_hash_bd_lock(hs, &bd, excl); + if (!func) { /* only glimpse size */ + count += bd.bd_bucket->hsb_count; + cfs_hash_bd_unlock(hs, &bd, excl); + continue; + } + + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + hlist_for_each_safe(hnode, pos, hhead) { + cfs_hash_bucket_validate(hs, &bd, hnode); + count++; + loop++; + if (func(hs, &bd, hnode, data)) { + cfs_hash_bd_unlock(hs, &bd, excl); + goto out; + } + } + } + cfs_hash_bd_unlock(hs, &bd, excl); + if (loop < CFS_HASH_LOOP_HOG) + continue; + loop = 0; + cfs_hash_unlock(hs, 0); + cond_resched(); + cfs_hash_lock(hs, 0); + } + out: + cfs_hash_unlock(hs, 0); + + cfs_hash_for_each_exit(hs); + return count; +} + +struct cfs_hash_cond_arg { + cfs_hash_cond_opt_cb_t func; + void *arg; +}; + +static int +cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode, void *data) +{ + struct cfs_hash_cond_arg *cond = data; + + if (cond->func(cfs_hash_object(hs, hnode), cond->arg)) + cfs_hash_bd_del_locked(hs, bd, hnode); + return 0; +} + +/** + * Delete an item from the libcfs hash @hs when @func returns true. + * The write lock is held during the loop over each bucket to prevent + * any object from being referenced while it is removed. + */ +void +cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data) +{ + struct cfs_hash_cond_arg arg = { + .func = func, + .arg = data, + }; + + cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1); +} +EXPORT_SYMBOL(cfs_hash_cond_del); + +void +cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, + void *data) +{ + cfs_hash_for_each_tight(hs, func, data, 0); +} +EXPORT_SYMBOL(cfs_hash_for_each); + +void +cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, + void *data) +{ + cfs_hash_for_each_tight(hs, func, data, 1); +} +EXPORT_SYMBOL(cfs_hash_for_each_safe); + +static int +cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd, + struct hlist_node *hnode, void *data) +{ + *(int *)data = 0; + return 1; /* return 1 to break the loop */ +} + +int +cfs_hash_is_empty(struct cfs_hash *hs) +{ + int empty = 1; + + cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0); + return empty; +} +EXPORT_SYMBOL(cfs_hash_is_empty); + +__u64 +cfs_hash_size_get(struct cfs_hash *hs) +{ + return cfs_hash_with_counter(hs) ? + atomic_read(&hs->hs_count) : + cfs_hash_for_each_tight(hs, NULL, NULL, 0); +} +EXPORT_SYMBOL(cfs_hash_size_get); + +/* + * cfs_hash_for_each_relax: + * Iterate the hash table and call @func on each item without + * holding any lock. This function can't guarantee to finish the + * iteration if these features are enabled: + * + * a. if rehash_key is enabled, an item can be moved from + * one bucket to another bucket + * b. the user can remove a non-zero-ref item from the hash table, + * so the item can disappear from the hash table; even worse, + * it's possible that the user changed the key and inserted it into + * another hash bucket. + * There's no way for us to finish the iteration correctly in the + * previous two cases, so the iteration has to be stopped on change. 
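+ *
+ * For illustration only ("count_cb" is hypothetical), a callback
+ * passed to these iterators has the following shape; returning
+ * non-zero stops the walk:
+ *
+ *	static int count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+ *			    struct hlist_node *hnode, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;
+ *	}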
+ */ +static int +cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, + void *data) +{ + struct hlist_node *hnode; + struct hlist_node *tmp; + struct cfs_hash_bd bd; + __u32 version; + int count = 0; + int stop_on_change; + int rc = 0; /* a bucket hlist may be empty, so initialize */ + int i; + + stop_on_change = cfs_hash_with_rehash_key(hs) || + !cfs_hash_with_no_itemref(hs) || + !hs->hs_ops->hs_put_locked; + cfs_hash_lock(hs, 0); + LASSERT(!cfs_hash_is_rehashing(hs)); + + cfs_hash_for_each_bucket(hs, &bd, i) { + struct hlist_head *hhead; + + cfs_hash_bd_lock(hs, &bd, 0); + version = cfs_hash_bd_version_get(&bd); + + cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { + for (hnode = hhead->first; hnode;) { + cfs_hash_bucket_validate(hs, &bd, hnode); + cfs_hash_get(hs, hnode); + cfs_hash_bd_unlock(hs, &bd, 0); + cfs_hash_unlock(hs, 0); + + rc = func(hs, &bd, hnode, data); + if (stop_on_change) + cfs_hash_put(hs, hnode); + cond_resched(); + count++; + + cfs_hash_lock(hs, 0); + cfs_hash_bd_lock(hs, &bd, 0); + if (!stop_on_change) { + tmp = hnode->next; + cfs_hash_put_locked(hs, hnode); + hnode = tmp; + } else { /* bucket changed? */ + if (version != + cfs_hash_bd_version_get(&bd)) + break; + /* safe to continue because no change */ + hnode = hnode->next; + } + if (rc) /* callback wants to break iteration */ + break; + } + if (rc) /* callback wants to break iteration */ + break; + } + cfs_hash_bd_unlock(hs, &bd, 0); + if (rc) /* callback wants to break iteration */ + break; + } + cfs_hash_unlock(hs, 0); + + return count; +} + +int +cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, + void *data) +{ + if (cfs_hash_with_no_lock(hs) || + cfs_hash_with_rehash_key(hs) || + !cfs_hash_with_no_itemref(hs)) + return -EOPNOTSUPP; + + if (!hs->hs_ops->hs_get || + (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked)) + return -EOPNOTSUPP; + + cfs_hash_for_each_enter(hs); + cfs_hash_for_each_relax(hs, func, data); + cfs_hash_for_each_exit(hs); + + return 0; +} +EXPORT_SYMBOL(cfs_hash_for_each_nolock); + +/** + * For each hash bucket in the libcfs hash @hs call the passed callback + * @func until all the hash buckets are empty. The passed callback @func + * or the previously registered callback hs->hs_put must remove the item + * from the hash. You may either use the cfs_hash_del() or hlist_del() + * functions. No rwlocks will be held during the callback @func, so it is + * safe to sleep if needed. This function will not terminate until the + * hash is empty. Note it is still possible to concurrently add new + * items into the hash. It is the caller's responsibility to ensure + * the required locking is in place to prevent concurrent insertions. 
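+ *
+ * Hedged sketch of a typical teardown (struct my_obj is hypothetical);
+ * per the rules above the callback itself removes the item, here via
+ * cfs_hash_del():
+ *
+ *	static int drop_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+ *			   struct hlist_node *hnode, void *data)
+ *	{
+ *		struct my_obj *obj = cfs_hash_object(hs, hnode);
+ *
+ *		cfs_hash_del(hs, &obj->mo_key, hnode);
+ *		return 0;
+ *	}
+ *
+ *	cfs_hash_for_each_empty(hs, drop_cb, NULL);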
+ */ +int +cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, + void *data) +{ + unsigned i = 0; + + if (cfs_hash_with_no_lock(hs)) + return -EOPNOTSUPP; + + if (!hs->hs_ops->hs_get || + (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked)) + return -EOPNOTSUPP; + + cfs_hash_for_each_enter(hs); + while (cfs_hash_for_each_relax(hs, func, data)) { + CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", + hs->hs_name, i++); + } + cfs_hash_for_each_exit(hs); + return 0; +} +EXPORT_SYMBOL(cfs_hash_for_each_empty); + +void +cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, + cfs_hash_for_each_cb_t func, void *data) +{ + struct hlist_head *hhead; + struct hlist_node *hnode; + struct cfs_hash_bd bd; + + cfs_hash_for_each_enter(hs); + cfs_hash_lock(hs, 0); + if (hindex >= CFS_HASH_NHLIST(hs)) + goto out; + + cfs_hash_bd_index_set(hs, hindex, &bd); + + cfs_hash_bd_lock(hs, &bd, 0); + hhead = cfs_hash_bd_hhead(hs, &bd); + hlist_for_each(hnode, hhead) { + if (func(hs, &bd, hnode, data)) + break; + } + cfs_hash_bd_unlock(hs, &bd, 0); +out: + cfs_hash_unlock(hs, 0); + cfs_hash_for_each_exit(hs); +} +EXPORT_SYMBOL(cfs_hash_hlist_for_each); + +/* + * For each item in the libcfs hash @hs which matches the @key call + * the passed callback @func and pass to it as an argument each hash + * item and the private @data. During the callback the bucket lock + * is held so the callback must never sleep. + */ +void +cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, + cfs_hash_for_each_cb_t func, void *data) +{ + struct hlist_node *hnode; + struct cfs_hash_bd bds[2]; + unsigned int i; + + cfs_hash_lock(hs, 0); + + cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); + + cfs_hash_for_each_bd(bds, 2, i) { + struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]); + + hlist_for_each(hnode, hlist) { + cfs_hash_bucket_validate(hs, &bds[i], hnode); + + if (cfs_hash_keycmp(hs, key, hnode)) { + if (func(hs, &bds[i], hnode, data)) + break; + } + } + } + + cfs_hash_dual_bd_unlock(hs, bds, 0); + cfs_hash_unlock(hs, 0); +} +EXPORT_SYMBOL(cfs_hash_for_each_key); + +/** + * Rehash the libcfs hash @hs to the given @bits. This can be used + * to grow the hash size when excessive chaining is detected, or to + * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH + * flag is set in @hs the libcfs hash may be dynamically rehashed + * during addition or removal if the hash's theta value exceeds + * either the hs->hs_min_theta or hs->max_theta values. By default + * these values are tuned to keep the chained hash depth small, and + * this approach assumes a reasonably uniform hashing function. The + * theta thresholds for @hs are tunable via cfs_hash_set_theta(). + */ +void +cfs_hash_rehash_cancel_locked(struct cfs_hash *hs) +{ + int i; + + /* need hold cfs_hash_lock(hs, 1) */ + LASSERT(cfs_hash_with_rehash(hs) && + !cfs_hash_with_no_lock(hs)); + + if (!cfs_hash_is_rehashing(hs)) + return; + + if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) { + hs->hs_rehash_bits = 0; + return; + } + + for (i = 2; cfs_hash_is_rehashing(hs); i++) { + cfs_hash_unlock(hs, 1); + /* raise console warning while waiting too long */ + CDEBUG(is_power_of_2(i >> 3) ? 
D_WARNING : D_INFO, + "hash %s is still rehashing, rescheded %d\n", + hs->hs_name, i - 1); + cond_resched(); + cfs_hash_lock(hs, 1); + } +} + +void +cfs_hash_rehash_cancel(struct cfs_hash *hs) +{ + cfs_hash_lock(hs, 1); + cfs_hash_rehash_cancel_locked(hs); + cfs_hash_unlock(hs, 1); +} + +int +cfs_hash_rehash(struct cfs_hash *hs, int do_rehash) +{ + int rc; + + LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs)); + + cfs_hash_lock(hs, 1); + + rc = cfs_hash_rehash_bits(hs); + if (rc <= 0) { + cfs_hash_unlock(hs, 1); + return rc; + } + + hs->hs_rehash_bits = rc; + if (!do_rehash) { + /* launch and return */ + cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi); + cfs_hash_unlock(hs, 1); + return 0; + } + + /* rehash right now */ + cfs_hash_unlock(hs, 1); + + return cfs_hash_rehash_worker(&hs->hs_rehash_wi); +} + +static int +cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) +{ + struct cfs_hash_bd new; + struct hlist_head *hhead; + struct hlist_node *hnode; + struct hlist_node *pos; + void *key; + int c = 0; + + /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ + cfs_hash_bd_for_each_hlist(hs, old, hhead) { + hlist_for_each_safe(hnode, pos, hhead) { + key = cfs_hash_key(hs, hnode); + LASSERT(key); + /* Validate hnode is in the correct bucket. */ + cfs_hash_bucket_validate(hs, old, hnode); + /* + * Delete from old hash bucket; move to new bucket. + * ops->hs_key must be defined. + */ + cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, + hs->hs_rehash_bits, key, &new); + cfs_hash_bd_move_locked(hs, old, &new, hnode); + c++; + } + } + + return c; +} + +static int +cfs_hash_rehash_worker(cfs_workitem_t *wi) +{ + struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi); + struct cfs_hash_bucket **bkts; + struct cfs_hash_bd bd; + unsigned int old_size; + unsigned int new_size; + int bsize; + int count = 0; + int rc = 0; + int i; + + LASSERT(hs && cfs_hash_with_rehash(hs)); + + cfs_hash_lock(hs, 0); + LASSERT(cfs_hash_is_rehashing(hs)); + + old_size = CFS_HASH_NBKT(hs); + new_size = CFS_HASH_RH_NBKT(hs); + + cfs_hash_unlock(hs, 0); + + /* + * don't need hs::hs_rwlock for hs::hs_buckets, + * because nobody can change bkt-table except me. 
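+ * During the migration below both tables coexist: lookups go through
+ * cfs_hash_dual_bd_get(), which probes hs_buckets and, while a rehash
+ * is in flight, hs_rehash_buckets as well, so every key stays
+ * reachable while the buckets are moved a batch at a time.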
+ */ + bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets, + old_size, new_size); + cfs_hash_lock(hs, 1); + if (!bkts) { + rc = -ENOMEM; + goto out; + } + + if (bkts == hs->hs_buckets) { + bkts = NULL; /* do nothing */ + goto out; + } + + rc = __cfs_hash_theta(hs); + if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) { + /* free the new allocated bkt-table */ + old_size = new_size; + new_size = CFS_HASH_NBKT(hs); + rc = -EALREADY; + goto out; + } + + LASSERT(!hs->hs_rehash_buckets); + hs->hs_rehash_buckets = bkts; + + rc = 0; + cfs_hash_for_each_bucket(hs, &bd, i) { + if (cfs_hash_is_exiting(hs)) { + rc = -ESRCH; + /* someone wants to destroy the hash, abort now */ + if (old_size < new_size) /* OK to free old bkt-table */ + break; + /* it's shrinking, need free new bkt-table */ + hs->hs_rehash_buckets = NULL; + old_size = new_size; + new_size = CFS_HASH_NBKT(hs); + goto out; + } + + count += cfs_hash_rehash_bd(hs, &bd); + if (count < CFS_HASH_LOOP_HOG || + cfs_hash_is_iterating(hs)) { /* need to finish ASAP */ + continue; + } + + count = 0; + cfs_hash_unlock(hs, 1); + cond_resched(); + cfs_hash_lock(hs, 1); + } + + hs->hs_rehash_count++; + + bkts = hs->hs_buckets; + hs->hs_buckets = hs->hs_rehash_buckets; + hs->hs_rehash_buckets = NULL; + + hs->hs_cur_bits = hs->hs_rehash_bits; +out: + hs->hs_rehash_bits = 0; + if (rc == -ESRCH) /* never be scheduled again */ + cfs_wi_exit(cfs_sched_rehash, wi); + bsize = cfs_hash_bkt_size(hs); + cfs_hash_unlock(hs, 1); + /* can't refer to @hs anymore because it could be destroyed */ + if (bkts) + cfs_hash_buckets_free(bkts, bsize, new_size, old_size); + if (rc != 0) + CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); + /* return 1 only if cfs_wi_exit is called */ + return rc == -ESRCH; +} + +/** + * Rehash the object referenced by @hnode in the libcfs hash @hs. The + * @old_key must be provided to locate the objects previous location + * in the hash, and the @new_key will be used to reinsert the object. + * Use this function instead of a cfs_hash_add() + cfs_hash_del() + * combo when it is critical that there is no window in time where the + * object is missing from the hash. When an object is being rehashed + * the registered cfs_hash_get() and cfs_hash_put() functions will + * not be called. 
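+ *
+ * An illustrative call (the object and key variables are
+ * hypothetical); the hash must have been created with
+ * CFS_HASH_REHASH_KEY so that ops->hs_keycpy is registered:
+ *
+ *	cfs_hash_rehash_key(hs, &old_key, &new_key, &obj->mo_hnode);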
+ */ +void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, + void *new_key, struct hlist_node *hnode) +{ + struct cfs_hash_bd bds[3]; + struct cfs_hash_bd old_bds[2]; + struct cfs_hash_bd new_bd; + + LASSERT(!hlist_unhashed(hnode)); + + cfs_hash_lock(hs, 0); + + cfs_hash_dual_bd_get(hs, old_key, old_bds); + cfs_hash_bd_get(hs, new_key, &new_bd); + + bds[0] = old_bds[0]; + bds[1] = old_bds[1]; + bds[2] = new_bd; + + /* NB: bds[0] and bds[1] are ordered already */ + cfs_hash_bd_order(&bds[1], &bds[2]); + cfs_hash_bd_order(&bds[0], &bds[1]); + + cfs_hash_multi_bd_lock(hs, bds, 3, 1); + if (likely(!old_bds[1].bd_bucket)) { + cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode); + } else { + cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode); + cfs_hash_bd_add_locked(hs, &new_bd, hnode); + } + /* overwrite key inside locks, otherwise it may screw up + * other operations, e.g. rehash + */ + cfs_hash_keycpy(hs, hnode, new_key); + + cfs_hash_multi_bd_unlock(hs, bds, 3, 1); + cfs_hash_unlock(hs, 0); +} +EXPORT_SYMBOL(cfs_hash_rehash_key); + +void cfs_hash_debug_header(struct seq_file *m) +{ + seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n", + CFS_HASH_BIGNAME_LEN, "name"); +} +EXPORT_SYMBOL(cfs_hash_debug_header); + +static struct cfs_hash_bucket ** +cfs_hash_full_bkts(struct cfs_hash *hs) +{ + /* NB: caller should hold hs->hs_rwlock if REHASH is set */ + if (!hs->hs_rehash_buckets) + return hs->hs_buckets; + + LASSERT(hs->hs_rehash_bits != 0); + return hs->hs_rehash_bits > hs->hs_cur_bits ? + hs->hs_rehash_buckets : hs->hs_buckets; +} + +static unsigned int +cfs_hash_full_nbkt(struct cfs_hash *hs) +{ + /* NB: caller should hold hs->hs_rwlock if REHASH is set */ + if (!hs->hs_rehash_buckets) + return CFS_HASH_NBKT(hs); + + LASSERT(hs->hs_rehash_bits != 0); + return hs->hs_rehash_bits > hs->hs_cur_bits ? + CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); +} + +void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m) +{ + int dist[8] = { 0, }; + int maxdep = -1; + int maxdepb = -1; + int total = 0; + int theta; + int i; + + cfs_hash_lock(hs, 0); + theta = __cfs_hash_theta(hs); + + seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ", + CFS_HASH_BIGNAME_LEN, hs->hs_name, + 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits, + 1 << hs->hs_max_bits, + __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta), + __cfs_hash_theta_int(hs->hs_min_theta), + __cfs_hash_theta_frac(hs->hs_min_theta), + __cfs_hash_theta_int(hs->hs_max_theta), + __cfs_hash_theta_frac(hs->hs_max_theta), + hs->hs_flags, hs->hs_rehash_count); + + /* + * The distribution is a summary of the chained hash depth in + * each of the libcfs hash buckets. Each bucket's hsb_count is + * divided by the hash theta value and used to generate a + * histogram of the hash distribution. A uniform hash will + * result in all hash buckets being close to the average, thus + * only the first few entries in the histogram will be non-zero. + * If your hash function results in a non-uniform hash, this will + * be observable as outlier buckets in the distribution histogram. 
+ * + * Uniform hash distribution: 128/128/0/0/0/0/0/0 + * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1 + */ + for (i = 0; i < cfs_hash_full_nbkt(hs); i++) { + struct cfs_hash_bd bd; + + bd.bd_bucket = cfs_hash_full_bkts(hs)[i]; + cfs_hash_bd_lock(hs, &bd, 0); + if (maxdep < bd.bd_bucket->hsb_depmax) { + maxdep = bd.bd_bucket->hsb_depmax; + maxdepb = ffz(~maxdep); + } + total += bd.bd_bucket->hsb_count; + dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++; + cfs_hash_bd_unlock(hs, &bd, 0); + } + + seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb); + for (i = 0; i < 8; i++) + seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/'); + + cfs_hash_unlock(hs, 0); +} +EXPORT_SYMBOL(cfs_hash_debug_str); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c new file mode 100644 index 000000000..33352af6c --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c @@ -0,0 +1,227 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * GPL HEADER END + */ +/* + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
+ * + * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction + * + * Author: liang@whamcloud.com + */ + +#define DEBUG_SUBSYSTEM S_LNET + +#include "../../include/linux/libcfs/libcfs.h" + +/** Global CPU partition table */ +struct cfs_cpt_table *cfs_cpt_table __read_mostly; +EXPORT_SYMBOL(cfs_cpt_table); + +#ifndef HAVE_LIBCFS_CPT + +#define CFS_CPU_VERSION_MAGIC 0xbabecafe + +struct cfs_cpt_table * +cfs_cpt_table_alloc(unsigned int ncpt) +{ + struct cfs_cpt_table *cptab; + + if (ncpt != 1) { + CERROR("Can't support cpu partition number %d\n", ncpt); + return NULL; + } + + LIBCFS_ALLOC(cptab, sizeof(*cptab)); + if (cptab) { + cptab->ctb_version = CFS_CPU_VERSION_MAGIC; + node_set(0, cptab->ctb_nodemask); + cptab->ctb_nparts = ncpt; + } + + return cptab; +} +EXPORT_SYMBOL(cfs_cpt_table_alloc); + +void +cfs_cpt_table_free(struct cfs_cpt_table *cptab) +{ + LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC); + + LIBCFS_FREE(cptab, sizeof(*cptab)); +} +EXPORT_SYMBOL(cfs_cpt_table_free); + +#ifdef CONFIG_SMP +int +cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) +{ + int rc; + + rc = snprintf(buf, len, "%d\t: %d\n", 0, 0); + len -= rc; + if (len <= 0) + return -EFBIG; + + return rc; +} +EXPORT_SYMBOL(cfs_cpt_table_print); +#endif /* CONFIG_SMP */ + +int +cfs_cpt_number(struct cfs_cpt_table *cptab) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_number); + +int +cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_weight); + +int +cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_online); + +nodemask_t * +cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) +{ + return &cptab->ctb_nodemask; +} +EXPORT_SYMBOL(cfs_cpt_nodemask); + +int +cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_cpu); + +void +cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) +{ +} +EXPORT_SYMBOL(cfs_cpt_unset_cpu); + +int +cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_cpumask); + +void +cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) +{ +} +EXPORT_SYMBOL(cfs_cpt_unset_cpumask); + +int +cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_node); + +void +cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) +{ +} +EXPORT_SYMBOL(cfs_cpt_unset_node); + +int +cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_nodemask); + +void +cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) +{ +} +EXPORT_SYMBOL(cfs_cpt_unset_nodemask); + +void +cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) +{ +} +EXPORT_SYMBOL(cfs_cpt_clear); + +int +cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) +{ + return 0; +} +EXPORT_SYMBOL(cfs_cpt_spread_node); + +int +cfs_cpu_ht_nsiblings(int cpu) +{ + return 1; +} +EXPORT_SYMBOL(cfs_cpu_ht_nsiblings); + +int +cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) +{ + return 0; +} +EXPORT_SYMBOL(cfs_cpt_current); + +int +cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) +{ + return 0; +} +EXPORT_SYMBOL(cfs_cpt_of_cpu); + +int +cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) +{ + return 0; +} +EXPORT_SYMBOL(cfs_cpt_bind); + +void +cfs_cpu_fini(void) +{ + if (cfs_cpt_table) { + cfs_cpt_table_free(cfs_cpt_table); + cfs_cpt_table = 
NULL; + } +} + +int +cfs_cpu_init(void) +{ + cfs_cpt_table = cfs_cpt_table_alloc(1); + + return cfs_cpt_table ? 0 : -1; +} + +#endif /* HAVE_LIBCFS_CPT */ diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c new file mode 100644 index 000000000..2de9eeae0 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c @@ -0,0 +1,185 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * GPL HEADER END + */ +/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2015 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * Author: liang@whamcloud.com + */ + +#define DEBUG_SUBSYSTEM S_LNET + +#include "../../include/linux/libcfs/libcfs.h" + +/** destroy cpu-partition lock, see libcfs_private.h for more detail */ +void +cfs_percpt_lock_free(struct cfs_percpt_lock *pcl) +{ + LASSERT(pcl->pcl_locks); + LASSERT(!pcl->pcl_locked); + + cfs_percpt_free(pcl->pcl_locks); + LIBCFS_FREE(pcl, sizeof(*pcl)); +} +EXPORT_SYMBOL(cfs_percpt_lock_free); + +/** + * create cpu-partition lock, see libcfs_private.h for more detail. + * + * cpu-partition lock is designed for large-scale SMP systems, so we need + * to reduce cache-line conflicts as much as we can; that's the + * reason we always allocate cacheline-aligned memory blocks. 
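+ *
+ * A minimal usage sketch (illustrative only):
+ *
+ *	pcl = cfs_percpt_lock_alloc(cptab);
+ *	...
+ *	idx = cfs_cpt_current(cptab, 0);
+ *	cfs_percpt_lock(pcl, idx);
+ *	... touch data private to partition "idx" ...
+ *	cfs_percpt_unlock(pcl, idx);
+ *
+ * and, for an exclusive section over all partitions:
+ *
+ *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
+ *	...
+ *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);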
+ */ +struct cfs_percpt_lock * +cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) +{ + struct cfs_percpt_lock *pcl; + spinlock_t *lock; + int i; + + /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */ + LIBCFS_ALLOC(pcl, sizeof(*pcl)); + if (!pcl) + return NULL; + + pcl->pcl_cptab = cptab; + pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock)); + if (!pcl->pcl_locks) { + LIBCFS_FREE(pcl, sizeof(*pcl)); + return NULL; + } + + cfs_percpt_for_each(lock, i, pcl->pcl_locks) + spin_lock_init(lock); + + return pcl; +} +EXPORT_SYMBOL(cfs_percpt_lock_alloc); + +/** + * lock a CPU partition + * + * \a index != CFS_PERCPT_LOCK_EX + * hold private lock indexed by \a index + * + * \a index == CFS_PERCPT_LOCK_EX + * exclusively lock @pcl and nobody can take private lock + */ +void +cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) + __acquires(pcl->pcl_locks) +{ + int ncpt = cfs_cpt_number(pcl->pcl_cptab); + int i; + + LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt); + + if (ncpt == 1) { + index = 0; + } else { /* serialize with exclusive lock */ + while (pcl->pcl_locked) + cpu_relax(); + } + + if (likely(index != CFS_PERCPT_LOCK_EX)) { + spin_lock(pcl->pcl_locks[index]); + return; + } + + /* exclusive lock request */ + for (i = 0; i < ncpt; i++) { + spin_lock(pcl->pcl_locks[i]); + if (i == 0) { + LASSERT(!pcl->pcl_locked); + /* nobody should take private lock after this + * so I wouldn't starve for too long time + */ + pcl->pcl_locked = 1; + } + } +} +EXPORT_SYMBOL(cfs_percpt_lock); + +/** unlock a CPU partition */ +void +cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) + __releases(pcl->pcl_locks) +{ + int ncpt = cfs_cpt_number(pcl->pcl_cptab); + int i; + + index = ncpt == 1 ? 0 : index; + + if (likely(index != CFS_PERCPT_LOCK_EX)) { + spin_unlock(pcl->pcl_locks[index]); + return; + } + + for (i = ncpt - 1; i >= 0; i--) { + if (i == 0) { + LASSERT(pcl->pcl_locked); + pcl->pcl_locked = 0; + } + spin_unlock(pcl->pcl_locks[i]); + } +} +EXPORT_SYMBOL(cfs_percpt_unlock); + +/** free cpu-partition refcount */ +void +cfs_percpt_atomic_free(atomic_t **refs) +{ + cfs_percpt_free(refs); +} +EXPORT_SYMBOL(cfs_percpt_atomic_free); + +/** allocate cpu-partition refcount with initial value @init_val */ +atomic_t ** +cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val) +{ + atomic_t **refs; + atomic_t *ref; + int i; + + refs = cfs_percpt_alloc(cptab, sizeof(*ref)); + if (!refs) + return NULL; + + cfs_percpt_for_each(ref, i, refs) + atomic_set(ref, init_val); + return refs; +} +EXPORT_SYMBOL(cfs_percpt_atomic_alloc); + +/** return sum of cpu-partition refs */ +int +cfs_percpt_atomic_summary(atomic_t **refs) +{ + atomic_t *ref; + int i; + int val = 0; + + cfs_percpt_for_each(ref, i, refs) + val += atomic_read(ref); + + return val; +} +EXPORT_SYMBOL(cfs_percpt_atomic_summary); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c new file mode 100644 index 000000000..c5a695151 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c @@ -0,0 +1,196 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * GPL HEADER END + */ +/* + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * Author: liang@whamcloud.com + */ + +#define DEBUG_SUBSYSTEM S_LNET + +#include "../../include/linux/libcfs/libcfs.h" + +struct cfs_var_array { + unsigned int va_count; /* # of buffers */ + unsigned int va_size; /* size of each var */ + struct cfs_cpt_table *va_cptab; /* cpu partition table */ + void *va_ptrs[0]; /* buffer addresses */ +}; + +/* + * free per-cpu data, see more detail in cfs_percpt_free + */ +void +cfs_percpt_free(void *vars) +{ + struct cfs_var_array *arr; + int i; + + arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); + + for (i = 0; i < arr->va_count; i++) { + if (arr->va_ptrs[i]) + LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); + } + + LIBCFS_FREE(arr, offsetof(struct cfs_var_array, + va_ptrs[arr->va_count])); +} +EXPORT_SYMBOL(cfs_percpt_free); + +/* + * allocate per cpu-partition variables, returned value is an array of pointers, + * variable can be indexed by CPU partition ID, i.e: + * + * arr = cfs_percpt_alloc(cfs_cpu_pt, size); + * then caller can access memory block for CPU 0 by arr[0], + * memory block for CPU 1 by arr[1]... + * memory block for CPU N by arr[N]... + * + * cacheline aligned. + */ +void * +cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) +{ + struct cfs_var_array *arr; + int count; + int i; + + count = cfs_cpt_number(cptab); + + LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); + if (!arr) + return NULL; + + size = L1_CACHE_ALIGN(size); + arr->va_size = size; + arr->va_count = count; + arr->va_cptab = cptab; + + for (i = 0; i < count; i++) { + LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size); + if (!arr->va_ptrs[i]) { + cfs_percpt_free((void *)&arr->va_ptrs[0]); + return NULL; + } + } + + return (void *)&arr->va_ptrs[0]; +} +EXPORT_SYMBOL(cfs_percpt_alloc); + +/* + * return number of CPUs (or number of elements in per-cpu data) + * according to cptab of @vars + */ +int +cfs_percpt_number(void *vars) +{ + struct cfs_var_array *arr; + + arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); + + return arr->va_count; +} +EXPORT_SYMBOL(cfs_percpt_number); + +/* + * return memory block shadowed from current CPU + */ +void * +cfs_percpt_current(void *vars) +{ + struct cfs_var_array *arr; + int cpt; + + arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); + cpt = cfs_cpt_current(arr->va_cptab, 0); + if (cpt < 0) + return NULL; + + return arr->va_ptrs[cpt]; +} + +void * +cfs_percpt_index(void *vars, int idx) +{ + struct cfs_var_array *arr; + + arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); + + LASSERT(idx >= 0 && idx < arr->va_count); + return arr->va_ptrs[idx]; +} + +/* + * free variable array, see more detail in cfs_array_alloc + */ +void +cfs_array_free(void *vars) +{ + struct cfs_var_array *arr; + int i; + + arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); + + for (i = 0; i < arr->va_count; i++) { + if (!arr->va_ptrs[i]) + continue; + + LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); 
+ } + LIBCFS_FREE(arr, offsetof(struct cfs_var_array, + va_ptrs[arr->va_count])); +} +EXPORT_SYMBOL(cfs_array_free); + +/* + * allocate a variable array, returned value is an array of pointers. + * Caller can specify length of array by @count, @size is size of each + * memory block in array. + */ +void * +cfs_array_alloc(int count, unsigned int size) +{ + struct cfs_var_array *arr; + int i; + + LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); + if (!arr) + return NULL; + + arr->va_count = count; + arr->va_size = size; + + for (i = 0; i < count; i++) { + LIBCFS_ALLOC(arr->va_ptrs[i], size); + + if (!arr->va_ptrs[i]) { + cfs_array_free((void *)&arr->va_ptrs[0]); + return NULL; + } + } + + return (void *)&arr->va_ptrs[0]; +} +EXPORT_SYMBOL(cfs_array_alloc); diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c new file mode 100644 index 000000000..50ac1536d --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c @@ -0,0 +1,581 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, 2015 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * String manipulation functions. + * + * libcfs/libcfs/libcfs_string.c + * + * Author: Nathan Rutman + */ + +#include "../../include/linux/libcfs/libcfs.h" + +/* Convert a text string to a bitmask */ +int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), + int *oldmask, int minmask, int allmask) +{ + const char *debugstr; + char op = '\0'; + int newmask = minmask, i, len, found = 0; + + /* <str> must be a list of tokens separated by whitespace + * and optionally an operator ('+' or '-'). If an operator + * appears first in <str>, '*oldmask' is used as the starting point + * (relative), otherwise minmask is used (absolute). An operator + * applies to all following tokens up to the next operator. 
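+ *
+ * For example, assuming bit2str() maps bit 0 to "trace" and bit 1 to
+ * "net" (illustrative names only):
+ *
+ *	"trace net"	-> minmask | (1 << 0) | (1 << 1)	(absolute)
+ *	"+net -trace"	-> (*oldmask | (1 << 1)) & ~(1 << 0)	(relative)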
+	while (*str != '\0') {
+		while (isspace(*str))
+			str++;
+		if (*str == '\0')
+			break;
+		if (*str == '+' || *str == '-') {
+			op = *str++;
+			if (!found)
+				/* only if first token is relative */
+				newmask = *oldmask;
+			while (isspace(*str))
+				str++;
+			if (*str == '\0') /* trailing op */
+				return -EINVAL;
+		}
+
+		/* find token length */
+		len = 0;
+		while (str[len] != '\0' && !isspace(str[len]) &&
+		       str[len] != '+' && str[len] != '-')
+			len++;
+
+		/* match token */
+		found = 0;
+		for (i = 0; i < 32; i++) {
+			debugstr = bit2str(i);
+			if (debugstr && strlen(debugstr) == len &&
+			    strncasecmp(str, debugstr, len) == 0) {
+				if (op == '-')
+					newmask &= ~(1 << i);
+				else
+					newmask |= (1 << i);
+				found = 1;
+				break;
+			}
+		}
+		if (!found && len == 3 &&
+		    (strncasecmp(str, "ALL", len) == 0)) {
+			if (op == '-')
+				newmask = minmask;
+			else
+				newmask = allmask;
+			found = 1;
+		}
+		if (!found) {
+			CWARN("unknown mask '%.*s'.\n"
+			      "mask usage: [+|-]<all|type> ...\n", len, str);
+			return -EINVAL;
+		}
+		str += len;
+	}
+
+	*oldmask = newmask;
+	return 0;
+}
+
+/* get the first string out of @str */
+char *cfs_firststr(char *str, size_t size)
+{
+	size_t i = 0;
+	char *end;
+
+	/* trim leading spaces */
+	while (i < size && *str && isspace(*str)) {
+		++i;
+		++str;
+	}
+
+	/* string with all spaces */
+	if (*str == '\0')
+		goto out;
+
+	end = str;
+	while (i < size && *end != '\0' && !isspace(*end)) {
+		++i;
+		++end;
+	}
+
+	*end = '\0';
+out:
+	return str;
+}
+EXPORT_SYMBOL(cfs_firststr);
+
+char *
+cfs_trimwhite(char *str)
+{
+	char *end;
+
+	while (isspace(*str))
+		str++;
+
+	end = str + strlen(str);
+	while (end > str) {
+		if (!isspace(end[-1]))
+			break;
+		end--;
+	}
+
+	*end = 0;
+	return str;
+}
+EXPORT_SYMBOL(cfs_trimwhite);
+
+/**
+ * Extracts tokens from strings.
+ *
+ * Looks for \a delim in string \a next, sets \a res to point to
+ * substring before the delimiter, sets \a next right after the found
+ * delimiter.
+ *
+ * \retval 1 if \a res points to a string of non-whitespace characters
+ * \retval 0 otherwise
+ */
+int
+cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
+{
+	char *end;
+
+	if (!next->ls_str)
+		return 0;
+
+	/* skip leading whitespace */
+	while (next->ls_len) {
+		if (!isspace(*next->ls_str))
+			break;
+		next->ls_str++;
+		next->ls_len--;
+	}
+
+	if (next->ls_len == 0) /* whitespace only */
+		return 0;
+
+	if (*next->ls_str == delim) {
+		/* first non-whitespace is the delimiter */
+		return 0;
+	}
+
+	res->ls_str = next->ls_str;
+	end = memchr(next->ls_str, delim, next->ls_len);
+	if (!end) {
+		/* the delimiter is not found in the string */
+		end = next->ls_str + next->ls_len;
+		next->ls_str = NULL;
+	} else {
+		next->ls_str = end + 1;
+		next->ls_len -= (end - res->ls_str + 1);
+	}
+
+	/* skip trailing whitespace */
+	while (--end != res->ls_str) {
+		if (!isspace(*end))
+			break;
+	}
+
+	res->ls_len = end - res->ls_str + 1;
+	return 1;
+}
+EXPORT_SYMBOL(cfs_gettok);
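+
+/* Usage sketch (illustrative only, not part of the original change):
+ *
+ *	struct cfs_lstr src = { .ls_str = buf, .ls_len = strlen(buf) };
+ *	struct cfs_lstr tok;
+ *
+ *	while (cfs_gettok(&src, ',', &tok))
+ *		handle(tok.ls_str, tok.ls_len);
+ *
+ * Each returned token has surrounding whitespace trimmed; 'handle' is a
+ * hypothetical consumer of the (pointer, length) pair.
+ */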
+
+/**
+ * Converts string to integer.
+ *
+ * Accepts only decimal notation.
+ *
+ * \retval 1 if the first \a nob chars of \a str convert to a decimal
+ * integer in the range [\a min, \a max]
+ * \retval 0 otherwise
+ */
+int
+cfs_str2num_check(char *str, int nob, unsigned *num,
+		  unsigned min, unsigned max)
+{
+	bool all_numbers = true;
+	char *endp, cache;
+	int rc;
+
+	str = cfs_trimwhite(str);
+
+	/* kstrtouint can only handle strings composed
+	 * of only numbers. We need to scan the string
+	 * passed in for the first non-digit character
+	 * and end the string at that location. If we
+	 * don't find any non-digit character we still
+	 * need to place a '\0' at position nob since
+	 * we are not interested in the rest of the
+	 * string which is longer than nob in size.
+	 * After we are done the character at the
+	 * position we placed '\0' must be restored.
+	 */
+	for (endp = str; endp < str + nob; endp++) {
+		if (!isdigit(*endp)) {
+			all_numbers = false;
+			break;
+		}
+	}
+	cache = *endp;
+	*endp = '\0';
+
+	rc = kstrtouint(str, 10, num);
+	*endp = cache;
+	if (rc || !all_numbers)
+		return 0;
+
+	return (*num >= min && *num <= max);
+}
+EXPORT_SYMBOL(cfs_str2num_check);
+
+/**
+ * Parses \<range_expr\> token of the syntax. If \a bracketed is false,
+ * \a src should only have a single token which can be \<number\> or \*
+ *
+ * \retval 0 and \a expr pointing to an allocated range_expr, with
+ * range_expr::re_lo, range_expr::re_hi and range_expr::re_stride
+ * initialized, if \a src parses to
+ * \<number\> |
+ * \<number\> '-' \<number\> |
+ * \<number\> '-' \<number\> '/' \<number\>
+ * \retval -EINVAL or -ENOMEM otherwise
+ */
+static int
+cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
+		     int bracketed, struct cfs_range_expr **expr)
+{
+	struct cfs_range_expr *re;
+	struct cfs_lstr tok;
+
+	LIBCFS_ALLOC(re, sizeof(*re));
+	if (!re)
+		return -ENOMEM;
+
+	if (src->ls_len == 1 && src->ls_str[0] == '*') {
+		re->re_lo = min;
+		re->re_hi = max;
+		re->re_stride = 1;
+		goto out;
+	}
+
+	if (cfs_str2num_check(src->ls_str, src->ls_len,
+			      &re->re_lo, min, max)) {
+		/* <number> is parsed */
+		re->re_hi = re->re_lo;
+		re->re_stride = 1;
+		goto out;
+	}
+
+	if (!bracketed || !cfs_gettok(src, '-', &tok))
+		goto failed;
+
+	if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
+			       &re->re_lo, min, max))
+		goto failed;
+
+	/* <number> - */
+	if (cfs_str2num_check(src->ls_str, src->ls_len,
+			      &re->re_hi, min, max)) {
+		/* <number> - <number> is parsed */
+		re->re_stride = 1;
+		goto out;
+	}
+
+	/* go to check <number> '-' <number> '/' <number> */
+	if (cfs_gettok(src, '/', &tok)) {
+		if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
+				       &re->re_hi, min, max))
+			goto failed;
+
+		/* <number> - <number> / ... */
+		if (cfs_str2num_check(src->ls_str, src->ls_len,
+				      &re->re_stride, min, max)) {
+			/* <number> - <number> / <number> is parsed */
+			goto out;
+		}
+	}
+
+ out:
+	*expr = re;
+	return 0;
+
+ failed:
+	LIBCFS_FREE(re, sizeof(*re));
+	return -EINVAL;
+}
+
+/**
+ * Print the range expression \a re into specified \a buffer.
+ * If \a bracketed is true, expression does not need additional
+ * brackets.
+ *
+ * \retval number of characters written
+ */
+static int
+cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr,
+		     bool bracketed)
+{
+	int i;
+	char s[] = "[";
+	char e[] = "]";
+
+	if (bracketed) {
+		s[0] = '\0';
+		e[0] = '\0';
+	}
+
+	if (expr->re_lo == expr->re_hi)
+		i = scnprintf(buffer, count, "%u", expr->re_lo);
+	else if (expr->re_stride == 1)
+		i = scnprintf(buffer, count, "%s%u-%u%s",
+			      s, expr->re_lo, expr->re_hi, e);
+	else
+		i = scnprintf(buffer, count, "%s%u-%u/%u%s",
+			      s, expr->re_lo, expr->re_hi, expr->re_stride, e);
+	return i;
+}
+
+/**
+ * Print a list of range expressions (\a expr_list) into specified \a buffer.
+ * If the list contains several expressions, separate them with a comma
+ * and surround the list with brackets.
+ *
+ * \retval number of characters written
+ */
+int
+cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list)
+{
+	struct cfs_range_expr *expr;
+	int i = 0, j = 0;
+	int numexprs = 0;
+
+	if (count <= 0)
+		return 0;
+
+	list_for_each_entry(expr, &expr_list->el_exprs, re_link)
+		numexprs++;
+
+	if (numexprs > 1)
+		i += scnprintf(buffer + i, count - i, "[");
+
+	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+		if (j++ != 0)
+			i += scnprintf(buffer + i, count - i, ",");
+		i += cfs_range_expr_print(buffer + i, count - i, expr,
+					  numexprs > 1);
+	}
+
+	if (numexprs > 1)
+		i += scnprintf(buffer + i, count - i, "]");
+
+	return i;
+}
+EXPORT_SYMBOL(cfs_expr_list_print);
+
+/**
+ * Matches value (\a value) against the range expression list \a expr_list.
+ *
+ * \retval 1 if \a value matches
+ * \retval 0 otherwise
+ */
+int
+cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
+{
+	struct cfs_range_expr *expr;
+
+	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+		if (value >= expr->re_lo && value <= expr->re_hi &&
+		    ((value - expr->re_lo) % expr->re_stride) == 0)
+			return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(cfs_expr_list_match);
+
+/**
+ * Convert an expression list (\a expr_list) to an array of all matched values
+ *
+ * \retval N	total number of all matched values
+ * \retval 0	if expression list is empty
+ * \retval < 0	for failure
+ */
+int
+cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
+{
+	struct cfs_range_expr *expr;
+	__u32 *val;
+	int count = 0;
+	int i;
+
+	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+		for (i = expr->re_lo; i <= expr->re_hi; i++) {
+			if (((i - expr->re_lo) % expr->re_stride) == 0)
+				count++;
+		}
+	}
+
+	if (count == 0) /* empty expression list */
+		return 0;
+
+	if (count > max) {
+		CERROR("Number of values %d exceeds max allowed %d\n",
+		       count, max);
+		return -EINVAL;
+	}
+
+	LIBCFS_ALLOC(val, sizeof(val[0]) * count);
+	if (!val)
+		return -ENOMEM;
+
+	count = 0;
+	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+		for (i = expr->re_lo; i <= expr->re_hi; i++) {
+			if (((i - expr->re_lo) % expr->re_stride) == 0)
+				val[count++] = i;
+		}
+	}
+
+	*valpp = val;
+	return count;
+}
+EXPORT_SYMBOL(cfs_expr_list_values);
+
+/**
+ * Frees cfs_range_expr structures of \a expr_list.
+ *
+ * \retval none
+ */
+void
+cfs_expr_list_free(struct cfs_expr_list *expr_list)
+{
+	while (!list_empty(&expr_list->el_exprs)) {
+		struct cfs_range_expr *expr;
+
+		expr = list_entry(expr_list->el_exprs.next,
+				  struct cfs_range_expr, re_link);
+		list_del(&expr->re_link);
+		LIBCFS_FREE(expr, sizeof(*expr));
+	}
+
+	LIBCFS_FREE(expr_list, sizeof(*expr_list));
+}
+EXPORT_SYMBOL(cfs_expr_list_free);
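+
+/* Usage sketch (illustrative only, not part of the original change):
+ *
+ *	struct cfs_expr_list *el;
+ *	char *s = "[0-7/2]";
+ *
+ *	if (cfs_expr_list_parse(s, strlen(s), 0, 255, &el) == 0) {
+ *		int hit = cfs_expr_list_match(4, el);  // 1: 4 is in 0,2,4,6
+ *		cfs_expr_list_free(el);
+ *	}
+ */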
+
+/**
+ * Parses \<expr_list\> token of the syntax.
+ *
+ * \retval 0 if \a str parses to \<number\> | \<expr_list\>
+ * \retval -errno otherwise
+ */
+int
+cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+		    struct cfs_expr_list **elpp)
+{
+	struct cfs_expr_list *expr_list;
+	struct cfs_range_expr *expr;
+	struct cfs_lstr src;
+	int rc;
+
+	LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
+	if (!expr_list)
+		return -ENOMEM;
+
+	src.ls_str = str;
+	src.ls_len = len;
+
+	INIT_LIST_HEAD(&expr_list->el_exprs);
+
+	if (src.ls_str[0] == '[' &&
+	    src.ls_str[src.ls_len - 1] == ']') {
+		src.ls_str++;
+		src.ls_len -= 2;
+
+		rc = -EINVAL;
+		while (src.ls_str) {
+			struct cfs_lstr tok;
+
+			if (!cfs_gettok(&src, ',', &tok)) {
+				rc = -EINVAL;
+				break;
+			}
+
+			rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
+			if (rc != 0)
+				break;
+
+			list_add_tail(&expr->re_link, &expr_list->el_exprs);
+		}
+	} else {
+		rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
+		if (rc == 0)
+			list_add_tail(&expr->re_link, &expr_list->el_exprs);
+	}
+
+	if (rc != 0)
+		cfs_expr_list_free(expr_list);
+	else
+		*elpp = expr_list;
+
+	return rc;
+}
+EXPORT_SYMBOL(cfs_expr_list_parse);
+
+/**
+ * Frees cfs_expr_list structures of \a list.
+ *
+ * For each struct cfs_expr_list structure found on \a list it frees
+ * range_expr list attached to it and frees the cfs_expr_list itself.
+ *
+ * \retval none
+ */
+void
+cfs_expr_list_free_list(struct list_head *list)
+{
+	struct cfs_expr_list *el;
+
+	while (!list_empty(list)) {
+		el = list_entry(list->next, struct cfs_expr_list, el_link);
+		list_del(&el->el_link);
+		cfs_expr_list_free(el);
+	}
+}
+EXPORT_SYMBOL(cfs_expr_list_free_list);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
new file mode 100644
index 000000000..389fb9eee
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -0,0 +1,1040 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, 2015 Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include "../../../include/linux/libcfs/libcfs.h"
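+
+/* Usage sketch for the CPT API defined below (illustrative only, not
+ * part of the original change): a module typically sizes per-partition
+ * state off the global table, e.g.
+ *
+ *	int ncpt = cfs_cpt_number(cfs_cpt_table);
+ *	struct foo **vars = cfs_percpt_alloc(cfs_cpt_table, sizeof(**vars));
+ *
+ *	vars[cfs_cpt_current(cfs_cpt_table, 1)]->counter++;
+ *
+ * 'struct foo' is a hypothetical per-partition payload.
+ */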
+
+#ifdef CONFIG_SMP
+
+/**
+ * modparam for setting number of partitions
+ *
+ *  0 : estimate best value based on cores or NUMA nodes
+ *  1 : disable multiple partitions
+ * >1 : specify number of partitions
+ */
+static int cpu_npartitions;
+module_param(cpu_npartitions, int, 0444);
+MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
+
+/**
+ * modparam for setting CPU partition patterns:
+ *
+ * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID,
+ *      number in bracket is processor ID (core or HT)
+ *
+ * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
+ *      are NUMA node IDs, number before bracket is CPU partition ID.
+ *
+ * NB: if the user specifies cpu_pattern, cpu_npartitions will be ignored
+ */
+static char *cpu_pattern = "";
+module_param(cpu_pattern, charp, 0444);
+MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
+
+struct cfs_cpt_data {
+	/* serialize hotplug etc */
+	spinlock_t cpt_lock;
+	/* reserved for hotplug */
+	unsigned long cpt_version;
+	/* mutex to protect cpt_cpumask */
+	struct mutex cpt_mutex;
+	/* scratch buffer for set/unset_node */
+	cpumask_t *cpt_cpumask;
+};
+
+static struct cfs_cpt_data cpt_data;
+
+void
+cfs_cpt_table_free(struct cfs_cpt_table *cptab)
+{
+	int i;
+
+	if (cptab->ctb_cpu2cpt) {
+		LIBCFS_FREE(cptab->ctb_cpu2cpt,
+			    num_possible_cpus() *
+			    sizeof(cptab->ctb_cpu2cpt[0]));
+	}
+
+	for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
+		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
+
+		if (part->cpt_nodemask) {
+			LIBCFS_FREE(part->cpt_nodemask,
+				    sizeof(*part->cpt_nodemask));
+		}
+
+		if (part->cpt_cpumask)
+			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
+	}
+
+	if (cptab->ctb_parts) {
+		LIBCFS_FREE(cptab->ctb_parts,
+			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
+	}
+
+	if (cptab->ctb_nodemask)
+		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
+	if (cptab->ctb_cpumask)
+		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
+
+	LIBCFS_FREE(cptab, sizeof(*cptab));
+}
+EXPORT_SYMBOL(cfs_cpt_table_free);
+
+struct cfs_cpt_table *
+cfs_cpt_table_alloc(unsigned int ncpt)
+{
+	struct cfs_cpt_table *cptab;
+	int i;
+
+	LIBCFS_ALLOC(cptab, sizeof(*cptab));
+	if (!cptab)
+		return NULL;
+
+	cptab->ctb_nparts = ncpt;
+
+	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
+	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
+
+	if (!cptab->ctb_cpumask || !cptab->ctb_nodemask)
+		goto failed;
+
+	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
+		     num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
+	if (!cptab->ctb_cpu2cpt)
+		goto failed;
+
+	memset(cptab->ctb_cpu2cpt, -1,
+	       num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
+
+	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
+	if (!cptab->ctb_parts)
+		goto failed;
+
+	for (i = 0; i < ncpt; i++) {
+		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
+
+		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
+		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
+		if (!part->cpt_cpumask || !part->cpt_nodemask)
+			goto failed;
+	}
+
+	spin_lock(&cpt_data.cpt_lock);
+	/* Reserved for hotplug */
+	cptab->ctb_version = cpt_data.cpt_version;
+	spin_unlock(&cpt_data.cpt_lock);
+
+	return cptab;
+
+ failed:
+	cfs_cpt_table_free(cptab);
+	return NULL;
+}
+EXPORT_SYMBOL(cfs_cpt_table_alloc);
+
+int
+cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf,
int len) +{ + char *tmp = buf; + int rc = 0; + int i; + int j; + + for (i = 0; i < cptab->ctb_nparts; i++) { + if (len > 0) { + rc = snprintf(tmp, len, "%d\t: ", i); + len -= rc; + } + + if (len <= 0) { + rc = -EFBIG; + goto out; + } + + tmp += rc; + for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) { + rc = snprintf(tmp, len, "%d ", j); + len -= rc; + if (len <= 0) { + rc = -EFBIG; + goto out; + } + tmp += rc; + } + + *tmp = '\n'; + tmp++; + len--; + } + + out: + if (rc < 0) + return rc; + + return tmp - buf; +} +EXPORT_SYMBOL(cfs_cpt_table_print); + +int +cfs_cpt_number(struct cfs_cpt_table *cptab) +{ + return cptab->ctb_nparts; +} +EXPORT_SYMBOL(cfs_cpt_number); + +int +cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) +{ + LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); + + return cpt == CFS_CPT_ANY ? + cpumask_weight(cptab->ctb_cpumask) : + cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask); +} +EXPORT_SYMBOL(cfs_cpt_weight); + +int +cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) +{ + LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); + + return cpt == CFS_CPT_ANY ? + cpumask_any_and(cptab->ctb_cpumask, + cpu_online_mask) < nr_cpu_ids : + cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, + cpu_online_mask) < nr_cpu_ids; +} +EXPORT_SYMBOL(cfs_cpt_online); + +cpumask_t * +cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt) +{ + LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); + + return cpt == CFS_CPT_ANY ? + cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask; +} +EXPORT_SYMBOL(cfs_cpt_cpumask); + +nodemask_t * +cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) +{ + LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); + + return cpt == CFS_CPT_ANY ? 
+ cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask; +} +EXPORT_SYMBOL(cfs_cpt_nodemask); + +int +cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) +{ + int node; + + LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); + + if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { + CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); + return 0; + } + + if (cptab->ctb_cpu2cpt[cpu] != -1) { + CDEBUG(D_INFO, "CPU %d is already in partition %d\n", + cpu, cptab->ctb_cpu2cpt[cpu]); + return 0; + } + + cptab->ctb_cpu2cpt[cpu] = cpt; + + LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask)); + LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); + + cpumask_set_cpu(cpu, cptab->ctb_cpumask); + cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); + + node = cpu_to_node(cpu); + + /* first CPU of @node in this CPT table */ + if (!node_isset(node, *cptab->ctb_nodemask)) + node_set(node, *cptab->ctb_nodemask); + + /* first CPU of @node in this partition */ + if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)) + node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask); + + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_cpu); + +void +cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) +{ + int node; + int i; + + LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); + + if (cpu < 0 || cpu >= nr_cpu_ids) { + CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); + return; + } + + if (cpt == CFS_CPT_ANY) { + /* caller doesn't know the partition ID */ + cpt = cptab->ctb_cpu2cpt[cpu]; + if (cpt < 0) { /* not set in this CPT-table */ + CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n", + cpt, cptab); + return; + } + + } else if (cpt != cptab->ctb_cpu2cpt[cpu]) { + CDEBUG(D_INFO, + "CPU %d is not in cpu-partition %d\n", cpu, cpt); + return; + } + + LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); + LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask)); + + cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); + cpumask_clear_cpu(cpu, cptab->ctb_cpumask); + cptab->ctb_cpu2cpt[cpu] = -1; + + node = cpu_to_node(cpu); + + LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); + LASSERT(node_isset(node, *cptab->ctb_nodemask)); + + for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) { + /* this CPT has other CPU belonging to this node? */ + if (cpu_to_node(i) == node) + break; + } + + if (i >= nr_cpu_ids) + node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); + + for_each_cpu(i, cptab->ctb_cpumask) { + /* this CPT-table has other CPU belonging to this node? 
*/ + if (cpu_to_node(i) == node) + break; + } + + if (i >= nr_cpu_ids) + node_clear(node, *cptab->ctb_nodemask); +} +EXPORT_SYMBOL(cfs_cpt_unset_cpu); + +int +cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) +{ + int i; + + if (cpumask_weight(mask) == 0 || + cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { + CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", + cpt); + return 0; + } + + for_each_cpu(i, mask) { + if (!cfs_cpt_set_cpu(cptab, cpt, i)) + return 0; + } + + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_cpumask); + +void +cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) +{ + int i; + + for_each_cpu(i, mask) + cfs_cpt_unset_cpu(cptab, cpt, i); +} +EXPORT_SYMBOL(cfs_cpt_unset_cpumask); + +int +cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) +{ + cpumask_t *mask; + int rc; + + if (node < 0 || node >= MAX_NUMNODES) { + CDEBUG(D_INFO, + "Invalid NUMA id %d for CPU partition %d\n", node, cpt); + return 0; + } + + mutex_lock(&cpt_data.cpt_mutex); + + mask = cpt_data.cpt_cpumask; + cpumask_copy(mask, cpumask_of_node(node)); + + rc = cfs_cpt_set_cpumask(cptab, cpt, mask); + + mutex_unlock(&cpt_data.cpt_mutex); + + return rc; +} +EXPORT_SYMBOL(cfs_cpt_set_node); + +void +cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) +{ + cpumask_t *mask; + + if (node < 0 || node >= MAX_NUMNODES) { + CDEBUG(D_INFO, + "Invalid NUMA id %d for CPU partition %d\n", node, cpt); + return; + } + + mutex_lock(&cpt_data.cpt_mutex); + + mask = cpt_data.cpt_cpumask; + cpumask_copy(mask, cpumask_of_node(node)); + + cfs_cpt_unset_cpumask(cptab, cpt, mask); + + mutex_unlock(&cpt_data.cpt_mutex); +} +EXPORT_SYMBOL(cfs_cpt_unset_node); + +int +cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) +{ + int i; + + for_each_node_mask(i, *mask) { + if (!cfs_cpt_set_node(cptab, cpt, i)) + return 0; + } + + return 1; +} +EXPORT_SYMBOL(cfs_cpt_set_nodemask); + +void +cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) +{ + int i; + + for_each_node_mask(i, *mask) + cfs_cpt_unset_node(cptab, cpt, i); +} +EXPORT_SYMBOL(cfs_cpt_unset_nodemask); + +void +cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) +{ + int last; + int i; + + if (cpt == CFS_CPT_ANY) { + last = cptab->ctb_nparts - 1; + cpt = 0; + } else { + last = cpt; + } + + for (; cpt <= last; cpt++) { + for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) + cfs_cpt_unset_cpu(cptab, cpt, i); + } +} +EXPORT_SYMBOL(cfs_cpt_clear); + +int +cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) +{ + nodemask_t *mask; + int weight; + int rotor; + int node; + + /* convert CPU partition ID to HW node id */ + + if (cpt < 0 || cpt >= cptab->ctb_nparts) { + mask = cptab->ctb_nodemask; + rotor = cptab->ctb_spread_rotor++; + } else { + mask = cptab->ctb_parts[cpt].cpt_nodemask; + rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++; + } + + weight = nodes_weight(*mask); + LASSERT(weight > 0); + + rotor %= weight; + + for_each_node_mask(node, *mask) { + if (rotor-- == 0) + return node; + } + + LBUG(); + return 0; +} +EXPORT_SYMBOL(cfs_cpt_spread_node); + +int +cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) +{ + int cpu = smp_processor_id(); + int cpt = cptab->ctb_cpu2cpt[cpu]; + + if (cpt < 0) { + if (!remap) + return cpt; + + /* don't return negative value for safety of upper layer, + * instead we shadow the unknown cpu to a valid partition ID + */ + cpt = cpu % cptab->ctb_nparts; + } + + return cpt; +} 
+EXPORT_SYMBOL(cfs_cpt_current); + +int +cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) +{ + LASSERT(cpu >= 0 && cpu < nr_cpu_ids); + + return cptab->ctb_cpu2cpt[cpu]; +} +EXPORT_SYMBOL(cfs_cpt_of_cpu); + +int +cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) +{ + cpumask_t *cpumask; + nodemask_t *nodemask; + int rc; + int i; + + LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); + + if (cpt == CFS_CPT_ANY) { + cpumask = cptab->ctb_cpumask; + nodemask = cptab->ctb_nodemask; + } else { + cpumask = cptab->ctb_parts[cpt].cpt_cpumask; + nodemask = cptab->ctb_parts[cpt].cpt_nodemask; + } + + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { + CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n", + cpt); + return -EINVAL; + } + + for_each_online_cpu(i) { + if (cpumask_test_cpu(i, cpumask)) + continue; + + rc = set_cpus_allowed_ptr(current, cpumask); + set_mems_allowed(*nodemask); + if (rc == 0) + schedule(); /* switch to allowed CPU */ + + return rc; + } + + /* don't need to set affinity because all online CPUs are covered */ + return 0; +} +EXPORT_SYMBOL(cfs_cpt_bind); + +/** + * Choose max to \a number CPUs from \a node and set them in \a cpt. + * We always prefer to choose CPU in the same core/socket. + */ +static int +cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, + cpumask_t *node, int number) +{ + cpumask_t *socket = NULL; + cpumask_t *core = NULL; + int rc = 0; + int cpu; + + LASSERT(number > 0); + + if (number >= cpumask_weight(node)) { + while (!cpumask_empty(node)) { + cpu = cpumask_first(node); + + rc = cfs_cpt_set_cpu(cptab, cpt, cpu); + if (!rc) + return -EINVAL; + cpumask_clear_cpu(cpu, node); + } + return 0; + } + + /* allocate scratch buffer */ + LIBCFS_ALLOC(socket, cpumask_size()); + LIBCFS_ALLOC(core, cpumask_size()); + if (!socket || !core) { + rc = -ENOMEM; + goto out; + } + + while (!cpumask_empty(node)) { + cpu = cpumask_first(node); + + /* get cpumask for cores in the same socket */ + cpumask_copy(socket, topology_core_cpumask(cpu)); + cpumask_and(socket, socket, node); + + LASSERT(!cpumask_empty(socket)); + + while (!cpumask_empty(socket)) { + int i; + + /* get cpumask for hts in the same core */ + cpumask_copy(core, topology_sibling_cpumask(cpu)); + cpumask_and(core, core, node); + + LASSERT(!cpumask_empty(core)); + + for_each_cpu(i, core) { + cpumask_clear_cpu(i, socket); + cpumask_clear_cpu(i, node); + + rc = cfs_cpt_set_cpu(cptab, cpt, i); + if (!rc) { + rc = -EINVAL; + goto out; + } + + if (--number == 0) + goto out; + } + cpu = cpumask_first(socket); + } + } + + out: + if (socket) + LIBCFS_FREE(socket, cpumask_size()); + if (core) + LIBCFS_FREE(core, cpumask_size()); + return rc; +} + +#define CPT_WEIGHT_MIN 4u + +static unsigned int +cfs_cpt_num_estimate(void) +{ + unsigned nnode = num_online_nodes(); + unsigned ncpu = num_online_cpus(); + unsigned ncpt; + + if (ncpu <= CPT_WEIGHT_MIN) { + ncpt = 1; + goto out; + } + + /* generate reasonable number of CPU partitions based on total number + * of CPUs, Preferred N should be power2 and match this condition: + * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 + */ + for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) + ; + + if (ncpt <= nnode) { /* fat numa system */ + while (nnode > ncpt) + nnode >>= 1; + + } else { /* ncpt > nnode */ + while ((nnode << 1) <= ncpt) + nnode <<= 1; + } + + ncpt = nnode; + + out: +#if (BITS_PER_LONG == 32) + /* config many CPU partitions on 32-bit 
system could consume + * too much memory + */ + ncpt = min(2U, ncpt); +#endif + while (ncpu % ncpt != 0) + ncpt--; /* worst case is 1 */ + + return ncpt; +} + +static struct cfs_cpt_table * +cfs_cpt_table_create(int ncpt) +{ + struct cfs_cpt_table *cptab = NULL; + cpumask_t *mask = NULL; + int cpt = 0; + int num; + int rc; + int i; + + rc = cfs_cpt_num_estimate(); + if (ncpt <= 0) + ncpt = rc; + + if (ncpt > num_online_cpus() || ncpt > 4 * rc) { + CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n", + ncpt, rc); + } + + if (num_online_cpus() % ncpt != 0) { + CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n", + (int)num_online_cpus(), ncpt); + goto failed; + } + + cptab = cfs_cpt_table_alloc(ncpt); + if (!cptab) { + CERROR("Failed to allocate CPU map(%d)\n", ncpt); + goto failed; + } + + num = num_online_cpus() / ncpt; + if (num == 0) { + CERROR("CPU changed while setting CPU partition\n"); + goto failed; + } + + LIBCFS_ALLOC(mask, cpumask_size()); + if (!mask) { + CERROR("Failed to allocate scratch cpumask\n"); + goto failed; + } + + for_each_online_node(i) { + cpumask_copy(mask, cpumask_of_node(i)); + + while (!cpumask_empty(mask)) { + struct cfs_cpu_partition *part; + int n; + + if (cpt >= ncpt) + goto failed; + + part = &cptab->ctb_parts[cpt]; + + n = num - cpumask_weight(part->cpt_cpumask); + LASSERT(n > 0); + + rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); + if (rc < 0) + goto failed; + + LASSERT(num >= cpumask_weight(part->cpt_cpumask)); + if (num == cpumask_weight(part->cpt_cpumask)) + cpt++; + } + } + + if (cpt != ncpt || + num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { + CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", + cptab->ctb_nparts, num, cpt, + cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)); + goto failed; + } + + LIBCFS_FREE(mask, cpumask_size()); + + return cptab; + + failed: + CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n", + ncpt, num_online_nodes(), num_online_cpus()); + + if (mask) + LIBCFS_FREE(mask, cpumask_size()); + + if (cptab) + cfs_cpt_table_free(cptab); + + return NULL; +} + +static struct cfs_cpt_table * +cfs_cpt_table_create_pattern(char *pattern) +{ + struct cfs_cpt_table *cptab; + char *str = pattern; + int node = 0; + int high; + int ncpt; + int c; + + for (ncpt = 0;; ncpt++) { /* quick scan bracket */ + str = strchr(str, '['); + if (!str) + break; + str++; + } + + str = cfs_trimwhite(pattern); + if (*str == 'n' || *str == 'N') { + pattern = str + 1; + node = 1; + } + + if (ncpt == 0 || + (node && ncpt > num_online_nodes()) || + (!node && ncpt > num_online_cpus())) { + CERROR("Invalid pattern %s, or too many partitions %d\n", + pattern, ncpt); + return NULL; + } + + high = node ? 
MAX_NUMNODES - 1 : nr_cpu_ids - 1; + + cptab = cfs_cpt_table_alloc(ncpt); + if (!cptab) { + CERROR("Failed to allocate cpu partition table\n"); + return NULL; + } + + for (str = cfs_trimwhite(pattern), c = 0;; c++) { + struct cfs_range_expr *range; + struct cfs_expr_list *el; + char *bracket = strchr(str, '['); + int cpt; + int rc; + int i; + int n; + + if (!bracket) { + if (*str != 0) { + CERROR("Invalid pattern %s\n", str); + goto failed; + } + if (c != ncpt) { + CERROR("expect %d partitions but found %d\n", + ncpt, c); + goto failed; + } + break; + } + + if (sscanf(str, "%d%n", &cpt, &n) < 1) { + CERROR("Invalid cpu pattern %s\n", str); + goto failed; + } + + if (cpt < 0 || cpt >= ncpt) { + CERROR("Invalid partition id %d, total partitions %d\n", + cpt, ncpt); + goto failed; + } + + if (cfs_cpt_weight(cptab, cpt) != 0) { + CERROR("Partition %d has already been set.\n", cpt); + goto failed; + } + + str = cfs_trimwhite(str + n); + if (str != bracket) { + CERROR("Invalid pattern %s\n", str); + goto failed; + } + + bracket = strchr(str, ']'); + if (!bracket) { + CERROR("missing right bracket for cpt %d, %s\n", + cpt, str); + goto failed; + } + + if (cfs_expr_list_parse(str, (bracket - str) + 1, + 0, high, &el) != 0) { + CERROR("Can't parse number range: %s\n", str); + goto failed; + } + + list_for_each_entry(range, &el->el_exprs, re_link) { + for (i = range->re_lo; i <= range->re_hi; i++) { + if ((i - range->re_lo) % range->re_stride != 0) + continue; + + rc = node ? cfs_cpt_set_node(cptab, cpt, i) : + cfs_cpt_set_cpu(cptab, cpt, i); + if (!rc) { + cfs_expr_list_free(el); + goto failed; + } + } + } + + cfs_expr_list_free(el); + + if (!cfs_cpt_online(cptab, cpt)) { + CERROR("No online CPU is found on partition %d\n", cpt); + goto failed; + } + + str = cfs_trimwhite(bracket + 1); + } + + return cptab; + + failed: + cfs_cpt_table_free(cptab); + return NULL; +} + +#ifdef CONFIG_HOTPLUG_CPU +static int +cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + bool warn; + + switch (action) { + case CPU_DEAD: + case CPU_DEAD_FROZEN: + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + spin_lock(&cpt_data.cpt_lock); + cpt_data.cpt_version++; + spin_unlock(&cpt_data.cpt_lock); + /* Fall through */ + default: + if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) { + CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n", + cpu, action); + break; + } + + mutex_lock(&cpt_data.cpt_mutex); + /* if all HTs in a core are offline, it may break affinity */ + cpumask_copy(cpt_data.cpt_cpumask, + topology_sibling_cpumask(cpu)); + warn = cpumask_any_and(cpt_data.cpt_cpumask, + cpu_online_mask) >= nr_cpu_ids; + mutex_unlock(&cpt_data.cpt_mutex); + CDEBUG(warn ? 
D_WARNING : D_INFO, + "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", + cpu, action); + } + + return NOTIFY_OK; +} + +static struct notifier_block cfs_cpu_notifier = { + .notifier_call = cfs_cpu_notify, + .priority = 0 +}; + +#endif + +void +cfs_cpu_fini(void) +{ + if (cfs_cpt_table) + cfs_cpt_table_free(cfs_cpt_table); + +#ifdef CONFIG_HOTPLUG_CPU + unregister_hotcpu_notifier(&cfs_cpu_notifier); +#endif + if (cpt_data.cpt_cpumask) + LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size()); +} + +int +cfs_cpu_init(void) +{ + LASSERT(!cfs_cpt_table); + + memset(&cpt_data, 0, sizeof(cpt_data)); + + LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size()); + if (!cpt_data.cpt_cpumask) { + CERROR("Failed to allocate scratch buffer\n"); + return -1; + } + + spin_lock_init(&cpt_data.cpt_lock); + mutex_init(&cpt_data.cpt_mutex); + +#ifdef CONFIG_HOTPLUG_CPU + register_hotcpu_notifier(&cfs_cpu_notifier); +#endif + + if (*cpu_pattern != 0) { + cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); + if (!cfs_cpt_table) { + CERROR("Failed to create cptab from pattern %s\n", + cpu_pattern); + goto failed; + } + + } else { + cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions); + if (!cfs_cpt_table) { + CERROR("Failed to create ptable with npartitions %d\n", + cpu_npartitions); + goto failed; + } + } + + spin_lock(&cpt_data.cpt_lock); + if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) { + spin_unlock(&cpt_data.cpt_lock); + CERROR("CPU hotplug/unplug during setup\n"); + goto failed; + } + spin_unlock(&cpt_data.cpt_lock); + + LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n", + num_online_cpus(), cfs_cpt_number(cfs_cpt_table)); + return 0; + + failed: + cfs_cpu_fini(); + return -1; +} + +#endif diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c new file mode 100644 index 000000000..db0572733 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c @@ -0,0 +1,137 @@ +/* GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see http://www.gnu.org/licenses + * + * Please visit http://www.xyratex.com/contact if you need additional + * information or have any questions. + * + * GPL HEADER END + */ + +/* + * Copyright 2012 Xyratex Technology Limited + */ + +/* + * This is crypto api shash wrappers to zlib_adler32. 
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include <crypto/internal/hash.h>
+#include "linux-crypto.h"
+
+#define CHKSUM_BLOCK_SIZE	1
+#define CHKSUM_DIGEST_SIZE	4
+
+static int adler32_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = 1;
+
+	return 0;
+}
+
+static int adler32_setkey(struct crypto_shash *hash, const u8 *key,
+			  unsigned int keylen)
+{
+	u32 *mctx = crypto_shash_ctx(hash);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	*mctx = *(u32 *)key;
+	return 0;
+}
+
+static int adler32_init(struct shash_desc *desc)
+{
+	u32 *mctx = crypto_shash_ctx(desc->tfm);
+	u32 *cksump = shash_desc_ctx(desc);
+
+	*cksump = *mctx;
+
+	return 0;
+}
+
+static int adler32_update(struct shash_desc *desc, const u8 *data,
+			  unsigned int len)
+{
+	u32 *cksump = shash_desc_ctx(desc);
+
+	*cksump = zlib_adler32(*cksump, data, len);
+	return 0;
+}
+
+static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len,
+			   u8 *out)
+{
+	*(u32 *)out = zlib_adler32(*cksump, data, len);
+	return 0;
+}
+
+static int adler32_finup(struct shash_desc *desc, const u8 *data,
+			 unsigned int len, u8 *out)
+{
+	return __adler32_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int adler32_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *cksump = shash_desc_ctx(desc);
+
+	*(u32 *)out = *cksump;
+	return 0;
+}
+
+static int adler32_digest(struct shash_desc *desc, const u8 *data,
+			  unsigned int len, u8 *out)
+{
+	return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len,
+			       out);
+}
+
+static struct shash_alg alg = {
+	.setkey		= adler32_setkey,
+	.init		= adler32_init,
+	.update		= adler32_update,
+	.final		= adler32_final,
+	.finup		= adler32_finup,
+	.digest		= adler32_digest,
+	.descsize	= sizeof(u32),
+	.digestsize	= CHKSUM_DIGEST_SIZE,
+	.base		= {
+		.cra_name		= "adler32",
+		.cra_driver_name	= "adler32-zlib",
+		.cra_priority		= 100,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(u32),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= adler32_cra_init,
+	}
+};
+
+int cfs_crypto_adler32_register(void)
+{
+	return crypto_register_shash(&alg);
+}
+
+void cfs_crypto_adler32_unregister(void)
+{
+	crypto_unregister_shash(&alg);
+}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
new file mode 100644
index 000000000..8c9377ed8
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -0,0 +1,297 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+
+#include <crypto/hash.h>
+#include <linux/scatterlist.h>
+#include "../../../include/linux/libcfs/libcfs.h"
+#include "linux-crypto.h"
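+
+/* Usage sketch for the wrappers below (illustrative only, not part of
+ * the original change). One-shot:
+ *
+ *	unsigned char sum[4];
+ *	unsigned int len = sizeof(sum);
+ *
+ *	cfs_crypto_hash_digest(CFS_HASH_ALG_ADLER32, buf, buf_len,
+ *			       NULL, 0, sum, &len);
+ *
+ * Streaming: cfs_crypto_hash_init(), then any mix of
+ * cfs_crypto_hash_update()/cfs_crypto_hash_update_page(), then
+ * cfs_crypto_hash_final(). CFS_HASH_ALG_ADLER32 is assumed to be one
+ * of the algorithm ids declared by libcfs.
+ */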
+/**
+ * Array of hash algorithm speed in MByte per second
+ */
+static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
+
+static int cfs_crypto_hash_alloc(unsigned char alg_id,
+				 const struct cfs_crypto_hash_type **type,
+				 struct ahash_request **req,
+				 unsigned char *key,
+				 unsigned int key_len)
+{
+	struct crypto_ahash *tfm;
+	int err = 0;
+
+	*type = cfs_crypto_hash_type(alg_id);
+
+	if (!*type) {
+		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
+		      alg_id, CFS_HASH_ALG_MAX);
+		return -EINVAL;
+	}
+	tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
+
+	if (IS_ERR(tfm)) {
+		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
+		       (*type)->cht_name);
+		return PTR_ERR(tfm);
+	}
+
+	*req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (!*req) {
+		CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
+		       (*type)->cht_name);
+		crypto_free_ahash(tfm);
+		return -ENOMEM;
+	}
+
+	ahash_request_set_callback(*req, 0, NULL, NULL);
+
+	/** Shash has different logic for initialization than digest
+	 * shash: crypto_hash_setkey, crypto_hash_init
+	 * digest: crypto_digest_init, crypto_digest_setkey
+	 * Skip this function for digest, because we use shash logic at
+	 * cfs_crypto_hash_alloc.
+	 */
+	if (key)
+		err = crypto_ahash_setkey(tfm, key, key_len);
+	else if ((*type)->cht_key != 0)
+		err = crypto_ahash_setkey(tfm,
+					  (unsigned char *)&((*type)->cht_key),
+					  (*type)->cht_size);
+
+	if (err != 0) {
+		crypto_free_ahash(tfm);
+		return err;
+	}
+
+	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
+	       crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
+	       cfs_crypto_hash_speeds[alg_id]);
+
+	err = crypto_ahash_init(*req);
+	if (err) {
+		ahash_request_free(*req);
+		crypto_free_ahash(tfm);
+	}
+	return err;
+}
+
+int cfs_crypto_hash_digest(unsigned char alg_id,
+			   const void *buf, unsigned int buf_len,
+			   unsigned char *key, unsigned int key_len,
+			   unsigned char *hash, unsigned int *hash_len)
+{
+	struct scatterlist sl;
+	struct ahash_request *req;
+	int err;
+	const struct cfs_crypto_hash_type *type;
+
+	if (!buf || buf_len == 0 || !hash_len)
+		return -EINVAL;
+
+	err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+	if (err != 0)
+		return err;
+
+	if (!hash || *hash_len < type->cht_size) {
+		*hash_len = type->cht_size;
+		crypto_free_ahash(crypto_ahash_reqtfm(req));
+		ahash_request_free(req);
+		return -ENOSPC;
+	}
+	sg_init_one(&sl, buf, buf_len);
+
+	ahash_request_set_crypt(req, &sl, hash, sl.length);
+	err = crypto_ahash_digest(req);
+	crypto_free_ahash(crypto_ahash_reqtfm(req));
+	ahash_request_free(req);
+
+	return err;
+}
+EXPORT_SYMBOL(cfs_crypto_hash_digest);
+
+struct cfs_crypto_hash_desc *
+	cfs_crypto_hash_init(unsigned char alg_id,
+			     unsigned char *key, unsigned int key_len)
+{
+	struct ahash_request *req;
+	int err;
+	const struct cfs_crypto_hash_type *type;
+
+	err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+
+	if (err)
+		return ERR_PTR(err);
+	return (struct cfs_crypto_hash_desc *)req;
+}
+EXPORT_SYMBOL(cfs_crypto_hash_init);
+
+int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
+				struct page *page, unsigned int offset,
+				unsigned int len)
+{
+	struct ahash_request *req = (void *)hdesc;
+	struct scatterlist sl;
+
+	sg_init_table(&sl, 1);
+	sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
+
+	ahash_request_set_crypt(req, &sl, NULL, sl.length);
+	return crypto_ahash_update(req);
+}
+EXPORT_SYMBOL(cfs_crypto_hash_update_page); + +int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc, + const void *buf, unsigned int buf_len) +{ + struct ahash_request *req = (void *)hdesc; + struct scatterlist sl; + + sg_init_one(&sl, buf, buf_len); + + ahash_request_set_crypt(req, &sl, NULL, sl.length); + return crypto_ahash_update(req); +} +EXPORT_SYMBOL(cfs_crypto_hash_update); + +/* If hash_len pointer is NULL - destroy descriptor. */ +int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, + unsigned char *hash, unsigned int *hash_len) +{ + int err; + struct ahash_request *req = (void *)hdesc; + int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); + + if (!hash_len) { + crypto_free_ahash(crypto_ahash_reqtfm(req)); + ahash_request_free(req); + return 0; + } + if (!hash || *hash_len < size) { + *hash_len = size; + return -ENOSPC; + } + ahash_request_set_crypt(req, NULL, hash, 0); + err = crypto_ahash_final(req); + + if (err < 0) { + /* May be caller can fix error */ + return err; + } + crypto_free_ahash(crypto_ahash_reqtfm(req)); + ahash_request_free(req); + return err; +} +EXPORT_SYMBOL(cfs_crypto_hash_final); + +static void cfs_crypto_performance_test(unsigned char alg_id, + const unsigned char *buf, + unsigned int buf_len) +{ + unsigned long start, end; + int bcount, err = 0; + int sec = 1; /* do test only 1 sec */ + unsigned char hash[64]; + unsigned int hash_len = 64; + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, + hash, &hash_len); + if (err) + break; + } + end = jiffies; + + if (err) { + cfs_crypto_hash_speeds[alg_id] = -1; + CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n", + cfs_crypto_hash_name(alg_id), err); + } else { + unsigned long tmp; + + tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * + 1000) / (1024 * 1024); + cfs_crypto_hash_speeds[alg_id] = (int)tmp; + } + CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n", + cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]); +} + +int cfs_crypto_hash_speed(unsigned char hash_alg) +{ + if (hash_alg < CFS_HASH_ALG_MAX) + return cfs_crypto_hash_speeds[hash_alg]; + return -1; +} +EXPORT_SYMBOL(cfs_crypto_hash_speed); + +/** + * Do performance test for all hash algorithms. + */ +static int cfs_crypto_test_hashes(void) +{ + unsigned char i; + unsigned char *data; + unsigned int j; + /* Data block size for testing hash. Maximum + * kmalloc size for 2.6.18 kernel is 128K + */ + unsigned int data_len = 1 * 128 * 1024; + + data = kmalloc(data_len, 0); + if (!data) + return -ENOMEM; + + for (j = 0; j < data_len; j++) + data[j] = j & 0xff; + + for (i = 0; i < CFS_HASH_ALG_MAX; i++) + cfs_crypto_performance_test(i, data, data_len); + + kfree(data); + return 0; +} + +static int adler32; + +int cfs_crypto_register(void) +{ + request_module("crc32c"); + + adler32 = cfs_crypto_adler32_register(); + + /* check all algorithms and do performance test */ + cfs_crypto_test_hashes(); + return 0; +} + +void cfs_crypto_unregister(void) +{ + if (adler32 == 0) + cfs_crypto_adler32_unregister(); +} diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h new file mode 100644 index 000000000..18e8cd4d8 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h @@ -0,0 +1,29 @@ + /* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/**
+ * Functions for start/stop shash adler32 algorithm.
+ */
+int cfs_crypto_adler32_register(void);
+void cfs_crypto_adler32_unregister(void);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
new file mode 100644
index 000000000..13d31e8a9
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
@@ -0,0 +1,111 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/linux/linux-curproc.c
+ *
+ * Lustre curproc API implementation for Linux kernel
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/fs_struct.h>
+
+#include <linux/compat.h>
+#include <linux/thread_info.h>
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "../../../include/linux/libcfs/libcfs.h"
+
+/*
+ * Implementation of cfs_curproc API (see portals/include/libcfs/curproc.h)
+ * for Linux kernel.
+ */
+
+void cfs_cap_raise(cfs_cap_t cap)
+{
+	struct cred *cred;
+
+	cred = prepare_creds();
+	if (cred) {
+		cap_raise(cred->cap_effective, cap);
+		commit_creds(cred);
+	}
+}
+EXPORT_SYMBOL(cfs_cap_raise);
+
+void cfs_cap_lower(cfs_cap_t cap)
+{
+	struct cred *cred;
+
+	cred = prepare_creds();
+	if (cred) {
+		cap_lower(cred->cap_effective, cap);
+		commit_creds(cred);
+	}
+}
+EXPORT_SYMBOL(cfs_cap_lower);
+
+int cfs_cap_raised(cfs_cap_t cap)
+{
+	return cap_raised(current_cap(), cap);
+}
+EXPORT_SYMBOL(cfs_cap_raised);
+
+static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
+{
+	/* XXX lost high byte */
+	*cap = kcap.cap[0];
+}
+
+cfs_cap_t cfs_curproc_cap_pack(void)
+{
+	cfs_cap_t cap;
+
+	cfs_kernel_cap_pack(current_cap(), &cap);
+	return cap;
+}
+EXPORT_SYMBOL(cfs_curproc_cap_pack);
+
+/*
+ * Local variables:
+ * c-indentation-style: "K&R"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * fill-column: 80
+ * scroll-step: 1
+ * End:
+ */
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
new file mode 100644
index 000000000..638e4b33d
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -0,0 +1,200 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/linux/linux-debug.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+# define DEBUG_SUBSYSTEM S_LNET
+
+#include "../../../include/linux/libcfs/libcfs.h"
+
+#include "../tracefile.h"
+
+#include <asm/div64.h>
+
+char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
+char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
+
+/**
+ * Upcall function once a Lustre log has been dumped.
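+ *
+ * For illustration (not part of the original text): with the default
+ * setting above, dumping a log to the hypothetical path
+ * /tmp/lustre-log.123 results in the call
+ * "/usr/lib/lustre/lnet_debug_log_upcall /tmp/lustre-log.123".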
+ * + * \param file path of the dumped log + */ +void libcfs_run_debug_log_upcall(char *file) +{ + char *argv[3]; + int rc; + char *envp[] = { + "HOME=/", + "PATH=/sbin:/bin:/usr/sbin:/usr/bin", + NULL}; + + argv[0] = lnet_debug_log_upcall; + + LASSERTF(file, "called on a null filename\n"); + argv[1] = file; /* only need to pass the path of the file */ + + argv[2] = NULL; + + rc = call_usermodehelper(argv[0], argv, envp, 1); + if (rc < 0 && rc != -ENOENT) { + CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n", + rc, argv[0], argv[1]); + } else { + CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n", + argv[0], argv[1]); + } +} + +void libcfs_run_upcall(char **argv) +{ + int rc; + int argc; + char *envp[] = { + "HOME=/", + "PATH=/sbin:/bin:/usr/sbin:/usr/bin", + NULL}; + + argv[0] = lnet_upcall; + argc = 1; + while (argv[argc]) + argc++; + + LASSERT(argc >= 2); + + rc = call_usermodehelper(argv[0], argv, envp, 1); + if (rc < 0 && rc != -ENOENT) { + CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n", + rc, argv[0], argv[1], + argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], + argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], + argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], + argc < 6 ? "" : ",..."); + } else { + CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n", + argv[0], argv[1], + argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], + argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], + argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], + argc < 6 ? "" : ",..."); + } +} + +void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata) +{ + char *argv[6]; + char buf[32]; + + snprintf(buf, sizeof(buf), "%d", msgdata->msg_line); + + argv[1] = "LBUG"; + argv[2] = (char *)msgdata->msg_file; + argv[3] = (char *)msgdata->msg_fn; + argv[4] = buf; + argv[5] = NULL; + + libcfs_run_upcall(argv); +} +EXPORT_SYMBOL(libcfs_run_lbug_upcall); + +/* coverity[+kill] */ +void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) +{ + libcfs_catastrophe = 1; + libcfs_debug_msg(msgdata, "LBUG\n"); + + if (in_interrupt()) { + panic("LBUG in interrupt.\n"); + /* not reached */ + } + + dump_stack(); + if (!libcfs_panic_on_lbug) + libcfs_debug_dumplog(); + libcfs_run_lbug_upcall(msgdata); + if (libcfs_panic_on_lbug) + panic("LBUG"); + set_task_state(current, TASK_UNINTERRUPTIBLE); + while (1) + schedule(); +} +EXPORT_SYMBOL(lbug_with_loc); + +static int panic_notifier(struct notifier_block *self, unsigned long unused1, + void *unused2) +{ + if (libcfs_panic_in_progress) + return 0; + + libcfs_panic_in_progress = 1; + mb(); + + return 0; +} + +static struct notifier_block libcfs_panic_notifier = { + .notifier_call = panic_notifier, + .next = NULL, + .priority = 10000, +}; + +void libcfs_register_panic_notifier(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &libcfs_panic_notifier); +} + +void libcfs_unregister_panic_notifier(void) +{ + atomic_notifier_chain_unregister(&panic_notifier_list, + &libcfs_panic_notifier); +} diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c new file mode 100644 index 000000000..86f32ffc5 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c @@ -0,0 +1,59 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ */
+/*
+ * This file creates a memory allocation primitive for Lustre that
+ * allows falling back to vmalloc allocations should regular kernel
+ * allocations fail due to size or system memory fragmentation.
+ *
+ * Author: Oleg Drokin <green@linuxhacker.ru>
+ *
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Seagate Technology.
+ */
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include "../../../include/linux/libcfs/libcfs.h"
+
+void *libcfs_kvzalloc(size_t size, gfp_t flags)
+{
+	void *ret;
+
+	ret = kzalloc(size, flags | __GFP_NOWARN);
+	if (!ret)
+		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+	return ret;
+}
+EXPORT_SYMBOL(libcfs_kvzalloc);
+
+void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
+			  gfp_t flags)
+{
+	void *ret;
+
+	ret = kzalloc_node(size, flags | __GFP_NOWARN,
+			   cfs_cpt_spread_node(cptab, cpt));
+	if (!ret) {
+		WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH)));
+		ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(libcfs_kvzalloc_cpt);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
new file mode 100644
index 000000000..ebc60ac9b
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -0,0 +1,159 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */ + +#define DEBUG_SUBSYSTEM S_LNET + +#include "../../../include/linux/libcfs/libcfs.h" + +#define LNET_MINOR 240 + +int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data) +{ + if (libcfs_ioctl_is_invalid(data)) { + CERROR("LNET: ioctl not correctly formatted\n"); + return -EINVAL; + } + + if (data->ioc_inllen1) + data->ioc_inlbuf1 = &data->ioc_bulk[0]; + + if (data->ioc_inllen2) + data->ioc_inlbuf2 = &data->ioc_bulk[0] + + cfs_size_round(data->ioc_inllen1); + + return 0; +} + +int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg, + __u32 *len) +{ + struct libcfs_ioctl_hdr hdr; + + if (copy_from_user(&hdr, arg, sizeof(hdr))) + return -EFAULT; + + if (hdr.ioc_version != LIBCFS_IOCTL_VERSION && + hdr.ioc_version != LIBCFS_IOCTL_VERSION2) { + CERROR("LNET: version mismatch expected %#x, got %#x\n", + LIBCFS_IOCTL_VERSION, hdr.ioc_version); + return -EINVAL; + } + + *len = hdr.ioc_len; + + return 0; +} + +int libcfs_ioctl_popdata(void __user *arg, void *data, int size) +{ + if (copy_to_user(arg, data, size)) + return -EFAULT; + return 0; +} + +static int +libcfs_psdev_open(struct inode *inode, struct file *file) +{ + int rc = 0; + + if (!inode) + return -EINVAL; + if (libcfs_psdev_ops.p_open) + rc = libcfs_psdev_ops.p_open(0, NULL); + else + return -EPERM; + return rc; +} + +/* called when closing /dev/device */ +static int +libcfs_psdev_release(struct inode *inode, struct file *file) +{ + int rc = 0; + + if (!inode) + return -EINVAL; + if (libcfs_psdev_ops.p_close) + rc = libcfs_psdev_ops.p_close(0, NULL); + else + rc = -EPERM; + return rc; +} + +static long libcfs_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct cfs_psdev_file pfile; + int rc = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE || + _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR || + _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) { + CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n", + _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd)); + return -EINVAL; + } + + /* Handle platform-dependent IOC requests */ + switch (cmd) { + case IOC_LIBCFS_PANIC: + if (!capable(CFS_CAP_SYS_BOOT)) + return -EPERM; + panic("debugctl-invoked panic"); + return 0; + } + + if (libcfs_psdev_ops.p_ioctl) + rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg); + else + rc = -EPERM; + return rc; +} + +static const struct file_operations libcfs_fops = { + .unlocked_ioctl = libcfs_ioctl, + .open = libcfs_psdev_open, + .release = libcfs_psdev_release, +}; + +struct miscdevice libcfs_dev = { + .minor = LNET_MINOR, + .name = "lnet", + .fops = &libcfs_fops, +}; diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c new file mode 100644 index 000000000..890844602 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c @@ -0,0 +1,147 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). 
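For context, libcfs_ioctl_getdata_len() above dictates what user space must send: every payload starts with a struct libcfs_ioctl_hdr whose ioc_version and ioc_len are validated before anything else is copied in. A hypothetical user-space caller might look like the sketch below; the device node follows from the miscdevice name "lnet" registered above, while the error handling and helper name are illustrative:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

/* Sketch only: assumes the libcfs ioctl header definitions are available. */
static int example_send_ioctl(unsigned long cmd, void *buf, __u32 len)
{
	struct libcfs_ioctl_hdr *hdr = buf;	/* header must lead the payload */
	int fd, rc;

	hdr->ioc_version = LIBCFS_IOCTL_VERSION;	/* checked first */
	hdr->ioc_len = len;				/* total buffer length */

	fd = open("/dev/lnet", O_RDWR);		/* miscdevice named "lnet" */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, cmd, buf);		/* kernel side wants CAP_SYS_ADMIN */
	close(fd);
	return rc;
}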
+ * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + */ + +#define DEBUG_SUBSYSTEM S_LNET +#include +#include +#include +#include + +#include "../../../include/linux/libcfs/libcfs.h" + +#if defined(CONFIG_KGDB) +#include +#endif + +/** + * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively + * waiting threads, which is not always desirable because all threads will + * be woken up again and again, even if the user only needs a few of them + * to be active most of the time. This is bad for performance because the + * cache can be polluted by different threads. + * + * A LIFO list can resolve this problem because we always wake up the most + * recently active thread by default. + * + * NB: please don't call non-exclusive & exclusive wait on the same + * waitq if add_wait_queue_exclusive_head is used. + */ +void +add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link) +{ + unsigned long flags; + + spin_lock_irqsave(&waitq->lock, flags); + __add_wait_queue_exclusive(waitq, link); + spin_unlock_irqrestore(&waitq->lock, flags); +} +EXPORT_SYMBOL(add_wait_queue_exclusive_head); + +sigset_t +cfs_block_allsigs(void) +{ + unsigned long flags; + sigset_t old; + + spin_lock_irqsave(&current->sighand->siglock, flags); + old = current->blocked; + sigfillset(&current->blocked); + recalc_sigpending(); + spin_unlock_irqrestore(&current->sighand->siglock, flags); + + return old; +} +EXPORT_SYMBOL(cfs_block_allsigs); + +sigset_t cfs_block_sigs(unsigned long sigs) +{ + unsigned long flags; + sigset_t old; + + spin_lock_irqsave(&current->sighand->siglock, flags); + old = current->blocked; + sigaddsetmask(&current->blocked, sigs); + recalc_sigpending(); + spin_unlock_irqrestore(&current->sighand->siglock, flags); + return old; +} +EXPORT_SYMBOL(cfs_block_sigs); + +/* Block all signals except for the @sigs */ +sigset_t cfs_block_sigsinv(unsigned long sigs) +{ + unsigned long flags; + sigset_t old; + + spin_lock_irqsave(&current->sighand->siglock, flags); + old = current->blocked; + sigaddsetmask(&current->blocked, ~sigs); + recalc_sigpending(); + spin_unlock_irqrestore(&current->sighand->siglock, flags); + + return old; +} +EXPORT_SYMBOL(cfs_block_sigsinv); + +void +cfs_restore_sigs(sigset_t old) +{ + unsigned long flags; + + spin_lock_irqsave(&current->sighand->siglock, flags); + current->blocked = old; + recalc_sigpending(); + spin_unlock_irqrestore(&current->sighand->siglock, flags); +} +EXPORT_SYMBOL(cfs_restore_sigs); + +int +cfs_signal_pending(void) +{ + return signal_pending(current); +} +EXPORT_SYMBOL(cfs_signal_pending); + +void +cfs_clear_sigpending(void) +{ + unsigned long flags; + + spin_lock_irqsave(&current->sighand->siglock, flags); + clear_tsk_thread_flag(current, TIF_SIGPENDING); + spin_unlock_irqrestore(&current->sighand->siglock, flags); +} +EXPORT_SYMBOL(cfs_clear_sigpending); diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c new file mode
100644 index 000000000..91c2ae8f9 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c @@ -0,0 +1,259 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + */ + +#define DEBUG_SUBSYSTEM S_LNET +#define LUSTRE_TRACEFILE_PRIVATE + +#include "../../../include/linux/libcfs/libcfs.h" +#include "../tracefile.h" + +/* percentages of the total debug memory for each type */ +static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = { + 80, /* 80% pages for CFS_TCD_TYPE_PROC */ + 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */ + 10 /* 10% pages for CFS_TCD_TYPE_IRQ */ +}; + +char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; + +static DECLARE_RWSEM(cfs_tracefile_sem); + +int cfs_tracefile_init_arch(void) +{ + int i; + int j; + struct cfs_trace_cpu_data *tcd; + + /* initialize trace_data */ + memset(cfs_trace_data, 0, sizeof(cfs_trace_data)); + for (i = 0; i < CFS_TCD_TYPE_MAX; i++) { + cfs_trace_data[i] = + kmalloc(sizeof(union cfs_trace_data_union) * + num_possible_cpus(), GFP_KERNEL); + if (!cfs_trace_data[i]) + goto out; + } + + /* arch related info initialized */ + cfs_tcd_for_each(tcd, i, j) { + spin_lock_init(&tcd->tcd_lock); + tcd->tcd_pages_factor = pages_factor[i]; + tcd->tcd_type = i; + tcd->tcd_cpu = j; + } + + for (i = 0; i < num_possible_cpus(); i++) + for (j = 0; j < 3; j++) { + cfs_trace_console_buffers[i][j] = + kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE, + GFP_KERNEL); + + if (!cfs_trace_console_buffers[i][j]) + goto out; + } + + return 0; + +out: + cfs_tracefile_fini_arch(); + printk(KERN_ERR "lnet: Not enough memory\n"); + return -ENOMEM; +} + +void cfs_tracefile_fini_arch(void) +{ + int i; + int j; + + for (i = 0; i < num_possible_cpus(); i++) + for (j = 0; j < 3; j++) { + kfree(cfs_trace_console_buffers[i][j]); + cfs_trace_console_buffers[i][j] = NULL; + } + + for (i = 0; cfs_trace_data[i]; i++) { + kfree(cfs_trace_data[i]); + cfs_trace_data[i] = NULL; + } +} + +void cfs_tracefile_read_lock(void) +{ + down_read(&cfs_tracefile_sem); +} + +void cfs_tracefile_read_unlock(void) +{ + up_read(&cfs_tracefile_sem); +} + +void cfs_tracefile_write_lock(void) +{ + down_write(&cfs_tracefile_sem); +} + +void cfs_tracefile_write_unlock(void) +{ + up_write(&cfs_tracefile_sem); +} + +enum cfs_trace_buf_type
cfs_trace_buf_idx_get(void) +{ + if (in_irq()) + return CFS_TCD_TYPE_IRQ; + if (in_softirq()) + return CFS_TCD_TYPE_SOFTIRQ; + return CFS_TCD_TYPE_PROC; +} + +/* + * The walking argument indicates that the locking comes from the + * all-tcd-types iterator, so we must lock it and disable local irqs to + * avoid deadlocks with other interrupt locks that might be taken. See + * LU-1311 for details. + */ +int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking) + __acquires(&tcd->tcd_lock) +{ + __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); + if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) + spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags); + else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) + spin_lock_bh(&tcd->tcd_lock); + else if (unlikely(walking)) + spin_lock_irq(&tcd->tcd_lock); + else + spin_lock(&tcd->tcd_lock); + return 1; +} + +void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) + __releases(&tcd->tcd_lock) +{ + __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); + if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) + spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags); + else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) + spin_unlock_bh(&tcd->tcd_lock); + else if (unlikely(walking)) + spin_unlock_irq(&tcd->tcd_lock); + else + spin_unlock(&tcd->tcd_lock); +} + +void +cfs_set_ptldebug_header(struct ptldebug_header *header, + struct libcfs_debug_msg_data *msgdata, + unsigned long stack) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + + header->ph_subsys = msgdata->msg_subsys; + header->ph_mask = msgdata->msg_mask; + header->ph_cpu_id = smp_processor_id(); + header->ph_type = cfs_trace_buf_idx_get(); + /* y2038 safe since all user space treats this as unsigned, but + * will overflow in 2106 + */ + header->ph_sec = (u32)ts.tv_sec; + header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; + header->ph_stack = stack; + header->ph_pid = current->pid; + header->ph_line_num = msgdata->msg_line; + header->ph_extern_pid = 0; +} + +static char * +dbghdr_to_err_string(struct ptldebug_header *hdr) +{ + switch (hdr->ph_subsys) { + case S_LND: + case S_LNET: + return "LNetError"; + default: + return "LustreError"; + } +} + +static char * +dbghdr_to_info_string(struct ptldebug_header *hdr) +{ + switch (hdr->ph_subsys) { + case S_LND: + case S_LNET: + return "LNet"; + default: + return "Lustre"; + } +} + +void cfs_print_to_console(struct ptldebug_header *hdr, int mask, + const char *buf, int len, const char *file, + const char *fn) +{ + char *prefix = "Lustre", *ptype = NULL; + + if ((mask & D_EMERG) != 0) { + prefix = dbghdr_to_err_string(hdr); + ptype = KERN_EMERG; + } else if ((mask & D_ERROR) != 0) { + prefix = dbghdr_to_err_string(hdr); + ptype = KERN_ERR; + } else if ((mask & D_WARNING) != 0) { + prefix = dbghdr_to_info_string(hdr); + ptype = KERN_WARNING; + } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) { + prefix = dbghdr_to_info_string(hdr); + ptype = KERN_INFO; + } + + if ((mask & D_CONSOLE) != 0) { + printk("%s%s: %.*s", ptype, prefix, len, buf); + } else { + printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num, + fn, len, buf); + } +} + +int cfs_trace_max_debug_mb(void) +{ + int total_mb = (totalram_pages >> (20 - PAGE_SHIFT)); + + return max(512, (total_mb * 80) / 100); +} diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c new file mode 100644 index 000000000..cdc640bfd --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/module.c @@ -0,0 +1,674 @@ +/* + * GPL
HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, 2015 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include + +#define DEBUG_SUBSYSTEM S_LNET + +#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \ + sizeof(struct lnet_ioctl_config_data)) + +#include "../../include/linux/libcfs/libcfs.h" +#include + +#include "../../include/linux/libcfs/libcfs_crypto.h" +#include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" +#include "../../include/linux/lnet/lnet.h" +#include "tracefile.h" + +static struct dentry *lnet_debugfs_root; + +/* called when opening /dev/device */ +static int libcfs_psdev_open(unsigned long flags, void *args) +{ + try_module_get(THIS_MODULE); + return 0; +} + +/* called when closing /dev/device */ +static int libcfs_psdev_release(unsigned long flags, void *args) +{ + module_put(THIS_MODULE); + return 0; +} + +static DECLARE_RWSEM(ioctl_list_sem); +static LIST_HEAD(ioctl_list); + +int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand) +{ + int rc = 0; + + down_write(&ioctl_list_sem); + if (!list_empty(&hand->item)) + rc = -EBUSY; + else + list_add_tail(&hand->item, &ioctl_list); + up_write(&ioctl_list_sem); + + return rc; +} +EXPORT_SYMBOL(libcfs_register_ioctl); + +int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand) +{ + int rc = 0; + + down_write(&ioctl_list_sem); + if (list_empty(&hand->item)) + rc = -ENOENT; + else + list_del_init(&hand->item); + up_write(&ioctl_list_sem); + + return rc; +} +EXPORT_SYMBOL(libcfs_deregister_ioctl); + +static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd, + void __user *arg, struct libcfs_ioctl_hdr *hdr) +{ + struct libcfs_ioctl_data *data = NULL; + int err = -EINVAL; + + /* + * The libcfs_ioctl_data_adjust() function performs adjustment + * operations on the libcfs_ioctl_data structure to make + * it usable by the code. This does not need to be called + * for newly added data structures.
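The handler list managed by libcfs_register_ioctl()/libcfs_deregister_ioctl() is searched linearly, and the dispatch loop below treats -EINVAL as "not mine, keep looking", so a handler must return -EINVAL for commands it does not own. A sketch of a hypothetical subsystem hooking in; the command constant is invented, and the handle_ioctl signature is inferred from the call site below:

/* Sketch only: IOC_LIBCFS_EXAMPLE is a hypothetical command. */
static int example_handle_ioctl(unsigned int cmd,
				struct libcfs_ioctl_hdr *hdr)
{
	if (cmd != IOC_LIBCFS_EXAMPLE)
		return -EINVAL;		/* not ours: dispatch keeps searching */

	/* ... act on hdr; on success the result is copied back out ... */
	return 0;
}

static struct libcfs_ioctl_handler example_handler = {
	.item		= LIST_HEAD_INIT(example_handler.item),
	.handle_ioctl	= example_handle_ioctl,
};

/* module init:  libcfs_register_ioctl(&example_handler);
 * module exit:  libcfs_deregister_ioctl(&example_handler);
 */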
+ */ + if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) { + data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr); + err = libcfs_ioctl_data_adjust(data); + if (err) + return err; + } + + switch (cmd) { + case IOC_LIBCFS_CLEAR_DEBUG: + libcfs_debug_clear_buffer(); + return 0; + /* + * case IOC_LIBCFS_PANIC: + * Handled in arch/cfs_module.c + */ + case IOC_LIBCFS_MARK_DEBUG: + if (!data->ioc_inlbuf1 || + data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') + return -EINVAL; + libcfs_debug_mark_buffer(data->ioc_inlbuf1); + return 0; + + default: { + struct libcfs_ioctl_handler *hand; + + err = -EINVAL; + down_read(&ioctl_list_sem); + list_for_each_entry(hand, &ioctl_list, item) { + err = hand->handle_ioctl(cmd, hdr); + if (err != -EINVAL) { + if (err == 0) + err = libcfs_ioctl_popdata(arg, + hdr, hdr->ioc_len); + break; + } + } + up_read(&ioctl_list_sem); + break; + } + } + + return err; +} + +static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, + void __user *arg) +{ + struct libcfs_ioctl_hdr *hdr; + int err = 0; + __u32 buf_len; + + err = libcfs_ioctl_getdata_len(arg, &buf_len); + if (err) + return err; + + /* + * do a check here to restrict the size of the memory + * to allocate to guard against DoS attacks. + */ + if (buf_len > LNET_MAX_IOCTL_BUF_LEN) { + CERROR("LNET: user buffer exceeds kernel buffer\n"); + return -EINVAL; + } + + LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + /* 'cmd' and permissions get checked in our arch-specific caller */ + if (copy_from_user(hdr, arg, buf_len)) { + CERROR("LNET ioctl: data error\n"); + err = -EFAULT; + goto out; + } + + err = libcfs_ioctl_handle(pfile, cmd, arg, hdr); + +out: + LIBCFS_FREE(hdr, buf_len); + return err; +} + +struct cfs_psdev_ops libcfs_psdev_ops = { + libcfs_psdev_open, + libcfs_psdev_release, + NULL, + NULL, + libcfs_ioctl +}; + +int lprocfs_call_handler(void *data, int write, loff_t *ppos, + void __user *buffer, size_t *lenp, + int (*handler)(void *data, int write, loff_t pos, + void __user *buffer, int len)) +{ + int rc = handler(data, write, *ppos, buffer, *lenp); + + if (rc < 0) + return rc; + + if (write) { + *ppos += *lenp; + } else { + *lenp = rc; + *ppos += rc; + } + return 0; +} +EXPORT_SYMBOL(lprocfs_call_handler); + +static int __proc_dobitmasks(void *data, int write, + loff_t pos, void __user *buffer, int nob) +{ + const int tmpstrlen = 512; + char *tmpstr; + int rc; + unsigned int *mask = data; + int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; + int is_printk = (mask == &libcfs_printk) ? 
1 : 0; + + rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen); + if (rc < 0) + return rc; + + if (!write) { + libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys); + rc = strlen(tmpstr); + + if (pos >= rc) { + rc = 0; + } else { + rc = cfs_trace_copyout_string(buffer, nob, + tmpstr + pos, "\n"); + } + } else { + rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob); + if (rc < 0) { + kfree(tmpstr); + return rc; + } + + rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys); + /* Always print LBUG/LASSERT to console, so keep this mask */ + if (is_printk) + *mask |= D_EMERG; + } + + kfree(tmpstr); + return rc; +} + +static int proc_dobitmasks(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_dobitmasks); +} + +static int __proc_dump_kernel(void *data, int write, + loff_t pos, void __user *buffer, int nob) +{ + if (!write) + return 0; + + return cfs_trace_dump_debug_buffer_usrstr(buffer, nob); +} + +static int proc_dump_kernel(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_dump_kernel); +} + +static int __proc_daemon_file(void *data, int write, + loff_t pos, void __user *buffer, int nob) +{ + if (!write) { + int len = strlen(cfs_tracefile); + + if (pos >= len) + return 0; + + return cfs_trace_copyout_string(buffer, nob, + cfs_tracefile + pos, "\n"); + } + + return cfs_trace_daemon_command_usrstr(buffer, nob); +} + +static int proc_daemon_file(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_daemon_file); +} + +static int libcfs_force_lbug(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + if (write) + LBUG(); + return 0; +} + +static int proc_fail_loc(struct ctl_table *table, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int rc; + long old_fail_loc = cfs_fail_loc; + + rc = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); + if (old_fail_loc != cfs_fail_loc) + wake_up(&cfs_race_waitq); + return rc; +} + +static int __proc_cpt_table(void *data, int write, + loff_t pos, void __user *buffer, int nob) +{ + char *buf = NULL; + int len = 4096; + int rc = 0; + + if (write) + return -EPERM; + + LASSERT(cfs_cpt_table); + + while (1) { + LIBCFS_ALLOC(buf, len); + if (!buf) + return -ENOMEM; + + rc = cfs_cpt_table_print(cfs_cpt_table, buf, len); + if (rc >= 0) + break; + + if (rc == -EFBIG) { + LIBCFS_FREE(buf, len); + len <<= 1; + continue; + } + goto out; + } + + if (pos >= rc) { + rc = 0; + goto out; + } + + rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL); + out: + if (buf) + LIBCFS_FREE(buf, len); + return rc; +} + +static int proc_cpt_table(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_cpt_table); +} + +static struct ctl_table lnet_table[] = { + { + .procname = "debug", + .data = &libcfs_debug, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dobitmasks, + }, + { + .procname = "subsystem_debug", + .data = &libcfs_subsystem_debug, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dobitmasks, + }, + { + .procname = "printk", + .data = &libcfs_printk, + .maxlen = sizeof(int), + .mode = 0644, + 
.proc_handler = &proc_dobitmasks, + }, + { + .procname = "cpu_partition_table", + .maxlen = 128, + .mode = 0444, + .proc_handler = &proc_cpt_table, + }, + + { + .procname = "upcall", + .data = lnet_upcall, + .maxlen = sizeof(lnet_upcall), + .mode = 0644, + .proc_handler = &proc_dostring, + }, + { + .procname = "debug_log_upcall", + .data = lnet_debug_log_upcall, + .maxlen = sizeof(lnet_debug_log_upcall), + .mode = 0644, + .proc_handler = &proc_dostring, + }, + { + .procname = "catastrophe", + .data = &libcfs_catastrophe, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = &proc_dointvec, + }, + { + .procname = "dump_kernel", + .maxlen = 256, + .mode = 0200, + .proc_handler = &proc_dump_kernel, + }, + { + .procname = "daemon_file", + .mode = 0644, + .maxlen = 256, + .proc_handler = &proc_daemon_file, + }, + { + .procname = "force_lbug", + .data = NULL, + .maxlen = 0, + .mode = 0200, + .proc_handler = &libcfs_force_lbug + }, + { + .procname = "fail_loc", + .data = &cfs_fail_loc, + .maxlen = sizeof(cfs_fail_loc), + .mode = 0644, + .proc_handler = &proc_fail_loc + }, + { + .procname = "fail_val", + .data = &cfs_fail_val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec + }, + { + } +}; + +static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = { + { "console_ratelimit", + "/sys/module/libcfs/parameters/libcfs_console_ratelimit"}, + { "debug_path", + "/sys/module/libcfs/parameters/libcfs_debug_file_path"}, + { "panic_on_lbug", + "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"}, + { "libcfs_console_backoff", + "/sys/module/libcfs/parameters/libcfs_console_backoff"}, + { "debug_mb", + "/sys/module/libcfs/parameters/libcfs_debug_mb"}, + { "console_min_delay_centisecs", + "/sys/module/libcfs/parameters/libcfs_console_min_delay"}, + { "console_max_delay_centisecs", + "/sys/module/libcfs/parameters/libcfs_console_max_delay"}, + {}, +}; + +static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + struct ctl_table *table = filp->private_data; + int error; + + error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos); + if (!error) + error = count; + + return error; +} + +static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ctl_table *table = filp->private_data; + int error; + + error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos); + if (!error) + error = count; + + return error; +} + +static const struct file_operations lnet_debugfs_file_operations_rw = { + .open = simple_open, + .read = lnet_debugfs_read, + .write = lnet_debugfs_write, + .llseek = default_llseek, +}; + +static const struct file_operations lnet_debugfs_file_operations_ro = { + .open = simple_open, + .read = lnet_debugfs_read, + .llseek = default_llseek, +}; + +static const struct file_operations lnet_debugfs_file_operations_wo = { + .open = simple_open, + .write = lnet_debugfs_write, + .llseek = default_llseek, +}; + +static const struct file_operations *lnet_debugfs_fops_select(umode_t mode) +{ + if (!(mode & S_IWUGO)) + return &lnet_debugfs_file_operations_ro; + + if (!(mode & S_IRUGO)) + return &lnet_debugfs_file_operations_wo; + + return &lnet_debugfs_file_operations_rw; +} + +void lustre_insert_debugfs(struct ctl_table *table, + const struct lnet_debugfs_symlink_def *symlinks) +{ + if (!lnet_debugfs_root) + lnet_debugfs_root = debugfs_create_dir("lnet", NULL); + + /* Even if we cannot create, just ignore it altogether */ + if (IS_ERR_OR_NULL(lnet_debugfs_root)) + return; + + /* We don't save the dentry returned by the next two calls, because + * we don't call debugfs_remove() but rather debugfs_remove_recursive() + */ + for (; table->procname; table++) + debugfs_create_file(table->procname, table->mode, + lnet_debugfs_root, table, + lnet_debugfs_fops_select(table->mode)); + + for (; symlinks && symlinks->name; symlinks++) + debugfs_create_symlink(symlinks->name, lnet_debugfs_root, + symlinks->target); +} +EXPORT_SYMBOL_GPL(lustre_insert_debugfs); + +static void lustre_remove_debugfs(void) +{ + debugfs_remove_recursive(lnet_debugfs_root); + + lnet_debugfs_root = NULL; +} + +static int libcfs_init(void) +{ + int rc; + + rc = libcfs_debug_init(5 * 1024 * 1024); + if (rc < 0) { + pr_err("LustreError: libcfs_debug_init: %d\n", rc); + return rc; + } + + rc = cfs_cpu_init(); + if (rc != 0) + goto cleanup_debug; + + rc = misc_register(&libcfs_dev); + if (rc) { + CERROR("misc_register: error %d\n", rc); + goto cleanup_cpu; + } + + rc = cfs_wi_startup(); + if (rc) { + CERROR("initialize workitem: error %d\n", rc); + goto cleanup_deregister; + } + + /* at most 4 threads, should be enough for rehash */ + rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4); + rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY, + rc, &cfs_sched_rehash); + if (rc != 0) { + CERROR("Startup workitem scheduler: error: %d\n", rc); + goto cleanup_deregister; + } + + rc = cfs_crypto_register(); + if (rc) { + CERROR("cfs_crypto_register: error %d\n", rc); + goto cleanup_wi; + } + + lustre_insert_debugfs(lnet_table, lnet_debugfs_symlinks); + + CDEBUG(D_OTHER, "portals setup OK\n"); + return 0; + cleanup_wi: + cfs_wi_shutdown(); + cleanup_deregister: + misc_deregister(&libcfs_dev); +cleanup_cpu: + cfs_cpu_fini(); + cleanup_debug: + libcfs_debug_cleanup(); + return rc; +} + +static void libcfs_exit(void) +{ + int rc; + + lustre_remove_debugfs(); + + if (cfs_sched_rehash) { + cfs_wi_sched_destroy(cfs_sched_rehash); + cfs_sched_rehash = NULL; + } + + cfs_crypto_unregister(); + cfs_wi_shutdown(); + + misc_deregister(&libcfs_dev); + + cfs_cpu_fini(); + + rc = libcfs_debug_cleanup(); + if (rc) + pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc); +} + +MODULE_AUTHOR("OpenSFS, Inc. "); +MODULE_DESCRIPTION("Lustre helper library"); +MODULE_VERSION(LIBCFS_VERSION); +MODULE_LICENSE("GPL"); + +module_init(libcfs_init); +module_exit(libcfs_exit); diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c new file mode 100644 index 000000000..c75ae9a68 --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/prng.c @@ -0,0 +1,140 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code).
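Since lustre_insert_debugfs() creates one debugfs file per ctl_table entry, the whole lnet_table above becomes drivable from user space with ordinary reads and writes. A sketch, assuming debugfs is mounted at the usual /sys/kernel/debug and that libcfs_debug_str2mask() accepts a "+name" mask syntax (that syntax is an assumption here):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch only: read and tweak the "debug" bitmask file created above. */
int main(void)
{
	char buf[512];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/lnet/debug", O_RDWR);
	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf) - 1);	/* proc_dobitmasks, read side */
	if (n > 0) {
		buf[n] = '\0';
		printf("debug mask: %s", buf);
	}

	(void)write(fd, "+ha", 3);		/* assumed "+name" mask syntax */
	close(fd);
	return 0;
}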
+ * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * libcfs/libcfs/prng.c + * + * concatenation of the following two 16-bit multiply-with-carry generators + * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16, + * number and carry packed within the same 32 bit integer. + * algorithm recommended by Marsaglia + */ + +#include "../../include/linux/libcfs/libcfs.h" + +/* + * From: George Marsaglia + * Newsgroups: sci.math + * Subject: Re: A RANDOM NUMBER GENERATOR FOR C + * Date: Tue, 30 Sep 1997 05:29:35 -0700 + * + * You may replace the two constants 36969 and 18000 by any + * pair of distinct constants from this list: + * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584 + * 19599 19950 20088 20508 20544 20664 20814 20970 21153 21243 + * 21423 21723 21954 22125 22188 22293 22860 22938 22965 22974 + * 23109 23124 23163 23208 23508 23520 23553 23658 23865 24114 + * 24219 24660 24699 24864 24948 25023 25308 25443 26004 26088 + * 26154 26550 26679 26838 27183 27258 27753 27795 27810 27834 + * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013 + * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083 + * (or any other 16-bit constants k for which both k*2^16-1 + * and k*2^15-1 are prime) + */ + +#define RANDOM_CONST_A 18030 +#define RANDOM_CONST_B 29013 + +static unsigned int seed_x = 521288629; +static unsigned int seed_y = 362436069; + +/** + * cfs_rand - generates a pseudo-random 32-bit integer + * + * First it creates new seeds from the previous seeds. Then it generates a + * new pseudo-random number for use. + * + * Returns a pseudo-random 32-bit integer + */ +unsigned int cfs_rand(void) +{ + seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16); + seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16); + + return ((seed_x << 16) + (seed_y & 65535)); +} +EXPORT_SYMBOL(cfs_rand); + +/** + * cfs_srand - sets the initial seed + * @seed1 : (seed_x) should have the most entropy in the low bits of the word + * @seed2 : (seed_y) should have the most entropy in the high bits of the word + * + * Replaces the original seeds with new values. Used to reseed the + * pseudo-random number generator.
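The recurrence above is deterministic for a given seed pair, which is what makes Marsaglia's replace-the-constants advice testable: any listed pair yields an independent stream that reproduces exactly under the same seeds. A stand-alone user-space re-implementation of the same step, for experimenting outside the kernel:

#include <stdio.h>

#define EX_CONST_A 18030	/* same pair as RANDOM_CONST_A/B above */
#define EX_CONST_B 29013

static unsigned int ex_x = 521288629;	/* default seeds from the module */
static unsigned int ex_y = 362436069;

/* identical multiply-with-carry step to cfs_rand() */
static unsigned int example_rand(void)
{
	ex_x = EX_CONST_A * (ex_x & 65535) + (ex_x >> 16);
	ex_y = EX_CONST_B * (ex_y & 65535) + (ex_y >> 16);
	return (ex_x << 16) + (ex_y & 65535);
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)		/* same seeds -> same stream */
		printf("%u\n", example_rand());
	return 0;
}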
+ */ +void cfs_srand(unsigned int seed1, unsigned int seed2) +{ + if (seed1) + seed_x = seed1; /* use default seeds if parameter is 0 */ + if (seed2) + seed_y = seed2; +} +EXPORT_SYMBOL(cfs_srand); + +/** + * cfs_get_random_bytes - generate a bunch of random numbers + * @buf : buffer to fill with random numbers + * @size: size of passed in buffer + * + * Fills a buffer with random bytes + */ +void cfs_get_random_bytes(void *buf, int size) +{ + int *p = buf; + int rem, tmp; + + LASSERT(size >= 0); + + rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size); + if (rem) { + get_random_bytes(&tmp, sizeof(tmp)); + tmp ^= cfs_rand(); + memcpy(buf, &tmp, rem); + p = buf + rem; + size -= rem; + } + + while (size >= sizeof(int)) { + get_random_bytes(&tmp, sizeof(tmp)); + *p = cfs_rand() ^ tmp; + size -= sizeof(int); + p++; + } + buf = p; + if (size) { + get_random_bytes(&tmp, sizeof(tmp)); + tmp ^= cfs_rand(); + memcpy(buf, &tmp, size); + } +} +EXPORT_SYMBOL(cfs_get_random_bytes); diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c new file mode 100644 index 000000000..244eb89ee --- /dev/null +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -0,0 +1,1208 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * libcfs/libcfs/tracefile.c + * + * Author: Zach Brown + * Author: Phil Schwan + */ + +#define DEBUG_SUBSYSTEM S_LNET +#define LUSTRE_TRACEFILE_PRIVATE +#include "tracefile.h" + +#include "../../include/linux/libcfs/libcfs.h" + +/* XXX move things up to the top, comment */ +union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned; + +char cfs_tracefile[TRACEFILE_NAME_SIZE]; +long long cfs_tracefile_size = CFS_TRACEFILE_SIZE; +static struct tracefiled_ctl trace_tctl; +static DEFINE_MUTEX(cfs_trace_thread_mutex); +static int thread_running; + +static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); + +struct page_collection { + struct list_head pc_pages; + /* + * if this flag is set, collect_pages() will spill both + * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, + * only ->tcd_pages are spilled. 
+ */ + int pc_want_daemon_pages; +}; + +struct tracefiled_ctl { + struct completion tctl_start; + struct completion tctl_stop; + wait_queue_head_t tctl_waitq; + pid_t tctl_pid; + atomic_t tctl_shutdown; +}; + +/* + * small data-structure for each page owned by tracefiled. + */ +struct cfs_trace_page { + /* + * page itself + */ + struct page *page; + /* + * linkage into one of the lists in trace_data_union or + * page_collection + */ + struct list_head linkage; + /* + * number of bytes used within this page + */ + unsigned int used; + /* + * cpu that owns this page + */ + unsigned short cpu; + /* + * type(context) of this page + */ + unsigned short type; +}; + +static void put_pages_on_tcd_daemon_list(struct page_collection *pc, + struct cfs_trace_cpu_data *tcd); + +static inline struct cfs_trace_page * +cfs_tage_from_list(struct list_head *list) +{ + return list_entry(list, struct cfs_trace_page, linkage); +} + +static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) +{ + struct page *page; + struct cfs_trace_page *tage; + + /* My caller is trying to free memory */ + if (!in_interrupt() && memory_pressure_get()) + return NULL; + + /* + * Don't spam console with allocation failures: they will be reported + * by upper layer anyway. + */ + gfp |= __GFP_NOWARN; + page = alloc_page(gfp); + if (!page) + return NULL; + + tage = kmalloc(sizeof(*tage), gfp); + if (!tage) { + __free_page(page); + return NULL; + } + + tage->page = page; + atomic_inc(&cfs_tage_allocated); + return tage; +} + +static void cfs_tage_free(struct cfs_trace_page *tage) +{ + __free_page(tage->page); + kfree(tage); + atomic_dec(&cfs_tage_allocated); +} + +static void cfs_tage_to_tail(struct cfs_trace_page *tage, + struct list_head *queue) +{ + list_move_tail(&tage->linkage, queue); +} + +int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, + struct list_head *stock) +{ + int i; + + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ + + for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) { + struct cfs_trace_page *tage; + + tage = cfs_tage_alloc(gfp); + if (!tage) + break; + list_add_tail(&tage->linkage, stock); + } + return i; +} + +/* return a page that has 'len' bytes left at the end */ +static struct cfs_trace_page * +cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) +{ + struct cfs_trace_page *tage; + + if (tcd->tcd_cur_pages > 0) { + __LASSERT(!list_empty(&tcd->tcd_pages)); + tage = cfs_tage_from_list(tcd->tcd_pages.prev); + if (tage->used + len <= PAGE_SIZE) + return tage; + } + + if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { + if (tcd->tcd_cur_stock_pages > 0) { + tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); + --tcd->tcd_cur_stock_pages; + list_del_init(&tage->linkage); + } else { + tage = cfs_tage_alloc(GFP_ATOMIC); + if (unlikely(!tage)) { + if ((!memory_pressure_get() || + in_interrupt()) && printk_ratelimit()) + printk(KERN_WARNING + "cannot allocate a tage (%ld)\n", + tcd->tcd_cur_pages); + return NULL; + } + } + + tage->used = 0; + tage->cpu = smp_processor_id(); + tage->type = tcd->tcd_type; + list_add_tail(&tage->linkage, &tcd->tcd_pages); + tcd->tcd_cur_pages++; + + if (tcd->tcd_cur_pages > 8 && thread_running) { + struct tracefiled_ctl *tctl = &trace_tctl; + /* + * wake up tracefiled to process some pages. 
+ */ + wake_up(&tctl->tctl_waitq); + } + return tage; + } + return NULL; +} + +static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) +{ + int pgcount = tcd->tcd_cur_pages / 10; + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ + + if (printk_ratelimit()) + printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n", + pgcount + 1, tcd->tcd_cur_pages); + + INIT_LIST_HEAD(&pc.pc_pages); + + list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { + if (pgcount-- == 0) + break; + + list_move_tail(&tage->linkage, &pc.pc_pages); + tcd->tcd_cur_pages--; + } + put_pages_on_tcd_daemon_list(&pc, tcd); +} + +/* return a page that has 'len' bytes left at the end */ +static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, + unsigned long len) +{ + struct cfs_trace_page *tage; + + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ + + if (len > PAGE_SIZE) { + pr_err("cowardly refusing to write %lu bytes in a page\n", len); + return NULL; + } + + tage = cfs_trace_get_tage_try(tcd, len); + if (tage) + return tage; + if (thread_running) + cfs_tcd_shrink(tcd); + if (tcd->tcd_cur_pages > 0) { + tage = cfs_tage_from_list(tcd->tcd_pages.next); + tage->used = 0; + cfs_tage_to_tail(tage, &tcd->tcd_pages); + } + return tage; +} + +int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, + const char *format, ...) +{ + va_list args; + int rc; + + va_start(args, format); + rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); + va_end(args); + + return rc; +} +EXPORT_SYMBOL(libcfs_debug_msg); + +int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, + const char *format1, va_list args, + const char *format2, ...) +{ + struct cfs_trace_cpu_data *tcd = NULL; + struct ptldebug_header header = {0}; + struct cfs_trace_page *tage; + /* string_buf is used only if tcd != NULL, and is always set then */ + char *string_buf = NULL; + char *debug_buf; + int known_size; + int needed = 85; /* average message length */ + int max_nob; + va_list ap; + int depth; + int i; + int remain; + int mask = msgdata->msg_mask; + const char *file = kbasename(msgdata->msg_file); + struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; + + tcd = cfs_trace_get_tcd(); + + /* cfs_trace_get_tcd() grabs a lock, which disables preemption and + * pins us to a particular CPU. This avoids an smp_processor_id() + * warning on Linux when debugging is enabled. + */ + cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); + + if (!tcd) /* arch may not log in IRQ context */ + goto console; + + if (tcd->tcd_cur_pages == 0) + header.ph_flags |= PH_FLAG_FIRST_RECORD; + + if (tcd->tcd_shutting_down) { + cfs_trace_put_tcd(tcd); + tcd = NULL; + goto console; + } + + depth = __current_nesting_level(); + known_size = strlen(file) + 1 + depth; + if (msgdata->msg_fn) + known_size += strlen(msgdata->msg_fn) + 1; + + if (libcfs_debug_binary) + known_size += sizeof(header); + + /* + * '2' is used because vsnprintf returns the real size required for the + * output _without_ the terminating NUL, so we retry once + * if 'needed' turns out to be too small for this format.
+ */ + for (i = 0; i < 2; i++) { + tage = cfs_trace_get_tage(tcd, needed + known_size + 1); + if (!tage) { + if (needed + known_size > PAGE_SIZE) + mask |= D_ERROR; + + cfs_trace_put_tcd(tcd); + tcd = NULL; + goto console; + } + + string_buf = (char *)page_address(tage->page) + + tage->used + known_size; + + max_nob = PAGE_SIZE - tage->used - known_size; + if (max_nob <= 0) { + printk(KERN_EMERG "negative max_nob: %d\n", + max_nob); + mask |= D_ERROR; + cfs_trace_put_tcd(tcd); + tcd = NULL; + goto console; + } + + needed = 0; + if (format1) { + va_copy(ap, args); + needed = vsnprintf(string_buf, max_nob, format1, ap); + va_end(ap); + } + + if (format2) { + remain = max_nob - needed; + if (remain < 0) + remain = 0; + + va_start(ap, format2); + needed += vsnprintf(string_buf + needed, remain, + format2, ap); + va_end(ap); + } + + if (needed < max_nob) /* well. printing ok.. */ + break; + } + + if (*(string_buf + needed - 1) != '\n') + printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n", + file, msgdata->msg_line, msgdata->msg_fn); + + header.ph_len = known_size + needed; + debug_buf = (char *)page_address(tage->page) + tage->used; + + if (libcfs_debug_binary) { + memcpy(debug_buf, &header, sizeof(header)); + tage->used += sizeof(header); + debug_buf += sizeof(header); + } + + /* indent message according to the nesting level */ + while (depth-- > 0) { + *(debug_buf++) = '.'; + ++tage->used; + } + + strcpy(debug_buf, file); + tage->used += strlen(file) + 1; + debug_buf += strlen(file) + 1; + + if (msgdata->msg_fn) { + strcpy(debug_buf, msgdata->msg_fn); + tage->used += strlen(msgdata->msg_fn) + 1; + debug_buf += strlen(msgdata->msg_fn) + 1; + } + + __LASSERT(debug_buf == string_buf); + + tage->used += needed; + __LASSERT(tage->used <= PAGE_SIZE); + +console: + if ((mask & libcfs_printk) == 0) { + /* no console output requested */ + if (tcd) + cfs_trace_put_tcd(tcd); + return 1; + } + + if (cdls) { + if (libcfs_console_ratelimit && + cdls->cdls_next != 0 && /* not first time ever */ + !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { + /* skipping a console message */ + cdls->cdls_count++; + if (tcd) + cfs_trace_put_tcd(tcd); + return 1; + } + + if (cfs_time_after(cfs_time_current(), + cdls->cdls_next + libcfs_console_max_delay + + cfs_time_seconds(10))) { + /* last timeout was a long time ago */ + cdls->cdls_delay /= libcfs_console_backoff * 4; + } else { + cdls->cdls_delay *= libcfs_console_backoff; + } + + if (cdls->cdls_delay < libcfs_console_min_delay) + cdls->cdls_delay = libcfs_console_min_delay; + else if (cdls->cdls_delay > libcfs_console_max_delay) + cdls->cdls_delay = libcfs_console_max_delay; + + /* ensure cdls_next is never zero after it's been seen */ + cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; + } + + if (tcd) { + cfs_print_to_console(&header, mask, string_buf, needed, file, + msgdata->msg_fn); + cfs_trace_put_tcd(tcd); + } else { + string_buf = cfs_trace_get_console_buffer(); + + needed = 0; + if (format1) { + va_copy(ap, args); + needed = vsnprintf(string_buf, + CFS_TRACE_CONSOLE_BUFFER_SIZE, + format1, ap); + va_end(ap); + } + if (format2) { + remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; + if (remain > 0) { + va_start(ap, format2); + needed += vsnprintf(string_buf + needed, remain, + format2, ap); + va_end(ap); + } + } + cfs_print_to_console(&header, mask, + string_buf, needed, file, msgdata->msg_fn); + + put_cpu(); + } + + if (cdls && cdls->cdls_count != 0) { + string_buf = cfs_trace_get_console_buffer(); + + needed = 
snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, + "Skipped %d previous similar message%s\n", + cdls->cdls_count, + (cdls->cdls_count > 1) ? "s" : ""); + + cfs_print_to_console(&header, mask, + string_buf, needed, file, msgdata->msg_fn); + + put_cpu(); + cdls->cdls_count = 0; + } + + return 0; +} +EXPORT_SYMBOL(libcfs_debug_vmsg2); + +void +cfs_trace_assertion_failed(const char *str, + struct libcfs_debug_msg_data *msgdata) +{ + struct ptldebug_header hdr; + + libcfs_panic_in_progress = 1; + libcfs_catastrophe = 1; + mb(); + + cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); + + cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), + msgdata->msg_file, msgdata->msg_fn); + + panic("Lustre debug assertion failure\n"); + + /* not reached */ +} + +static void +panic_collect_pages(struct page_collection *pc) +{ + /* Do the collect_pages job on a single CPU: assumes that all other + * CPUs have been stopped during a panic. If this isn't true for some + * arch, this will have to be implemented separately in each arch. + */ + int i; + int j; + struct cfs_trace_cpu_data *tcd; + + INIT_LIST_HEAD(&pc->pc_pages); + + cfs_tcd_for_each(tcd, i, j) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; + + if (pc->pc_want_daemon_pages) { + list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages); + tcd->tcd_cur_daemon_pages = 0; + } + } +} + +static void collect_pages_on_all_cpus(struct page_collection *pc) +{ + struct cfs_trace_cpu_data *tcd; + int i, cpu; + + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; + if (pc->pc_want_daemon_pages) { + list_splice_init(&tcd->tcd_daemon_pages, + &pc->pc_pages); + tcd->tcd_cur_daemon_pages = 0; + } + } + } +} + +static void collect_pages(struct page_collection *pc) +{ + INIT_LIST_HEAD(&pc->pc_pages); + + if (libcfs_panic_in_progress) + panic_collect_pages(pc); + else + collect_pages_on_all_cpus(pc); +} + +static void put_pages_back_on_all_cpus(struct page_collection *pc) +{ + struct cfs_trace_cpu_data *tcd; + struct list_head *cur_head; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + int i, cpu; + + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + cur_head = tcd->tcd_pages.next; + + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, + linkage) { + __LASSERT_TAGE_INVARIANT(tage); + + if (tage->cpu != cpu || tage->type != i) + continue; + + cfs_tage_to_tail(tage, cur_head); + tcd->tcd_cur_pages++; + } + } + } +} + +static void put_pages_back(struct page_collection *pc) +{ + if (!libcfs_panic_in_progress) + put_pages_back_on_all_cpus(pc); +} + +/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that + * we have a good amount of data at all times for dumping during an LBUG, even + * if we have been steadily writing (and otherwise discarding) pages via the + * debug daemon. 
+ */ +static void put_pages_on_tcd_daemon_list(struct page_collection *pc, + struct cfs_trace_cpu_data *tcd) +{ + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); + + if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) + continue; + + cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); + tcd->tcd_cur_daemon_pages++; + + if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { + struct cfs_trace_page *victim; + + __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); + victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); + + __LASSERT_TAGE_INVARIANT(victim); + + list_del(&victim->linkage); + cfs_tage_free(victim); + tcd->tcd_cur_daemon_pages--; + } + } +} + +static void put_pages_on_daemon_list(struct page_collection *pc) +{ + struct cfs_trace_cpu_data *tcd; + int i, cpu; + + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) + put_pages_on_tcd_daemon_list(pc, tcd); + } +} + +void cfs_trace_debug_print(void) +{ + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + char *p, *file, *fn; + struct page *page; + + __LASSERT_TAGE_INVARIANT(tage); + + page = tage->page; + p = page_address(page); + while (p < ((char *)page_address(page) + tage->used)) { + struct ptldebug_header *hdr; + int len; + + hdr = (void *)p; + p += sizeof(*hdr); + file = p; + p += strlen(file) + 1; + fn = p; + p += strlen(fn) + 1; + len = hdr->ph_len - (int)(p - (char *)hdr); + + cfs_print_to_console(hdr, D_EMERG, p, len, file, fn); + + p += len; + } + + list_del(&tage->linkage); + cfs_tage_free(tage); + } +} + +int cfs_tracefile_dump_all_pages(char *filename) +{ + struct page_collection pc; + struct file *filp; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + char *buf; + int rc; + + DECL_MMSPACE; + + cfs_tracefile_write_lock(); + + filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, + 0600); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + filp = NULL; + pr_err("LustreError: can't open %s for dump: rc %d\n", + filename, rc); + goto out; + } + + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + if (list_empty(&pc.pc_pages)) { + rc = 0; + goto close; + } + + /* ok, for now, just write the pages. 
in the future we'll be building + * iobufs with the pages and calling generic_direct_IO + */ + MMSPACE_OPEN; + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); + + buf = kmap(tage->page); + rc = vfs_write(filp, (__force const char __user *)buf, + tage->used, &filp->f_pos); + kunmap(tage->page); + + if (rc != (int)tage->used) { + printk(KERN_WARNING "wanted to write %u but wrote %d\n", + tage->used, rc); + put_pages_back(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + break; + } + list_del(&tage->linkage); + cfs_tage_free(tage); + } + MMSPACE_CLOSE; + rc = vfs_fsync(filp, 1); + if (rc) + pr_err("sync returns %d\n", rc); +close: + filp_close(filp, NULL); +out: + cfs_tracefile_write_unlock(); + return rc; +} + +void cfs_trace_flush_pages(void) +{ + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); + + list_del(&tage->linkage); + cfs_tage_free(tage); + } +} + +int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, + const char __user *usr_buffer, int usr_buffer_nob) +{ + int nob; + + if (usr_buffer_nob > knl_buffer_nob) + return -EOVERFLOW; + + if (copy_from_user((void *)knl_buffer, + usr_buffer, usr_buffer_nob)) + return -EFAULT; + + nob = strnlen(knl_buffer, usr_buffer_nob); + while (--nob >= 0) /* strip trailing whitespace */ + if (!isspace(knl_buffer[nob])) + break; + + if (nob < 0) /* empty string */ + return -EINVAL; + + if (nob == knl_buffer_nob) /* no space to terminate */ + return -EOVERFLOW; + + knl_buffer[nob + 1] = 0; /* terminate */ + return 0; +} +EXPORT_SYMBOL(cfs_trace_copyin_string); + +int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, + const char *knl_buffer, char *append) +{ + /* + * NB if 'append' != NULL, it's a single character to append to the + * copied out string - usually "\n" or "" (i.e.
a terminating zero byte) + */ + int nob = strlen(knl_buffer); + + if (nob > usr_buffer_nob) + nob = usr_buffer_nob; + + if (copy_to_user(usr_buffer, knl_buffer, nob)) + return -EFAULT; + + if (append && nob < usr_buffer_nob) { + if (copy_to_user(usr_buffer + nob, append, 1)) + return -EFAULT; + + nob++; + } + + return nob; +} +EXPORT_SYMBOL(cfs_trace_copyout_string); + +int cfs_trace_allocate_string_buffer(char **str, int nob) +{ + if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */ + return -EINVAL; + + *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); + if (!*str) + return -ENOMEM; + + return 0; +} + +int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) +{ + char *str; + int rc; + + rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); + if (rc != 0) + return rc; + + rc = cfs_trace_copyin_string(str, usr_str_nob + 1, + usr_str, usr_str_nob); + if (rc != 0) + goto out; + + if (str[0] != '/') { + rc = -EINVAL; + goto out; + } + rc = cfs_tracefile_dump_all_pages(str); +out: + kfree(str); + return rc; +} + +int cfs_trace_daemon_command(char *str) +{ + int rc = 0; + + cfs_tracefile_write_lock(); + + if (strcmp(str, "stop") == 0) { + cfs_tracefile_write_unlock(); + cfs_trace_stop_thread(); + cfs_tracefile_write_lock(); + memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); + + } else if (strncmp(str, "size=", 5) == 0) { + unsigned long tmp; + + rc = kstrtoul(str + 5, 10, &tmp); + if (!rc) { + if (tmp < 10 || tmp > 20480) + cfs_tracefile_size = CFS_TRACEFILE_SIZE; + else + cfs_tracefile_size = tmp << 20; + } + } else if (strlen(str) >= sizeof(cfs_tracefile)) { + rc = -ENAMETOOLONG; + } else if (str[0] != '/') { + rc = -EINVAL; + } else { + strcpy(cfs_tracefile, str); + + printk(KERN_INFO + "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n", + cfs_tracefile, + (long)(cfs_tracefile_size >> 10)); + + cfs_trace_start_thread(); + } + + cfs_tracefile_write_unlock(); + return rc; +} + +int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) +{ + char *str; + int rc; + + rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); + if (rc != 0) + return rc; + + rc = cfs_trace_copyin_string(str, usr_str_nob + 1, + usr_str, usr_str_nob); + if (rc == 0) + rc = cfs_trace_daemon_command(str); + + kfree(str); + return rc; +} + +int cfs_trace_set_debug_mb(int mb) +{ + int i; + int j; + int pages; + int limit = cfs_trace_max_debug_mb(); + struct cfs_trace_cpu_data *tcd; + + if (mb < num_possible_cpus()) { + printk(KERN_WARNING + "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n", + mb, num_possible_cpus()); + mb = num_possible_cpus(); + } + + if (mb > limit) { + printk(KERN_WARNING + "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n", + mb, limit); + mb = limit; + } + + mb /= num_possible_cpus(); + pages = mb << (20 - PAGE_SHIFT); + + cfs_tracefile_write_lock(); + + cfs_tcd_for_each(tcd, i, j) + tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; + + cfs_tracefile_write_unlock(); + + return 0; +} + +int cfs_trace_get_debug_mb(void) +{ + int i; + int j; + struct cfs_trace_cpu_data *tcd; + int total_pages = 0; + + cfs_tracefile_read_lock(); + + cfs_tcd_for_each(tcd, i, j) + total_pages += tcd->tcd_max_pages; + + cfs_tracefile_read_unlock(); + + return (total_pages >> (20 - PAGE_SHIFT)) + 1; +} + +static int tracefiled(void *arg) +{ + struct page_collection pc; + struct tracefiled_ctl *tctl = arg; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + struct file *filp; + char 
*buf; + int last_loop = 0; + int rc; + + DECL_MMSPACE; + + /* we're started late enough that we pick up init's fs context */ + /* this is so broken in uml? what on earth is going on? */ + + complete(&tctl->tctl_start); + + while (1) { + wait_queue_t __wait; + + pc.pc_want_daemon_pages = 0; + collect_pages(&pc); + if (list_empty(&pc.pc_pages)) + goto end_loop; + + filp = NULL; + cfs_tracefile_read_lock(); + if (cfs_tracefile[0] != 0) { + filp = filp_open(cfs_tracefile, + O_CREAT | O_RDWR | O_LARGEFILE, + 0600); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + filp = NULL; + printk(KERN_WARNING "couldn't open %s: %d\n", + cfs_tracefile, rc); + } + } + cfs_tracefile_read_unlock(); + if (!filp) { + put_pages_on_daemon_list(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + goto end_loop; + } + + MMSPACE_OPEN; + + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + static loff_t f_pos; + + __LASSERT_TAGE_INVARIANT(tage); + + if (f_pos >= (off_t)cfs_tracefile_size) + f_pos = 0; + else if (f_pos > i_size_read(file_inode(filp))) + f_pos = i_size_read(file_inode(filp)); + + buf = kmap(tage->page); + rc = vfs_write(filp, (__force const char __user *)buf, + tage->used, &f_pos); + kunmap(tage->page); + + if (rc != (int)tage->used) { + printk(KERN_WARNING "wanted to write %u but wrote %d\n", + tage->used, rc); + put_pages_back(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + break; + } + } + MMSPACE_CLOSE; + + filp_close(filp, NULL); + put_pages_on_daemon_list(&pc); + if (!list_empty(&pc.pc_pages)) { + int i; + + printk(KERN_ALERT "Lustre: trace pages aren't empty\n"); + pr_err("total cpus(%d): ", num_possible_cpus()); + for (i = 0; i < num_possible_cpus(); i++) + if (cpu_online(i)) + pr_cont("%d(on) ", i); + else + pr_cont("%d(off) ", i); + pr_cont("\n"); + + i = 0; + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, + linkage) + pr_err("page %d belongs to cpu %d\n", + ++i, tage->cpu); + pr_err("There are %d pages unwritten\n", i); + } + __LASSERT(list_empty(&pc.pc_pages)); +end_loop: + if (atomic_read(&tctl->tctl_shutdown)) { + if (last_loop == 0) { + last_loop = 1; + continue; + } else { + break; + } + } + init_waitqueue_entry(&__wait, current); + add_wait_queue(&tctl->tctl_waitq, &__wait); + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1)); + remove_wait_queue(&tctl->tctl_waitq, &__wait); + } + complete(&tctl->tctl_stop); + return 0; +} + +int cfs_trace_start_thread(void) +{ + struct tracefiled_ctl *tctl = &trace_tctl; + struct task_struct *task; + int rc = 0; + + mutex_lock(&cfs_trace_thread_mutex); + if (thread_running) + goto out; + + init_completion(&tctl->tctl_start); + init_completion(&tctl->tctl_stop); + init_waitqueue_head(&tctl->tctl_waitq); + atomic_set(&tctl->tctl_shutdown, 0); + + task = kthread_run(tracefiled, tctl, "ktracefiled"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + goto out; + } + + wait_for_completion(&tctl->tctl_start); + thread_running = 1; +out: + mutex_unlock(&cfs_trace_thread_mutex); + return rc; +} + +void cfs_trace_stop_thread(void) +{ + struct tracefiled_ctl *tctl = &trace_tctl; + + mutex_lock(&cfs_trace_thread_mutex); + if (thread_running) { + printk(KERN_INFO + "Lustre: shutting down debug daemon thread...\n"); + atomic_set(&tctl->tctl_shutdown, 1); + wait_for_completion(&tctl->tctl_stop); + thread_running = 0; + } + mutex_unlock(&cfs_trace_thread_mutex); +} + +int cfs_tracefile_init(int max_pages) +{ + struct cfs_trace_cpu_data *tcd; + int i; + int j; + int rc; + int factor; + + rc = cfs_tracefile_init_arch(); + if (rc != 0) + 
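/*
 * (editor's sketch, not part of the patch) the start/stop handshake
 * used by cfs_trace_start_thread()/cfs_trace_stop_thread() above,
 * distilled; all names are taken from the code above:
 *
 *	task = kthread_run(tracefiled, tctl, "ktracefiled");
 *	wait_for_completion(&tctl->tctl_start);  // daemon signals start
 *	...
 *	atomic_set(&tctl->tctl_shutdown, 1);     // daemon runs one last
 *	wait_for_completion(&tctl->tctl_stop);   // loop, then signals
 */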
return rc;
+
+	cfs_tcd_for_each(tcd, i, j) {
+		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
+		factor = tcd->tcd_pages_factor;
+		INIT_LIST_HEAD(&tcd->tcd_pages);
+		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
+		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
+		tcd->tcd_cur_pages = 0;
+		tcd->tcd_cur_stock_pages = 0;
+		tcd->tcd_cur_daemon_pages = 0;
+		tcd->tcd_max_pages = (max_pages * factor) / 100;
+		LASSERT(tcd->tcd_max_pages > 0);
+		tcd->tcd_shutting_down = 0;
+	}
+
+	return 0;
+}
+
+static void trace_cleanup_on_all_cpus(void)
+{
+	struct cfs_trace_cpu_data *tcd;
+	struct cfs_trace_page *tage;
+	struct cfs_trace_page *tmp;
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+			tcd->tcd_shutting_down = 1;
+
+			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
+						 linkage) {
+				__LASSERT_TAGE_INVARIANT(tage);
+
+				list_del(&tage->linkage);
+				cfs_tage_free(tage);
+			}
+
+			tcd->tcd_cur_pages = 0;
+		}
+	}
+}
+
+static void cfs_trace_cleanup(void)
+{
+	struct page_collection pc;
+
+	INIT_LIST_HEAD(&pc.pc_pages);
+
+	trace_cleanup_on_all_cpus();
+
+	cfs_tracefile_fini_arch();
+}
+
+void cfs_tracefile_exit(void)
+{
+	cfs_trace_stop_thread();
+	cfs_trace_cleanup();
+}
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
new file mode 100644
index 000000000..ac84e7f4c
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -0,0 +1,266 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */ + +#ifndef __LIBCFS_TRACEFILE_H__ +#define __LIBCFS_TRACEFILE_H__ + +#include "../../include/linux/libcfs/libcfs.h" + +enum cfs_trace_buf_type { + CFS_TCD_TYPE_PROC = 0, + CFS_TCD_TYPE_SOFTIRQ, + CFS_TCD_TYPE_IRQ, + CFS_TCD_TYPE_MAX +}; + +/* trace file lock routines */ + +#define TRACEFILE_NAME_SIZE 1024 +extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; +extern long long cfs_tracefile_size; + +void libcfs_run_debug_log_upcall(char *file); + +int cfs_tracefile_init_arch(void); +void cfs_tracefile_fini_arch(void); + +void cfs_tracefile_read_lock(void); +void cfs_tracefile_read_unlock(void); +void cfs_tracefile_write_lock(void); +void cfs_tracefile_write_unlock(void); + +int cfs_tracefile_dump_all_pages(char *filename); +void cfs_trace_debug_print(void); +void cfs_trace_flush_pages(void); +int cfs_trace_start_thread(void); +void cfs_trace_stop_thread(void); +int cfs_tracefile_init(int max_pages); +void cfs_tracefile_exit(void); + +int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, + const char __user *usr_buffer, int usr_buffer_nob); +int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, + const char *knl_str, char *append); +int cfs_trace_allocate_string_buffer(char **str, int nob); +int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob); +int cfs_trace_daemon_command(char *str); +int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob); +int cfs_trace_set_debug_mb(int mb); +int cfs_trace_get_debug_mb(void); + +void libcfs_debug_dumplog_internal(void *arg); +void libcfs_register_panic_notifier(void); +void libcfs_unregister_panic_notifier(void); +extern int libcfs_panic_in_progress; +int cfs_trace_max_debug_mb(void); + +#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) +#define TCD_STOCK_PAGES (TCD_MAX_PAGES) +#define CFS_TRACEFILE_SIZE (500 << 20) + +#ifdef LUSTRE_TRACEFILE_PRIVATE + +/* + * Private declare for tracefile + */ +#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) +#define TCD_STOCK_PAGES (TCD_MAX_PAGES) + +#define CFS_TRACEFILE_SIZE (500 << 20) + +/* + * Size of a buffer for sprinting console messages if we can't get a page + * from system + */ +#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024 + +union cfs_trace_data_union { + struct cfs_trace_cpu_data { + /* + * Even though this structure is meant to be per-CPU, locking + * is needed because in some places the data may be accessed + * from other CPUs. This lock is directly used in trace_get_tcd + * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and + * tcd_for_each_type_lock + */ + spinlock_t tcd_lock; + unsigned long tcd_lock_flags; + + /* + * pages with trace records not yet processed by tracefiled. + */ + struct list_head tcd_pages; + /* number of pages on ->tcd_pages */ + unsigned long tcd_cur_pages; + + /* + * pages with trace records already processed by + * tracefiled. These pages are kept in memory, so that some + * portion of log can be written in the event of LBUG. This + * list is maintained in LRU order. + * + * Pages are moved to ->tcd_daemon_pages by tracefiled() + * (put_pages_on_daemon_list()). LRU pages from this list are + * discarded when list grows too large. + */ + struct list_head tcd_daemon_pages; + /* number of pages on ->tcd_daemon_pages */ + unsigned long tcd_cur_daemon_pages; + + /* + * Maximal number of pages allowed on ->tcd_pages and + * ->tcd_daemon_pages each. + * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current + * implementation. 
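 * (editor's worked example) with 4 KiB pages, PAGE_SHIFT == 12, so
 * TCD_MAX_PAGES = 5 << (20 - 12) = 1280 pages (5 MB) per CPU before
 * the per-type tcd_pages_factor split is applied.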
+ */ + unsigned long tcd_max_pages; + + /* + * preallocated pages to write trace records into. Pages from + * ->tcd_stock_pages are moved to ->tcd_pages by + * portals_debug_msg(). + * + * This list is necessary, because on some platforms it's + * impossible to perform efficient atomic page allocation in a + * non-blockable context. + * + * Such platforms fill ->tcd_stock_pages "on occasion", when + * tracing code is entered in blockable context. + * + * trace_get_tage_try() tries to get a page from + * ->tcd_stock_pages first and resorts to atomic page + * allocation only if this queue is empty. ->tcd_stock_pages + * is replenished when tracing code is entered in blocking + * context (darwin-tracefile.c:trace_get_tcd()). We try to + * maintain TCD_STOCK_PAGES (40 by default) pages in this + * queue. Atomic allocation is only required if more than + * TCD_STOCK_PAGES pagesful are consumed by trace records all + * emitted in non-blocking contexts. Which is quite unlikely. + */ + struct list_head tcd_stock_pages; + /* number of pages on ->tcd_stock_pages */ + unsigned long tcd_cur_stock_pages; + + unsigned short tcd_shutting_down; + unsigned short tcd_cpu; + unsigned short tcd_type; + /* The factors to share debug memory. */ + unsigned short tcd_pages_factor; + } tcd; + char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))]; +}; + +#define TCD_MAX_TYPES 8 +extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS]; + +#define cfs_tcd_for_each(tcd, i, j) \ + for (i = 0; cfs_trace_data[i]; i++) \ + for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ + j < num_possible_cpus(); \ + j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) + +#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ + for (i = 0; cfs_trace_data[i] && \ + (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ + cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) + +void cfs_set_ptldebug_header(struct ptldebug_header *header, + struct libcfs_debug_msg_data *m, + unsigned long stack); +void cfs_print_to_console(struct ptldebug_header *hdr, int mask, + const char *buf, int len, const char *file, + const char *fn); + +int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking); +void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking); + +extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; +enum cfs_trace_buf_type cfs_trace_buf_idx_get(void); + +static inline char * +cfs_trace_get_console_buffer(void) +{ + unsigned int i = get_cpu(); + unsigned int j = cfs_trace_buf_idx_get(); + + return cfs_trace_console_buffers[i][j]; +} + +static inline struct cfs_trace_cpu_data * +cfs_trace_get_tcd(void) +{ + struct cfs_trace_cpu_data *tcd = + &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd; + + cfs_trace_lock_tcd(tcd, 0); + + return tcd; +} + +static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd) +{ + cfs_trace_unlock_tcd(tcd, 0); + + put_cpu(); +} + +int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, + struct list_head *stock); + +void cfs_trace_assertion_failed(const char *str, + struct libcfs_debug_msg_data *m); + +/* ASSERTION that is safe to use within the debug system */ +#define __LASSERT(cond) \ +do { \ + if (unlikely(!(cond))) { \ + LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \ + cfs_trace_assertion_failed("ASSERTION("#cond") failed", \ + &msgdata); \ + } \ +} while (0) + +#define __LASSERT_TAGE_INVARIANT(tage) \ +do { \ + __LASSERT(tage); \ + __LASSERT(tage->page); \ + __LASSERT(tage->used <= PAGE_SIZE); \ + 
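	/* (editor's note) validate both the bookkeeping and the	\
	 * page refcount before trusting a trace page */		\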
__LASSERT(page_count(tage->page) > 0);			\
+} while (0)
+
+#endif /* LUSTRE_TRACEFILE_PRIVATE */
+
+#endif /* __LIBCFS_TRACEFILE_H__ */
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
new file mode 100644
index 000000000..c72fe00dc
--- /dev/null
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -0,0 +1,469 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/workitem.c
+ *
+ * Author: Isaac Huang
+ *         Liang Zhen
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "../../include/linux/libcfs/libcfs.h"
+
+#define CFS_WS_NAME_LEN	16
+
+struct cfs_wi_sched {
+	/* chain on global list */
+	struct list_head	ws_list;
+	/** serialised workitems */
+	spinlock_t		ws_lock;
+	/** where schedulers sleep */
+	wait_queue_head_t	ws_waitq;
+	/** concurrent workitems */
+	struct list_head	ws_runq;
+	/**
+	 * rescheduled running-workitems, a workitem can be rescheduled
+	 * while running in wi_action(), but we don't want to execute it
+	 * again until it returns from wi_action(), so we put it on
+	 * ws_rerunq while rescheduling, and move it to runq after it
+	 * returns from wi_action()
+	 */
+	struct list_head	ws_rerunq;
+	/** CPT-table for this scheduler */
+	struct cfs_cpt_table	*ws_cptab;
+	/** CPT id for affinity */
+	int			ws_cpt;
+	/** number of scheduled workitems */
+	int			ws_nscheduled;
+	/** started scheduler thread, protected by cfs_wi_data::wi_glock */
+	unsigned int		ws_nthreads:30;
+	/** shutting down, protected by cfs_wi_data::wi_glock */
+	unsigned int		ws_stopping:1;
+	/** serialize starting thread, protected by cfs_wi_data::wi_glock */
+	unsigned int		ws_starting:1;
+	/** scheduler name */
+	char			ws_name[CFS_WS_NAME_LEN];
+};
+
+static struct cfs_workitem_data {
+	/** serialize */
+	spinlock_t		wi_glock;
+	/** list of all schedulers */
+	struct list_head	wi_scheds;
+	/** WI module is initialized */
+	int			wi_init;
+	/** shutting down the whole WI module */
+	int			wi_stopping;
+} cfs_wi_data;
+
+static inline int
+cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
+{
+	spin_lock(&sched->ws_lock);
+	if (sched->ws_stopping) {
+		spin_unlock(&sched->ws_lock);
+		return 0;
+	}
+
+	if (!list_empty(&sched->ws_runq)) {
+		spin_unlock(&sched->ws_lock);
+		return 0;
+	}
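+	/* neither stopping nor work queued: the scheduler may sleep */
+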
spin_unlock(&sched->ws_lock); + return 1; +} + +/* XXX: + * 0. it only works when called from wi->wi_action. + * 1. when it returns no one shall try to schedule the workitem. + */ +void +cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) +{ + LASSERT(!in_interrupt()); /* because we use plain spinlock */ + LASSERT(!sched->ws_stopping); + + spin_lock(&sched->ws_lock); + + LASSERT(wi->wi_running); + if (wi->wi_scheduled) { /* cancel pending schedules */ + LASSERT(!list_empty(&wi->wi_list)); + list_del_init(&wi->wi_list); + + LASSERT(sched->ws_nscheduled > 0); + sched->ws_nscheduled--; + } + + LASSERT(list_empty(&wi->wi_list)); + + wi->wi_scheduled = 1; /* LBUG future schedule attempts */ + spin_unlock(&sched->ws_lock); +} +EXPORT_SYMBOL(cfs_wi_exit); + +/** + * cancel schedule request of workitem \a wi + */ +int +cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) +{ + int rc; + + LASSERT(!in_interrupt()); /* because we use plain spinlock */ + LASSERT(!sched->ws_stopping); + + /* + * return 0 if it's running already, otherwise return 1, which + * means the workitem will not be scheduled and will not have + * any race with wi_action. + */ + spin_lock(&sched->ws_lock); + + rc = !(wi->wi_running); + + if (wi->wi_scheduled) { /* cancel pending schedules */ + LASSERT(!list_empty(&wi->wi_list)); + list_del_init(&wi->wi_list); + + LASSERT(sched->ws_nscheduled > 0); + sched->ws_nscheduled--; + + wi->wi_scheduled = 0; + } + + LASSERT(list_empty(&wi->wi_list)); + + spin_unlock(&sched->ws_lock); + return rc; +} +EXPORT_SYMBOL(cfs_wi_deschedule); + +/* + * Workitem scheduled with (serial == 1) is strictly serialised not only with + * itself, but also with others scheduled this way. + * + * Now there's only one static serialised queue, but in the future more might + * be added, and even dynamic creation of serialised queues might be supported. + */ +void +cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) +{ + LASSERT(!in_interrupt()); /* because we use plain spinlock */ + LASSERT(!sched->ws_stopping); + + spin_lock(&sched->ws_lock); + + if (!wi->wi_scheduled) { + LASSERT(list_empty(&wi->wi_list)); + + wi->wi_scheduled = 1; + sched->ws_nscheduled++; + if (!wi->wi_running) { + list_add_tail(&wi->wi_list, &sched->ws_runq); + wake_up(&sched->ws_waitq); + } else { + list_add(&wi->wi_list, &sched->ws_rerunq); + } + } + + LASSERT(!list_empty(&wi->wi_list)); + spin_unlock(&sched->ws_lock); +} +EXPORT_SYMBOL(cfs_wi_schedule); + +static int cfs_wi_scheduler(void *arg) +{ + struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg; + + cfs_block_allsigs(); + + /* CPT affinity scheduler? 
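 * (editor's note) if created with a CPT table, bind this thread to
 * its CPU partition for memory locality; a failed bind only warns and
 * the thread keeps running unbound.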
*/ + if (sched->ws_cptab) + if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0) + CWARN("Failed to bind %s on CPT %d\n", + sched->ws_name, sched->ws_cpt); + + spin_lock(&cfs_wi_data.wi_glock); + + LASSERT(sched->ws_starting == 1); + sched->ws_starting--; + sched->ws_nthreads++; + + spin_unlock(&cfs_wi_data.wi_glock); + + spin_lock(&sched->ws_lock); + + while (!sched->ws_stopping) { + int nloops = 0; + int rc; + cfs_workitem_t *wi; + + while (!list_empty(&sched->ws_runq) && + nloops < CFS_WI_RESCHED) { + wi = list_entry(sched->ws_runq.next, cfs_workitem_t, + wi_list); + LASSERT(wi->wi_scheduled && !wi->wi_running); + + list_del_init(&wi->wi_list); + + LASSERT(sched->ws_nscheduled > 0); + sched->ws_nscheduled--; + + wi->wi_running = 1; + wi->wi_scheduled = 0; + + spin_unlock(&sched->ws_lock); + nloops++; + + rc = (*wi->wi_action) (wi); + + spin_lock(&sched->ws_lock); + if (rc != 0) /* WI should be dead, even be freed! */ + continue; + + wi->wi_running = 0; + if (list_empty(&wi->wi_list)) + continue; + + LASSERT(wi->wi_scheduled); + /* wi is rescheduled, should be on rerunq now, we + * move it to runq so it can run action now + */ + list_move_tail(&wi->wi_list, &sched->ws_runq); + } + + if (!list_empty(&sched->ws_runq)) { + spin_unlock(&sched->ws_lock); + /* don't sleep because some workitems still + * expect me to come back soon + */ + cond_resched(); + spin_lock(&sched->ws_lock); + continue; + } + + spin_unlock(&sched->ws_lock); + rc = wait_event_interruptible_exclusive(sched->ws_waitq, + !cfs_wi_sched_cansleep(sched)); + spin_lock(&sched->ws_lock); + } + + spin_unlock(&sched->ws_lock); + + spin_lock(&cfs_wi_data.wi_glock); + sched->ws_nthreads--; + spin_unlock(&cfs_wi_data.wi_glock); + + return 0; +} + +void +cfs_wi_sched_destroy(struct cfs_wi_sched *sched) +{ + int i; + + LASSERT(cfs_wi_data.wi_init); + LASSERT(!cfs_wi_data.wi_stopping); + + spin_lock(&cfs_wi_data.wi_glock); + if (sched->ws_stopping) { + CDEBUG(D_INFO, "%s is in progress of stopping\n", + sched->ws_name); + spin_unlock(&cfs_wi_data.wi_glock); + return; + } + + LASSERT(!list_empty(&sched->ws_list)); + sched->ws_stopping = 1; + + spin_unlock(&cfs_wi_data.wi_glock); + + i = 2; + wake_up_all(&sched->ws_waitq); + + spin_lock(&cfs_wi_data.wi_glock); + while (sched->ws_nthreads > 0) { + CDEBUG(is_power_of_2(++i) ? 
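/* warn only on power-of-two polls to avoid flooding the console */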
D_WARNING : D_NET, + "waiting for %d threads of WI sched[%s] to terminate\n", + sched->ws_nthreads, sched->ws_name); + + spin_unlock(&cfs_wi_data.wi_glock); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1) / 20); + spin_lock(&cfs_wi_data.wi_glock); + } + + list_del(&sched->ws_list); + + spin_unlock(&cfs_wi_data.wi_glock); + LASSERT(sched->ws_nscheduled == 0); + + LIBCFS_FREE(sched, sizeof(*sched)); +} +EXPORT_SYMBOL(cfs_wi_sched_destroy); + +int +cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, + int cpt, int nthrs, struct cfs_wi_sched **sched_pp) +{ + struct cfs_wi_sched *sched; + int rc; + + LASSERT(cfs_wi_data.wi_init); + LASSERT(!cfs_wi_data.wi_stopping); + LASSERT(!cptab || cpt == CFS_CPT_ANY || + (cpt >= 0 && cpt < cfs_cpt_number(cptab))); + + LIBCFS_ALLOC(sched, sizeof(*sched)); + if (!sched) + return -ENOMEM; + + if (strlen(name) > sizeof(sched->ws_name) - 1) { + LIBCFS_FREE(sched, sizeof(*sched)); + return -E2BIG; + } + strncpy(sched->ws_name, name, sizeof(sched->ws_name)); + + sched->ws_cptab = cptab; + sched->ws_cpt = cpt; + + spin_lock_init(&sched->ws_lock); + init_waitqueue_head(&sched->ws_waitq); + INIT_LIST_HEAD(&sched->ws_runq); + INIT_LIST_HEAD(&sched->ws_rerunq); + INIT_LIST_HEAD(&sched->ws_list); + + rc = 0; + while (nthrs > 0) { + char name[16]; + struct task_struct *task; + + spin_lock(&cfs_wi_data.wi_glock); + while (sched->ws_starting > 0) { + spin_unlock(&cfs_wi_data.wi_glock); + schedule(); + spin_lock(&cfs_wi_data.wi_glock); + } + + sched->ws_starting++; + spin_unlock(&cfs_wi_data.wi_glock); + + if (sched->ws_cptab && sched->ws_cpt >= 0) { + snprintf(name, sizeof(name), "%s_%02d_%02u", + sched->ws_name, sched->ws_cpt, + sched->ws_nthreads); + } else { + snprintf(name, sizeof(name), "%s_%02u", + sched->ws_name, sched->ws_nthreads); + } + + task = kthread_run(cfs_wi_scheduler, sched, "%s", name); + if (!IS_ERR(task)) { + nthrs--; + continue; + } + rc = PTR_ERR(task); + + CERROR("Failed to create thread for WI scheduler %s: %d\n", + name, rc); + + spin_lock(&cfs_wi_data.wi_glock); + + /* make up for cfs_wi_sched_destroy */ + list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); + sched->ws_starting--; + + spin_unlock(&cfs_wi_data.wi_glock); + + cfs_wi_sched_destroy(sched); + return rc; + } + spin_lock(&cfs_wi_data.wi_glock); + list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); + spin_unlock(&cfs_wi_data.wi_glock); + + *sched_pp = sched; + return 0; +} +EXPORT_SYMBOL(cfs_wi_sched_create); + +int +cfs_wi_startup(void) +{ + memset(&cfs_wi_data, 0, sizeof(cfs_wi_data)); + + spin_lock_init(&cfs_wi_data.wi_glock); + INIT_LIST_HEAD(&cfs_wi_data.wi_scheds); + cfs_wi_data.wi_init = 1; + + return 0; +} + +void +cfs_wi_shutdown(void) +{ + struct cfs_wi_sched *sched; + struct cfs_wi_sched *temp; + + spin_lock(&cfs_wi_data.wi_glock); + cfs_wi_data.wi_stopping = 1; + spin_unlock(&cfs_wi_data.wi_glock); + + /* nobody should contend on this list */ + list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { + sched->ws_stopping = 1; + wake_up_all(&sched->ws_waitq); + } + + list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { + spin_lock(&cfs_wi_data.wi_glock); + + while (sched->ws_nthreads != 0) { + spin_unlock(&cfs_wi_data.wi_glock); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1) / 20); + spin_lock(&cfs_wi_data.wi_glock); + } + spin_unlock(&cfs_wi_data.wi_glock); + } + list_for_each_entry_safe(sched, temp, &cfs_wi_data.wi_scheds, ws_list) { + list_del(&sched->ws_list); + LIBCFS_FREE(sched, 
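/*
 * (editor's sketch, not part of the patch) typical client usage of
 * this API, with a hypothetical handler: embed a cfs_workitem_t in
 * your object, point wi_action at the handler, then schedule it:
 *
 *	static int my_action(cfs_workitem_t *wi) { ... ; return 0; }
 *	cfs_wi_init(wi, my_data, my_action);
 *	cfs_wi_schedule(sched, wi);
 *
 * a non-zero return from the action tells the scheduler the item may
 * already be freed; returning 0 leaves it eligible for rescheduling.
 */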
sizeof(*sched)); + } + + cfs_wi_data.wi_stopping = 0; + cfs_wi_data.wi_init = 0; +} diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile index e276fe2bf..4c81fa194 100644 --- a/drivers/staging/lustre/lnet/lnet/Makefile +++ b/drivers/staging/lustre/lnet/lnet/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_LNET) += lnet.o -lnet-y := api-ni.o config.o nidstrings.o \ +lnet-y := api-ni.o config.o nidstrings.o net_fault.o \ lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \ lib-socket.o lib-move.o module.o lo.o \ router.o router_proc.o acceptor.o peer.o diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c index fed57d900..1452bb3ad 100644 --- a/drivers/staging/lustre/lnet/lnet/acceptor.c +++ b/drivers/staging/lustre/lnet/lnet/acceptor.c @@ -36,6 +36,7 @@ #define DEBUG_SUBSYSTEM S_LNET #include +#include #include "../../include/linux/lnet/lib-lnet.h" static int accept_port = 988; @@ -46,7 +47,9 @@ static struct { int pta_shutdown; struct socket *pta_sock; struct completion pta_signal; -} lnet_acceptor_state; +} lnet_acceptor_state = { + .pta_shutdown = 1 +}; int lnet_acceptor_port(void) @@ -78,9 +81,11 @@ static char *accept_type; static int lnet_acceptor_get_tunables(void) { - /* Userland acceptor uses 'accept_type' instead of 'accept', due to + /* + * Userland acceptor uses 'accept_type' instead of 'accept', due to * conflict with 'accept(2)', but kernel acceptor still uses 'accept' - * for compatibility. Hence the trick. */ + * for compatibility. Hence the trick. + */ accept_type = accept; return 0; } @@ -140,7 +145,7 @@ EXPORT_SYMBOL(lnet_connect_console_error); int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, - __u32 local_ip, __u32 peer_ip, int peer_port) + __u32 local_ip, __u32 peer_ip, int peer_port) { lnet_acceptor_connreq_t cr; struct socket *sock; @@ -157,7 +162,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip, peer_port); - if (rc != 0) { + if (rc) { if (fatal) goto failed; continue; @@ -169,14 +174,14 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; cr.acr_nid = peer_nid; - if (the_lnet.ln_testprotocompat != 0) { + if (the_lnet.ln_testprotocompat) { /* single-shot proto check */ lnet_net_lock(LNET_LOCK_EX); - if ((the_lnet.ln_testprotocompat & 4) != 0) { + if (the_lnet.ln_testprotocompat & 4) { cr.acr_version++; the_lnet.ln_testprotocompat &= ~4; } - if ((the_lnet.ln_testprotocompat & 8) != 0) { + if (the_lnet.ln_testprotocompat & 8) { cr.acr_magic = LNET_PROTO_MAGIC; the_lnet.ln_testprotocompat &= ~8; } @@ -184,7 +189,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, } rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc != 0) + if (rc) goto failed_sock; *sockp = sock; @@ -202,8 +207,6 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, } EXPORT_SYMBOL(lnet_connect); -/* Below is the code common for both kernel and MT user-space */ - static int lnet_accept(struct socket *sock, __u32 magic) { @@ -218,23 +221,23 @@ lnet_accept(struct socket *sock, __u32 magic) LASSERT(sizeof(cr) <= 16); /* not too big for the stack */ rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); - LASSERT(rc == 0); /* we succeeded before */ + LASSERT(!rc); /* we succeeded before */ if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) { - if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) { - /* future version compatibility! 
+ /* + * future version compatibility! * When LNET unifies protocols over all LNDs, the first - * thing sent will be a version query. I send back - * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */ - + * thing sent will be a version query. I send back + * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" + */ memset(&cr, 0, sizeof(cr)); cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc != 0) + if (rc) CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n", &peer_ip, rc); return -EPROTO; @@ -254,9 +257,9 @@ lnet_accept(struct socket *sock, __u32 magic) rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version), accept_timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading connection request version from %pI4h\n", - rc, &peer_ip); + rc, &peer_ip); return -EIO; } @@ -264,10 +267,12 @@ lnet_accept(struct socket *sock, __u32 magic) __swab32s(&cr.acr_version); if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) { - /* future version compatibility! + /* + * future version compatibility! * An acceptor-specific protocol rev will first send a version * query. I send back my current version to tell her I'm - * "old". */ + * "old". + */ int peer_version = cr.acr_version; memset(&cr, 0, sizeof(cr)); @@ -275,7 +280,7 @@ lnet_accept(struct socket *sock, __u32 magic) cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); - if (rc != 0) + if (rc) CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n", peer_version, &peer_ip, rc); return -EPROTO; @@ -285,9 +290,9 @@ lnet_accept(struct socket *sock, __u32 magic) sizeof(cr) - offsetof(lnet_acceptor_connreq_t, acr_nid), accept_timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading connection request from %pI4h\n", - rc, &peer_ip); + rc, &peer_ip); return -EIO; } @@ -295,20 +300,20 @@ lnet_accept(struct socket *sock, __u32 magic) __swab64s(&cr.acr_nid); ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid)); - if (ni == NULL || /* no matching net */ + if (!ni || /* no matching net */ ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! 
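 * (editor's note: the peer asked for a NID on a network this node
 * serves, but the NID belongs to a different node, so the connection
 * is refused with -EPERM)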
*/ - if (ni != NULL) + if (ni) lnet_ni_decref(ni); LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n", &peer_ip, libcfs_nid2str(cr.acr_nid)); return -EPERM; } - if (ni->ni_lnd->lnd_accept == NULL) { + if (!ni->ni_lnd->lnd_accept) { /* This catches a request for the loopback LND */ lnet_ni_decref(ni); LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI doesn not accept IP connections\n", - &peer_ip, libcfs_nid2str(cr.acr_nid)); + &peer_ip, libcfs_nid2str(cr.acr_nid)); return -EPERM; } @@ -331,13 +336,13 @@ lnet_acceptor(void *arg) int peer_port; int secure = (int)((long_ptr_t)arg); - LASSERT(lnet_acceptor_state.pta_sock == NULL); + LASSERT(!lnet_acceptor_state.pta_sock); cfs_block_allsigs(); rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port, accept_backlog); - if (rc != 0) { + if (rc) { if (rc == -EADDRINUSE) LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n", accept_port); @@ -354,13 +359,12 @@ lnet_acceptor(void *arg) lnet_acceptor_state.pta_shutdown = rc; complete(&lnet_acceptor_state.pta_signal); - if (rc != 0) + if (rc) return rc; while (!lnet_acceptor_state.pta_shutdown) { - rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock); - if (rc != 0) { + if (rc) { if (rc != -EAGAIN) { CWARN("Accept error %d: pausing...\n", rc); set_current_state(TASK_UNINTERRUPTIBLE); @@ -376,7 +380,7 @@ lnet_acceptor(void *arg) } rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port); - if (rc != 0) { + if (rc) { CERROR("Can't determine new connection's address\n"); goto failed; } @@ -389,14 +393,14 @@ lnet_acceptor(void *arg) rc = lnet_sock_read(newsock, &magic, sizeof(magic), accept_timeout); - if (rc != 0) { + if (rc) { CERROR("Error %d reading connection request from %pI4h\n", - rc, &peer_ip); + rc, &peer_ip); goto failed; } rc = lnet_accept(newsock, magic); - if (rc != 0) + if (rc) goto failed; continue; @@ -436,14 +440,19 @@ accept2secure(const char *acc, long *sec) int lnet_acceptor_start(void) { + struct task_struct *task; int rc; long rc2; long secure; - LASSERT(lnet_acceptor_state.pta_sock == NULL); + /* if acceptor is already running return immediately */ + if (!lnet_acceptor_state.pta_shutdown) + return 0; + + LASSERT(!lnet_acceptor_state.pta_sock); rc = lnet_acceptor_get_tunables(); - if (rc != 0) + if (rc) return rc; init_completion(&lnet_acceptor_state.pta_signal); @@ -451,13 +460,13 @@ lnet_acceptor_start(void) if (rc <= 0) return rc; - if (lnet_count_acceptor_nis() == 0) /* not required */ + if (!lnet_count_acceptor_nis()) /* not required */ return 0; - rc2 = PTR_ERR(kthread_run(lnet_acceptor, - (void *)(ulong_ptr_t)secure, - "acceptor_%03ld", secure)); - if (IS_ERR_VALUE(rc2)) { + task = kthread_run(lnet_acceptor, (void *)(ulong_ptr_t)secure, + "acceptor_%03ld", secure); + if (IS_ERR(task)) { + rc2 = PTR_ERR(task); CERROR("Can't start acceptor thread: %ld\n", rc2); return -ESRCH; @@ -468,11 +477,11 @@ lnet_acceptor_start(void) if (!lnet_acceptor_state.pta_shutdown) { /* started OK */ - LASSERT(lnet_acceptor_state.pta_sock != NULL); + LASSERT(lnet_acceptor_state.pta_sock); return 0; } - LASSERT(lnet_acceptor_state.pta_sock == NULL); + LASSERT(!lnet_acceptor_state.pta_sock); return -ENETDOWN; } @@ -480,11 +489,17 @@ lnet_acceptor_start(void) void lnet_acceptor_stop(void) { - if (lnet_acceptor_state.pta_sock == NULL) /* not running */ + struct sock *sk; + + if (lnet_acceptor_state.pta_shutdown) /* not running */ return; lnet_acceptor_state.pta_shutdown = 1; - 
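/*
 * (editor's note, not part of the patch) the hunk here replaces a
 * direct wake_up_all() on sk_sleep() with the socket's own
 * sk_state_change() callback, which reaches the sleeping acceptor
 * thread no matter how it is blocked on the socket.
 */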
wake_up_all(sk_sleep(lnet_acceptor_state.pta_sock->sk)); + + sk = lnet_acceptor_state.pta_sock->sk; + + /* awake any sleepers using safe method */ + sk->sk_state_change(sk); /* block until acceptor signals exit */ wait_for_completion(&lnet_acceptor_state.pta_signal); diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c index 362282fa0..876475554 100644 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ b/drivers/staging/lustre/lnet/lnet/api-ni.c @@ -39,6 +39,7 @@ #include #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" #define D_LNI D_CONSOLE @@ -61,6 +62,9 @@ static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT; module_param(rnet_htable_size, int, 0444); MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table"); +static int lnet_ping(lnet_process_id_t id, int timeout_ms, + lnet_process_id_t __user *ids, int n_ids); + static char * lnet_get_routes(void) { @@ -73,17 +77,17 @@ lnet_get_networks(void) char *nets; int rc; - if (*networks != 0 && *ip2nets != 0) { + if (*networks && *ip2nets) { LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n"); return NULL; } - if (*ip2nets != 0) { + if (*ip2nets) { rc = lnet_parse_ip2nets(&nets, ip2nets); - return (rc == 0) ? nets : NULL; + return !rc ? nets : NULL; } - if (*networks != 0) + if (*networks) return networks; return "tcp"; @@ -94,6 +98,7 @@ lnet_init_locks(void) { spin_lock_init(&the_lnet.ln_eq_wait_lock); init_waitqueue_head(&the_lnet.ln_eq_waitq); + init_waitqueue_head(&the_lnet.ln_rc_waitq); mutex_init(&the_lnet.ln_lnd_mutex); mutex_init(&the_lnet.ln_api_mutex); } @@ -104,10 +109,10 @@ lnet_create_remote_nets_table(void) int i; struct list_head *hash; - LASSERT(the_lnet.ln_remote_nets_hash == NULL); + LASSERT(!the_lnet.ln_remote_nets_hash); LASSERT(the_lnet.ln_remote_nets_hbits > 0); LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash)); - if (hash == NULL) { + if (!hash) { CERROR("Failed to create remote nets hash table\n"); return -ENOMEM; } @@ -123,7 +128,7 @@ lnet_destroy_remote_nets_table(void) { int i; - if (the_lnet.ln_remote_nets_hash == NULL) + if (!the_lnet.ln_remote_nets_hash) return; for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) @@ -138,12 +143,12 @@ lnet_destroy_remote_nets_table(void) static void lnet_destroy_locks(void) { - if (the_lnet.ln_res_lock != NULL) { + if (the_lnet.ln_res_lock) { cfs_percpt_lock_free(the_lnet.ln_res_lock); the_lnet.ln_res_lock = NULL; } - if (the_lnet.ln_net_lock != NULL) { + if (the_lnet.ln_net_lock) { cfs_percpt_lock_free(the_lnet.ln_net_lock); the_lnet.ln_net_lock = NULL; } @@ -155,11 +160,11 @@ lnet_create_locks(void) lnet_init_locks(); the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table()); - if (the_lnet.ln_res_lock == NULL) + if (!the_lnet.ln_res_lock) goto failed; the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table()); - if (the_lnet.ln_net_lock == NULL) + if (!the_lnet.ln_net_lock) goto failed; return 0; @@ -171,10 +176,12 @@ lnet_create_locks(void) static void lnet_assert_wire_constants(void) { - /* Wire protocol assertions generated by 'wirecheck' + /* + * Wire protocol assertions generated by 'wirecheck' * running on Linux robert.bartonsoftware.com 2.6.8-1.521 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux - * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */ + * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) + */ /* Constants... 
*/ CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded); @@ -284,9 +291,8 @@ lnet_register_lnd(lnd_t *lnd) { mutex_lock(&the_lnet.ln_lnd_mutex); - LASSERT(the_lnet.ln_init); LASSERT(libcfs_isknown_lnd(lnd->lnd_type)); - LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL); + LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type)); list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds); lnd->lnd_refcount = 0; @@ -302,9 +308,8 @@ lnet_unregister_lnd(lnd_t *lnd) { mutex_lock(&the_lnet.ln_lnd_mutex); - LASSERT(the_lnet.ln_init); LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd); - LASSERT(lnd->lnd_refcount == 0); + LASSERT(!lnd->lnd_refcount); list_del(&lnd->lnd_list); CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type)); @@ -335,7 +340,6 @@ lnet_counters_get(lnet_counters_t *counters) counters->recv_length += ctr->recv_length; counters->route_length += ctr->route_length; counters->drop_length += ctr->drop_length; - } lnet_net_unlock(LNET_LOCK_EX); } @@ -375,7 +379,7 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) { int count = 0; - if (rec->rec_type == 0) /* not set yet, it's uninitialized */ + if (!rec->rec_type) /* not set yet, it's uninitialized */ return; while (!list_empty(&rec->rec_active)) { @@ -395,14 +399,16 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) } if (count > 0) { - /* Found alive MD/ME/EQ, user really should unlink/free + /* + * Found alive MD/ME/EQ, user really should unlink/free * all of them before finalize LNet, but if someone didn't, - * we have to recycle garbage for him */ + * we have to recycle garbage for him + */ CERROR("%d active elements on exit of %s container\n", count, lnet_res_type2str(rec->rec_type)); } - if (rec->rec_lh_hash != NULL) { + if (rec->rec_lh_hash) { LIBCFS_FREE(rec->rec_lh_hash, LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); rec->rec_lh_hash = NULL; @@ -417,7 +423,7 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) int rc = 0; int i; - LASSERT(rec->rec_type == 0); + LASSERT(!rec->rec_type); rec->rec_type = type; INIT_LIST_HEAD(&rec->rec_active); @@ -426,7 +432,7 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) /* Arbitrary choice of hash table size */ LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt, LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); - if (rec->rec_lh_hash == NULL) { + if (!rec->rec_lh_hash) { rc = -ENOMEM; goto out; } @@ -464,7 +470,7 @@ lnet_res_containers_create(int type) int i; recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec)); - if (recs == NULL) { + if (!recs) { CERROR("Failed to allocate %s resource containers\n", lnet_res_type2str(type)); return NULL; @@ -472,7 +478,7 @@ lnet_res_containers_create(int type) cfs_percpt_for_each(rec, i, recs) { rc = lnet_res_container_setup(rec, i, type); - if (rc != 0) { + if (rc) { lnet_res_containers_destroy(recs); return NULL; } @@ -518,7 +524,7 @@ lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh) list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]); } -int lnet_unprepare(void); +static int lnet_unprepare(void); static int lnet_prepare(lnet_pid_t requested_pid) @@ -527,11 +533,16 @@ lnet_prepare(lnet_pid_t requested_pid) struct lnet_res_container **recs; int rc = 0; - LASSERT(the_lnet.ln_refcount == 0); + if (requested_pid == LNET_PID_ANY) { + /* Don't instantiate LNET just for me */ + return -ENETDOWN; + } + + LASSERT(!the_lnet.ln_refcount); the_lnet.ln_routing = 0; - LASSERT((requested_pid & LNET_PID_USERFLAG) == 0); + LASSERT(!(requested_pid & 
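/* LNET_PID_USERFLAG marks PIDs of userspace LNet processes; the
 * in-kernel stack must be given a kernel PID, so the flag is clear */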
LNET_PID_USERFLAG)); the_lnet.ln_pid = requested_pid; INIT_LIST_HEAD(&the_lnet.ln_test_peers); @@ -539,9 +550,11 @@ lnet_prepare(lnet_pid_t requested_pid) INIT_LIST_HEAD(&the_lnet.ln_nis_cpt); INIT_LIST_HEAD(&the_lnet.ln_nis_zombie); INIT_LIST_HEAD(&the_lnet.ln_routers); + INIT_LIST_HEAD(&the_lnet.ln_drop_rules); + INIT_LIST_HEAD(&the_lnet.ln_delay_rules); rc = lnet_create_remote_nets_table(); - if (rc != 0) + if (rc) goto failed; /* * NB the interface cookie in wire handles guards against delayed @@ -551,27 +564,27 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(), sizeof(lnet_counters_t)); - if (the_lnet.ln_counters == NULL) { + if (!the_lnet.ln_counters) { CERROR("Failed to allocate counters for LNet\n"); rc = -ENOMEM; goto failed; } rc = lnet_peer_tables_create(); - if (rc != 0) + if (rc) goto failed; rc = lnet_msg_containers_create(); - if (rc != 0) + if (rc) goto failed; rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0, LNET_COOKIE_TYPE_EQ); - if (rc != 0) + if (rc) goto failed; recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME); - if (recs == NULL) { + if (!recs) { rc = -ENOMEM; goto failed; } @@ -579,7 +592,7 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_me_containers = recs; recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD); - if (recs == NULL) { + if (!recs) { rc = -ENOMEM; goto failed; } @@ -587,7 +600,7 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_md_containers = recs; rc = lnet_portals_create(); - if (rc != 0) { + if (rc) { CERROR("Failed to create portals for LNet: %d\n", rc); goto failed; } @@ -599,17 +612,18 @@ lnet_prepare(lnet_pid_t requested_pid) return rc; } -int +static int lnet_unprepare(void) { - /* NB no LNET_LOCK since this is the last reference. All LND instances + /* + * NB no LNET_LOCK since this is the last reference. 
All LND instances * have shut down already, so it is safe to unlink and free all * descriptors, even those that appear committed to a network op (eg MD - * with non-zero pending count) */ - + * with non-zero pending count) + */ lnet_fail_nid(LNET_NID_ANY, 0); - LASSERT(the_lnet.ln_refcount == 0); + LASSERT(!the_lnet.ln_refcount); LASSERT(list_empty(&the_lnet.ln_test_peers)); LASSERT(list_empty(&the_lnet.ln_nis)); LASSERT(list_empty(&the_lnet.ln_nis_cpt)); @@ -617,12 +631,12 @@ lnet_unprepare(void) lnet_portals_destroy(); - if (the_lnet.ln_md_containers != NULL) { + if (the_lnet.ln_md_containers) { lnet_res_containers_destroy(the_lnet.ln_md_containers); the_lnet.ln_md_containers = NULL; } - if (the_lnet.ln_me_containers != NULL) { + if (the_lnet.ln_me_containers) { lnet_res_containers_destroy(the_lnet.ln_me_containers); the_lnet.ln_me_containers = NULL; } @@ -631,9 +645,9 @@ lnet_unprepare(void) lnet_msg_containers_destroy(); lnet_peer_tables_destroy(); - lnet_rtrpools_free(); + lnet_rtrpools_free(0); - if (the_lnet.ln_counters != NULL) { + if (the_lnet.ln_counters) { cfs_percpt_free(the_lnet.ln_counters); the_lnet.ln_counters = NULL; } @@ -709,7 +723,7 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid) if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) continue; - LASSERT(ni->ni_cpts != NULL); + LASSERT(ni->ni_cpts); return ni->ni_cpts[lnet_nid_cpt_hash (nid, ni->ni_ncpts)]; } @@ -747,12 +761,12 @@ lnet_islocalnet(__u32 net) cpt = lnet_net_lock_current(); ni = lnet_net2ni_locked(net, cpt); - if (ni != NULL) + if (ni) lnet_ni_decref_locked(ni, cpt); lnet_net_unlock(cpt); - return ni != NULL; + return !!ni; } lnet_ni_t * @@ -783,11 +797,11 @@ lnet_islocalnid(lnet_nid_t nid) cpt = lnet_net_lock_current(); ni = lnet_nid2ni_locked(nid, cpt); - if (ni != NULL) + if (ni) lnet_ni_decref_locked(ni, cpt); lnet_net_unlock(cpt); - return ni != NULL; + return !!ni; } int @@ -803,7 +817,7 @@ lnet_count_acceptor_nis(void) list_for_each(tmp, &the_lnet.ln_nis) { ni = list_entry(tmp, lnet_ni_t, ni_list); - if (ni->ni_lnd->lnd_accept != NULL) + if (ni->ni_lnd->lnd_accept) count++; } @@ -812,90 +826,280 @@ lnet_count_acceptor_nis(void) return count; } -static int -lnet_ni_tq_credits(lnet_ni_t *ni) +static lnet_ping_info_t * +lnet_ping_info_create(int num_ni) { - int credits; + lnet_ping_info_t *ping_info; + unsigned int infosz; - LASSERT(ni->ni_ncpts >= 1); + infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]); + LIBCFS_ALLOC(ping_info, infosz); + if (!ping_info) { + CERROR("Can't allocate ping info[%d]\n", num_ni); + return NULL; + } - if (ni->ni_ncpts == 1) - return ni->ni_maxtxcredits; + ping_info->pi_nnis = num_ni; + ping_info->pi_pid = the_lnet.ln_pid; + ping_info->pi_magic = LNET_PROTO_PING_MAGIC; + ping_info->pi_features = LNET_PING_FEAT_NI_STATUS; - credits = ni->ni_maxtxcredits / ni->ni_ncpts; - credits = max(credits, 8 * ni->ni_peertxcredits); - credits = min(credits, ni->ni_maxtxcredits); + return ping_info; +} - return credits; +static inline int +lnet_get_ni_count(void) +{ + struct lnet_ni *ni; + int count = 0; + + lnet_net_lock(0); + + list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) + count++; + + lnet_net_unlock(0); + + return count; +} + +static inline void +lnet_ping_info_free(lnet_ping_info_t *pinfo) +{ + LIBCFS_FREE(pinfo, + offsetof(lnet_ping_info_t, + pi_ni[pinfo->pi_nnis])); } static void -lnet_shutdown_lndnis(void) +lnet_ping_info_destroy(void) { - int i; - int islo; - lnet_ni_t *ni; + struct lnet_ni *ni; - /* NB called holding the global mutex */ + lnet_net_lock(LNET_LOCK_EX); - /* All quiet on 
the API front */ - LASSERT(!the_lnet.ln_shutdown); - LASSERT(the_lnet.ln_refcount == 0); - LASSERT(list_empty(&the_lnet.ln_nis_zombie)); + list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { + lnet_ni_lock(ni); + ni->ni_status = NULL; + lnet_ni_unlock(ni); + } - lnet_net_lock(LNET_LOCK_EX); - the_lnet.ln_shutdown = 1; /* flag shutdown */ + lnet_ping_info_free(the_lnet.ln_ping_info); + the_lnet.ln_ping_info = NULL; - /* Unlink NIs from the global table */ - while (!list_empty(&the_lnet.ln_nis)) { - ni = list_entry(the_lnet.ln_nis.next, - lnet_ni_t, ni_list); - /* move it to zombie list and nobody can find it anymore */ - list_move(&ni->ni_list, &the_lnet.ln_nis_zombie); - lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */ - - if (!list_empty(&ni->ni_cptlist)) { - list_del_init(&ni->ni_cptlist); - lnet_ni_decref_locked(ni, 0); + lnet_net_unlock(LNET_LOCK_EX); +} + +static void +lnet_ping_event_handler(lnet_event_t *event) +{ + lnet_ping_info_t *pinfo = event->md.user_ptr; + + if (event->unlinked) + pinfo->pi_features = LNET_PING_FEAT_INVAL; +} + +static int +lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle, + int ni_count, bool set_eq) +{ + lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY}; + lnet_handle_me_t me_handle; + lnet_md_t md = { NULL }; + int rc, rc2; + + if (set_eq) { + rc = LNetEQAlloc(0, lnet_ping_event_handler, + &the_lnet.ln_ping_target_eq); + if (rc) { + CERROR("Can't allocate ping EQ: %d\n", rc); + return rc; } } - /* Drop the cached eqwait NI. */ - if (the_lnet.ln_eq_waitni != NULL) { - lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0); - the_lnet.ln_eq_waitni = NULL; + *ppinfo = lnet_ping_info_create(ni_count); + if (!*ppinfo) { + rc = -ENOMEM; + goto failed_0; } - /* Drop the cached loopback NI. */ - if (the_lnet.ln_loni != NULL) { - lnet_ni_decref_locked(the_lnet.ln_loni, 0); - the_lnet.ln_loni = NULL; + rc = LNetMEAttach(LNET_RESERVED_PORTAL, id, + LNET_PROTO_PING_MATCHBITS, 0, + LNET_UNLINK, LNET_INS_AFTER, + &me_handle); + if (rc) { + CERROR("Can't create ping ME: %d\n", rc); + goto failed_1; } - lnet_net_unlock(LNET_LOCK_EX); + /* initialize md content */ + md.start = *ppinfo; + md.length = offsetof(lnet_ping_info_t, + pi_ni[(*ppinfo)->pi_nnis]); + md.threshold = LNET_MD_THRESH_INF; + md.max_size = 0; + md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE | + LNET_MD_MANAGE_REMOTE; + md.user_ptr = NULL; + md.eq_handle = the_lnet.ln_ping_target_eq; + md.user_ptr = *ppinfo; - /* Clear lazy portals and drop delayed messages which hold refs - * on their lnet_msg_t::msg_rxpeer */ - for (i = 0; i < the_lnet.ln_nportals; i++) - LNetClearLazyPortal(i); + rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle); + if (rc) { + CERROR("Can't attach ping MD: %d\n", rc); + goto failed_2; + } + + return 0; - /* Clear the peer table and wait for all peers to go (they hold refs on - * their NIs) */ - lnet_peer_tables_cleanup(); +failed_2: + rc2 = LNetMEUnlink(me_handle); + LASSERT(!rc2); +failed_1: + lnet_ping_info_free(*ppinfo); + *ppinfo = NULL; +failed_0: + if (set_eq) + LNetEQFree(the_lnet.ln_ping_target_eq); + return rc; +} + +static void +lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle) +{ + sigset_t blocked = cfs_block_allsigs(); + + LNetMDUnlink(*md_handle); + LNetInvalidateHandle(md_handle); + + /* NB md could be busy; this just starts the unlink */ + while (pinfo->pi_features != LNET_PING_FEAT_INVAL) { + CDEBUG(D_NET, "Still waiting for ping MD to unlink\n"); + set_current_state(TASK_UNINTERRUPTIBLE); + 
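/* poll at 1 Hz until lnet_ping_event_handler() marks the ping MD
 * unlinked by setting pi_features to LNET_PING_FEAT_INVAL */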
schedule_timeout(cfs_time_seconds(1)); + } + + cfs_restore_sigs(blocked); +} + +static void +lnet_ping_info_install_locked(lnet_ping_info_t *ping_info) +{ + lnet_ni_status_t *ns; + lnet_ni_t *ni; + int i = 0; + + list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { + LASSERT(i < ping_info->pi_nnis); + + ns = &ping_info->pi_ni[i]; + + ns->ns_nid = ni->ni_nid; + + lnet_ni_lock(ni); + ns->ns_status = (ni->ni_status) ? + ni->ni_status->ns_status : LNET_NI_STATUS_UP; + ni->ni_status = ns; + lnet_ni_unlock(ni); + + i++; + } +} +static void +lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle) +{ + lnet_ping_info_t *old_pinfo = NULL; + lnet_handle_md_t old_md; + + /* switch the NIs to point to the new ping info created */ lnet_net_lock(LNET_LOCK_EX); - /* Now wait for the NI's I just nuked to show up on ln_zombie_nis - * and shut them down in guaranteed thread context */ + + if (!the_lnet.ln_routing) + pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED; + lnet_ping_info_install_locked(pinfo); + + if (the_lnet.ln_ping_info) { + old_pinfo = the_lnet.ln_ping_info; + old_md = the_lnet.ln_ping_target_md; + } + the_lnet.ln_ping_target_md = md_handle; + the_lnet.ln_ping_info = pinfo; + + lnet_net_unlock(LNET_LOCK_EX); + + if (old_pinfo) { + /* unlink the old ping info */ + lnet_ping_md_unlink(old_pinfo, &old_md); + lnet_ping_info_free(old_pinfo); + } +} + +static void +lnet_ping_target_fini(void) +{ + int rc; + + lnet_ping_md_unlink(the_lnet.ln_ping_info, + &the_lnet.ln_ping_target_md); + + rc = LNetEQFree(the_lnet.ln_ping_target_eq); + LASSERT(!rc); + + lnet_ping_info_destroy(); +} + +static int +lnet_ni_tq_credits(lnet_ni_t *ni) +{ + int credits; + + LASSERT(ni->ni_ncpts >= 1); + + if (ni->ni_ncpts == 1) + return ni->ni_maxtxcredits; + + credits = ni->ni_maxtxcredits / ni->ni_ncpts; + credits = max(credits, 8 * ni->ni_peertxcredits); + credits = min(credits, ni->ni_maxtxcredits); + + return credits; +} + +static void +lnet_ni_unlink_locked(lnet_ni_t *ni) +{ + if (!list_empty(&ni->ni_cptlist)) { + list_del_init(&ni->ni_cptlist); + lnet_ni_decref_locked(ni, 0); + } + + /* move it to zombie list and nobody can find it anymore */ + LASSERT(!list_empty(&ni->ni_list)); + list_move(&ni->ni_list, &the_lnet.ln_nis_zombie); + lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */ +} + +static void +lnet_clear_zombies_nis_locked(void) +{ + int i; + int islo; + lnet_ni_t *ni; + lnet_ni_t *temp; + + /* + * Now wait for the NI's I just nuked to show up on ln_zombie_nis + * and shut them down in guaranteed thread context + */ i = 2; - while (!list_empty(&the_lnet.ln_nis_zombie)) { + list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) { int *ref; int j; - ni = list_entry(the_lnet.ln_nis_zombie.next, - lnet_ni_t, ni_list); list_del_init(&ni->ni_list); cfs_percpt_for_each(ref, j, ni->ni_refs) { - if (*ref == 0) + if (!*ref) continue; /* still busy, add it back to zombie list */ list_add(&ni->ni_list, &the_lnet.ln_nis_zombie); @@ -921,11 +1125,12 @@ lnet_shutdown_lndnis(void) islo = ni->ni_lnd->lnd_type == LOLND; LASSERT(!in_interrupt()); - (ni->ni_lnd->lnd_shutdown)(ni); - - /* can't deref lnd anymore now; it might have unregistered - * itself... */ + ni->ni_lnd->lnd_shutdown(ni); + /* + * can't deref lnd anymore now; it might have unregistered + * itself... 
+ */ if (!islo) CDEBUG(D_LNI, "Removed LNI %s\n", libcfs_nid2str(ni->ni_nid)); @@ -935,176 +1140,263 @@ lnet_shutdown_lndnis(void) lnet_net_lock(LNET_LOCK_EX); } +} + +static void +lnet_shutdown_lndnis(void) +{ + lnet_ni_t *ni; + lnet_ni_t *temp; + int i; + + /* NB called holding the global mutex */ + + /* All quiet on the API front */ + LASSERT(!the_lnet.ln_shutdown); + LASSERT(!the_lnet.ln_refcount); + LASSERT(list_empty(&the_lnet.ln_nis_zombie)); + + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_shutdown = 1; /* flag shutdown */ + + /* Unlink NIs from the global table */ + list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) { + lnet_ni_unlink_locked(ni); + } + + /* Drop the cached loopback NI. */ + if (the_lnet.ln_loni) { + lnet_ni_decref_locked(the_lnet.ln_loni, 0); + the_lnet.ln_loni = NULL; + } + + lnet_net_unlock(LNET_LOCK_EX); + + /* + * Clear lazy portals and drop delayed messages which hold refs + * on their lnet_msg_t::msg_rxpeer + */ + for (i = 0; i < the_lnet.ln_nportals; i++) + LNetClearLazyPortal(i); + + /* + * Clear the peer table and wait for all peers to go (they hold refs on + * their NIs) + */ + lnet_peer_tables_cleanup(NULL); + + lnet_net_lock(LNET_LOCK_EX); + lnet_clear_zombies_nis_locked(); the_lnet.ln_shutdown = 0; lnet_net_unlock(LNET_LOCK_EX); +} - if (the_lnet.ln_network_tokens != NULL) { - LIBCFS_FREE(the_lnet.ln_network_tokens, - the_lnet.ln_network_tokens_nob); - the_lnet.ln_network_tokens = NULL; - } +/* shutdown down the NI and release refcount */ +static void +lnet_shutdown_lndni(struct lnet_ni *ni) +{ + int i; + + lnet_net_lock(LNET_LOCK_EX); + lnet_ni_unlink_locked(ni); + lnet_net_unlock(LNET_LOCK_EX); + + /* clear messages for this NI on the lazy portal */ + for (i = 0; i < the_lnet.ln_nportals; i++) + lnet_clear_lazy_portal(ni, i, "Shutting down NI"); + + /* Do peer table cleanup for this ni */ + lnet_peer_tables_cleanup(ni); + + lnet_net_lock(LNET_LOCK_EX); + lnet_clear_zombies_nis_locked(); + lnet_net_unlock(LNET_LOCK_EX); } static int -lnet_startup_lndnis(void) +lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout, + __s32 peer_cr, __s32 peer_buf_cr, __s32 credits) { + int rc = -EINVAL; + int lnd_type; lnd_t *lnd; - struct lnet_ni *ni; struct lnet_tx_queue *tq; - struct list_head nilist; int i; - int rc = 0; - __u32 lnd_type; - int nicount = 0; - char *nets = lnet_get_networks(); - INIT_LIST_HEAD(&nilist); + lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); - if (nets == NULL) - goto failed; + LASSERT(libcfs_isknown_lnd(lnd_type)); - rc = lnet_parse_networks(&nilist, nets); - if (rc != 0) - goto failed; + if (lnd_type == CIBLND || lnd_type == OPENIBLND || + lnd_type == IIBLND || lnd_type == VIBLND) { + CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type)); + goto failed0; + } - while (!list_empty(&nilist)) { - ni = list_entry(nilist.next, lnet_ni_t, ni_list); - lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); + /* Make sure this new NI is unique. 
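 * (editor's note) a duplicate network number fails with -EEXIST,
 * except for the loopback LND, whose duplicate NI is silently freed
 * because exactly one loopback NI always exists.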
*/ + lnet_net_lock(LNET_LOCK_EX); + rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis); + lnet_net_unlock(LNET_LOCK_EX); + if (!rc) { + if (lnd_type == LOLND) { + lnet_ni_free(ni); + return 0; + } - LASSERT(libcfs_isknown_lnd(lnd_type)); + CERROR("Net %s is not unique\n", + libcfs_net2str(LNET_NIDNET(ni->ni_nid))); + rc = -EEXIST; + goto failed0; + } - if (lnd_type == CIBLND || - lnd_type == OPENIBLND || - lnd_type == IIBLND || - lnd_type == VIBLND) { - CERROR("LND %s obsoleted\n", - libcfs_lnd2str(lnd_type)); - goto failed; - } + mutex_lock(&the_lnet.ln_lnd_mutex); + lnd = lnet_find_lnd_by_type(lnd_type); + if (!lnd) { + mutex_unlock(&the_lnet.ln_lnd_mutex); + rc = request_module("%s", libcfs_lnd2modname(lnd_type)); mutex_lock(&the_lnet.ln_lnd_mutex); - lnd = lnet_find_lnd_by_type(lnd_type); - if (lnd == NULL) { + lnd = lnet_find_lnd_by_type(lnd_type); + if (!lnd) { mutex_unlock(&the_lnet.ln_lnd_mutex); - rc = request_module("%s", - libcfs_lnd2modname(lnd_type)); - mutex_lock(&the_lnet.ln_lnd_mutex); - - lnd = lnet_find_lnd_by_type(lnd_type); - if (lnd == NULL) { - mutex_unlock(&the_lnet.ln_lnd_mutex); - CERROR("Can't load LND %s, module %s, rc=%d\n", - libcfs_lnd2str(lnd_type), - libcfs_lnd2modname(lnd_type), rc); - goto failed; - } + CERROR("Can't load LND %s, module %s, rc=%d\n", + libcfs_lnd2str(lnd_type), + libcfs_lnd2modname(lnd_type), rc); + rc = -EINVAL; + goto failed0; } + } - lnet_net_lock(LNET_LOCK_EX); - lnd->lnd_refcount++; - lnet_net_unlock(LNET_LOCK_EX); + lnet_net_lock(LNET_LOCK_EX); + lnd->lnd_refcount++; + lnet_net_unlock(LNET_LOCK_EX); - ni->ni_lnd = lnd; + ni->ni_lnd = lnd; - rc = (lnd->lnd_startup)(ni); + rc = lnd->lnd_startup(ni); - mutex_unlock(&the_lnet.ln_lnd_mutex); - - if (rc != 0) { - LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n", - rc, libcfs_lnd2str(lnd->lnd_type)); - lnet_net_lock(LNET_LOCK_EX); - lnd->lnd_refcount--; - lnet_net_unlock(LNET_LOCK_EX); - goto failed; - } - - LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL); - - list_del(&ni->ni_list); + mutex_unlock(&the_lnet.ln_lnd_mutex); + if (rc) { + LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n", + rc, libcfs_lnd2str(lnd->lnd_type)); lnet_net_lock(LNET_LOCK_EX); - /* refcount for ln_nis */ - lnet_ni_addref_locked(ni, 0); - list_add_tail(&ni->ni_list, &the_lnet.ln_nis); - if (ni->ni_cpts != NULL) { - list_add_tail(&ni->ni_cptlist, - &the_lnet.ln_nis_cpt); - lnet_ni_addref_locked(ni, 0); - } - + lnd->lnd_refcount--; lnet_net_unlock(LNET_LOCK_EX); + goto failed0; + } - if (lnd->lnd_type == LOLND) { - lnet_ni_addref(ni); - LASSERT(the_lnet.ln_loni == NULL); - the_lnet.ln_loni = ni; - continue; - } + /* + * If given some LND tunable parameters, parse those now to + * override the values in the NI structure. + */ + if (peer_buf_cr >= 0) + ni->ni_peerrtrcredits = peer_buf_cr; + if (peer_timeout >= 0) + ni->ni_peertimeout = peer_timeout; + /* + * TODO + * Note: For now, don't allow the user to change + * peertxcredits as this number is used in the + * IB LND to control queue depth. + * if (peer_cr != -1) + * ni->ni_peertxcredits = peer_cr; + */ + if (credits >= 0) + ni->ni_maxtxcredits = credits; - if (ni->ni_peertxcredits == 0 || - ni->ni_maxtxcredits == 0) { - LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n", - libcfs_lnd2str(lnd->lnd_type), - ni->ni_peertxcredits == 0 ? 
- "" : "per-peer "); - goto failed; - } + LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query); - cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { - tq->tq_credits_min = - tq->tq_credits_max = - tq->tq_credits = lnet_ni_tq_credits(ni); - } + lnet_net_lock(LNET_LOCK_EX); + /* refcount for ln_nis */ + lnet_ni_addref_locked(ni, 0); + list_add_tail(&ni->ni_list, &the_lnet.ln_nis); + if (ni->ni_cpts) { + lnet_ni_addref_locked(ni, 0); + list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt); + } - CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n", - libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits, - lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER, - ni->ni_peerrtrcredits, ni->ni_peertimeout); + lnet_net_unlock(LNET_LOCK_EX); - nicount++; + if (lnd->lnd_type == LOLND) { + lnet_ni_addref(ni); + LASSERT(!the_lnet.ln_loni); + the_lnet.ln_loni = ni; + return 0; } - if (the_lnet.ln_eq_waitni != NULL && nicount > 1) { - lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type; - LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n", - libcfs_lnd2str(lnd_type)); - goto failed; + if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) { + LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n", + libcfs_lnd2str(lnd->lnd_type), + !ni->ni_peertxcredits ? + "" : "per-peer "); + /* + * shutdown the NI since if we get here then it must've already + * been started + */ + lnet_shutdown_lndni(ni); + return -EINVAL; + } + + cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { + tq->tq_credits_min = + tq->tq_credits_max = + tq->tq_credits = lnet_ni_tq_credits(ni); } + CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n", + libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits, + lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER, + ni->ni_peerrtrcredits, ni->ni_peertimeout); + return 0; +failed0: + lnet_ni_free(ni); + return rc; +} - failed: - lnet_shutdown_lndnis(); +static int +lnet_startup_lndnis(struct list_head *nilist) +{ + struct lnet_ni *ni; + int rc; + int ni_count = 0; - while (!list_empty(&nilist)) { - ni = list_entry(nilist.next, lnet_ni_t, ni_list); + while (!list_empty(nilist)) { + ni = list_entry(nilist->next, lnet_ni_t, ni_list); list_del(&ni->ni_list); - lnet_ni_free(ni); + rc = lnet_startup_lndni(ni, -1, -1, -1, -1); + + if (rc < 0) + goto failed; + + ni_count++; } - return -ENETDOWN; + return ni_count; +failed: + lnet_shutdown_lndnis(); + + return rc; } /** * Initialize LNet library. * - * Only userspace program needs to call this function - it's automatically - * called in the kernel at module loading time. Caller has to call lnet_fini() - * after a call to lnet_init(), if and only if the latter returned 0. It must - * be called exactly once. + * Automatically called at module loading time. Caller has to call + * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the + * latter returned 0. It must be called exactly once. * - * \return 0 on success, and -ve on failures. + * \retval 0 on success + * \retval -ve on failures. 
*/ -int -lnet_init(void) +int lnet_lib_init(void) { int rc; lnet_assert_wire_constants(); - LASSERT(!the_lnet.ln_init); memset(&the_lnet, 0, sizeof(the_lnet)); @@ -1117,28 +1409,29 @@ lnet_init(void) /* we are under risk of consuming all lh_cookie */ CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n", the_lnet.ln_cpt_number, LNET_CPT_MAX); - return -1; + return -E2BIG; } while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number) the_lnet.ln_cpt_bits++; rc = lnet_create_locks(); - if (rc != 0) { + if (rc) { CERROR("Can't create LNet global locks: %d\n", rc); - return -1; + return rc; } the_lnet.ln_refcount = 0; - the_lnet.ln_init = 1; LNetInvalidateHandle(&the_lnet.ln_rc_eqh); INIT_LIST_HEAD(&the_lnet.ln_lnds); INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie); INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow); - /* The hash table size is the number of bits it takes to express the set + /* + * The hash table size is the number of bits it takes to express the set * ln_num_routes, minus 1 (better to under estimate than over so we - * don't waste memory). */ + * don't waste memory). + */ if (rnet_htable_size <= 0) rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT; else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX) @@ -1146,9 +1439,11 @@ lnet_init(void) the_lnet.ln_remote_nets_hbits = max_t(int, 1, order_base_2(rnet_htable_size) - 1); - /* All LNDs apart from the LOLND are in separate modules. They + /* + * All LNDs apart from the LOLND are in separate modules. They * register themselves when their module loads, and unregister - * themselves when their module is unloaded. */ + * themselves when their module is unloaded. + */ lnet_register_lnd(&the_lolnd); return 0; } @@ -1156,30 +1451,22 @@ lnet_init(void) /** * Finalize LNet library. * - * Only userspace program needs to call this function. It can be called - * at most once. - * - * \pre lnet_init() called with success. + * \pre lnet_lib_init() called with success. * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls. */ -void -lnet_fini(void) +void lnet_lib_exit(void) { - LASSERT(the_lnet.ln_init); - LASSERT(the_lnet.ln_refcount == 0); + LASSERT(!the_lnet.ln_refcount); while (!list_empty(&the_lnet.ln_lnds)) lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next, - lnd_t, lnd_list)); + lnd_t, lnd_list)); lnet_destroy_locks(); - - the_lnet.ln_init = 0; } /** * Set LNet PID and start LNet interfaces, routing, and forwarding. * - * Userspace program should call this after a successful call to lnet_init(). * Users must call this function at least once before any other functions. * For each successful call there must be a corresponding call to * LNetNIFini(). 
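The remote-nets hash sizing above turns a requested table size into a bit count: clamp the request between the default and the maximum, then take order_base_2() minus one with a floor of 1. A userspace re-implementation for illustration; the 128/1024 bounds below are placeholders, not the real LNET_REMOTE_NETS_HASH_* values.

#include <stdio.h>

static int order_base_2(unsigned n)    /* ceil(log2(n)), n >= 1 */
{
    int bits = 0;

    while ((1u << bits) < n)
        bits++;
    return bits;
}

static int hash_bits(int requested, int def, int max)
{
    int bits;

    if (requested <= 0)
        requested = def;               /* fall back to the default */
    else if (requested > max)
        requested = max;               /* clamp oversized requests */

    bits = order_base_2(requested) - 1;
    return bits < 1 ? 1 : bits;        /* the max_t(int, 1, ...) floor */
}

int main(void)
{
    printf("%d\n", hash_bits(0, 128, 1024));   /* default 128 -> 6 bits */
    return 0;
}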
For subsequent calls to LNetNIInit(), \a requested_pid is @@ -1197,77 +1484,114 @@ LNetNIInit(lnet_pid_t requested_pid) { int im_a_router = 0; int rc; + int ni_count; + lnet_ping_info_t *pinfo; + lnet_handle_md_t md_handle; + struct list_head net_head; + + INIT_LIST_HEAD(&net_head); mutex_lock(&the_lnet.ln_api_mutex); - LASSERT(the_lnet.ln_init); CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount); if (the_lnet.ln_refcount > 0) { rc = the_lnet.ln_refcount++; - goto out; + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } - if (requested_pid == LNET_PID_ANY) { - /* Don't instantiate LNET just for me */ - rc = -ENETDOWN; - goto failed0; + rc = lnet_prepare(requested_pid); + if (rc) { + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; } - rc = lnet_prepare(requested_pid); - if (rc != 0) - goto failed0; + /* Add in the loopback network */ + if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) { + rc = -ENOMEM; + goto err_empty_list; + } - rc = lnet_startup_lndnis(); - if (rc != 0) - goto failed1; + /* + * If LNet is being initialized via DLC it is possible + * that the user requests not to load module parameters (ones which + * are supported by DLC) on initialization. Therefore, make sure not + * to load networks, routes and forwarding from module parameters + * in this case. On cleanup in case of failure only clean up + * routes if it has been loaded + */ + if (!the_lnet.ln_nis_from_mod_params) { + rc = lnet_parse_networks(&net_head, lnet_get_networks()); + if (rc < 0) + goto err_empty_list; + } - rc = lnet_parse_routes(lnet_get_routes(), &im_a_router); - if (rc != 0) - goto failed2; + ni_count = lnet_startup_lndnis(&net_head); + if (ni_count < 0) { + rc = ni_count; + goto err_empty_list; + } + + if (!the_lnet.ln_nis_from_mod_params) { + rc = lnet_parse_routes(lnet_get_routes(), &im_a_router); + if (rc) + goto err_shutdown_lndnis; - rc = lnet_check_routes(); - if (rc != 0) - goto failed2; + rc = lnet_check_routes(); + if (rc) + goto err_destory_routes; - rc = lnet_rtrpools_alloc(im_a_router); - if (rc != 0) - goto failed2; + rc = lnet_rtrpools_alloc(im_a_router); + if (rc) + goto err_destory_routes; + } rc = lnet_acceptor_start(); - if (rc != 0) - goto failed2; + if (rc) + goto err_destory_routes; the_lnet.ln_refcount = 1; /* Now I may use my own API functions... 
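LNetNIInit() above follows the usual refcounted bring-up pattern: under the API mutex, callers after the first only bump ln_refcount, and only the 0 to 1 transition performs real initialization. Sketched here with POSIX threads instead of the kernel mutex; bring_up() is a placeholder for the whole prepare/startup sequence.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t api_mutex = PTHREAD_MUTEX_INITIALIZER;
static int refcount;

static int bring_up(void)              /* placeholder for the real work */
{
    puts("full initialization");
    return 0;
}

static int ni_init(void)
{
    int rc;

    pthread_mutex_lock(&api_mutex);
    if (refcount > 0) {
        rc = refcount++;               /* already up: just take a ref */
        pthread_mutex_unlock(&api_mutex);
        return rc;
    }

    rc = bring_up();                   /* only the first caller gets here */
    if (!rc)
        refcount = 1;
    pthread_mutex_unlock(&api_mutex);
    return rc;
}

int main(void)
{
    printf("%d %d\n", ni_init(), ni_init());   /* 0 then 1 */
    return 0;
}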
*/ - /* NB router checker needs the_lnet.ln_ping_info in - * lnet_router_checker -> lnet_update_ni_status_locked */ - rc = lnet_ping_target_init(); - if (rc != 0) - goto failed3; + rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true); + if (rc) + goto err_acceptor_stop; + + lnet_ping_target_update(pinfo, md_handle); rc = lnet_router_checker_start(); - if (rc != 0) - goto failed4; + if (rc) + goto err_stop_ping; + lnet_fault_init(); lnet_router_debugfs_init(); - goto out; - failed4: + mutex_unlock(&the_lnet.ln_api_mutex); + + return 0; + +err_stop_ping: lnet_ping_target_fini(); - failed3: +err_acceptor_stop: the_lnet.ln_refcount = 0; lnet_acceptor_stop(); - failed2: - lnet_destroy_routes(); +err_destory_routes: + if (!the_lnet.ln_nis_from_mod_params) + lnet_destroy_routes(); +err_shutdown_lndnis: lnet_shutdown_lndnis(); - failed1: +err_empty_list: lnet_unprepare(); - failed0: LASSERT(rc < 0); - out: mutex_unlock(&the_lnet.ln_api_mutex); + while (!list_empty(&net_head)) { + struct lnet_ni *ni; + + ni = list_entry(net_head.next, struct lnet_ni, ni_list); + list_del_init(&ni->ni_list); + lnet_ni_free(ni); + } return rc; } EXPORT_SYMBOL(LNetNIInit); @@ -1286,7 +1610,6 @@ LNetNIFini(void) { mutex_lock(&the_lnet.ln_api_mutex); - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (the_lnet.ln_refcount != 1) { @@ -1294,6 +1617,7 @@ LNetNIFini(void) } else { LASSERT(!the_lnet.ln_niinit_self); + lnet_fault_fini(); lnet_router_debugfs_fini(); lnet_router_checker_stop(); lnet_ping_target_fini(); @@ -1313,30 +1637,233 @@ LNetNIFini(void) EXPORT_SYMBOL(LNetNIFini); /** - * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and - * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet - * internal ioctl handler. - * - * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it. + * Grabs the ni data from the ni structure and fills the out + * parameters * - * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer. - * The data will be printed to system console. Don't use it excessively. - * \param arg A pointer to lnet_process_id_t, process ID of the peer. 
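The error ladder above (err_stop_ping through err_empty_list) is the standard goto-unwind idiom: each label undoes exactly the steps that had succeeded before the failure, in reverse order. A compact self-contained illustration with fake steps.

#include <stdio.h>

static int step(const char *name, int fail)
{
    printf("do %s\n", name);
    return fail ? -1 : 0;
}

static int init_chain(int fail_at)
{
    int rc;

    rc = step("prepare", fail_at == 1);
    if (rc)
        return rc;                     /* nothing to undo yet */

    rc = step("start NIs", fail_at == 2);
    if (rc)
        goto err_unprepare;

    rc = step("ping target", fail_at == 3);
    if (rc)
        goto err_stop_nis;

    return 0;

err_stop_nis:
    puts("undo start NIs");
err_unprepare:
    puts("undo prepare");
    return rc;
}

int main(void)
{
    return init_chain(3) ? 1 : 0;      /* exercises the full unwind */
}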
+ * \param[in] ni network interface structure + * \param[out] cpt_count the number of cpts the ni is on + * \param[out] nid Network Interface ID + * \param[out] peer_timeout NI peer timeout + * \param[out] peer_tx_crdits NI peer transmit credits + * \param[out] peer_rtr_credits NI peer router credits + * \param[out] max_tx_credits NI max transmit credit + * \param[out] net_config Network configuration + */ +static void +lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid, + int *peer_timeout, int *peer_tx_credits, + int *peer_rtr_credits, int *max_tx_credits, + struct lnet_ioctl_net_config *net_config) +{ + int i; + + if (!ni) + return; + + if (!net_config) + return; + + BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) != + ARRAY_SIZE(net_config->ni_interfaces)); + + for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) { + if (!ni->ni_interfaces[i]) + break; + + strncpy(net_config->ni_interfaces[i], + ni->ni_interfaces[i], + sizeof(net_config->ni_interfaces[i])); + } + + *nid = ni->ni_nid; + *peer_timeout = ni->ni_peertimeout; + *peer_tx_credits = ni->ni_peertxcredits; + *peer_rtr_credits = ni->ni_peerrtrcredits; + *max_tx_credits = ni->ni_maxtxcredits; + + net_config->ni_status = ni->ni_status->ns_status; + + if (ni->ni_cpts) { + int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT); + + for (i = 0; i < num_cpts; i++) + net_config->ni_cpts[i] = ni->ni_cpts[i]; + + *cpt_count = num_cpts; + } +} + +int +lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout, + int *peer_tx_credits, int *peer_rtr_credits, + int *max_tx_credits, + struct lnet_ioctl_net_config *net_config) +{ + struct lnet_ni *ni; + struct list_head *tmp; + int cpt, i = 0; + int rc = -ENOENT; + + cpt = lnet_net_lock_current(); + + list_for_each(tmp, &the_lnet.ln_nis) { + if (i++ != idx) + continue; + + ni = list_entry(tmp, lnet_ni_t, ni_list); + lnet_ni_lock(ni); + lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout, + peer_tx_credits, peer_rtr_credits, + max_tx_credits, net_config); + lnet_ni_unlock(ni); + rc = 0; + break; + } + + lnet_net_unlock(cpt); + return rc; +} + +int +lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets, + __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr, + __s32 credits) +{ + lnet_ping_info_t *pinfo; + lnet_handle_md_t md_handle; + struct lnet_ni *ni; + struct list_head net_head; + lnet_remotenet_t *rnet; + int rc; + + INIT_LIST_HEAD(&net_head); + + /* Create a ni structure for the network string */ + rc = lnet_parse_networks(&net_head, nets); + if (rc <= 0) + return !rc ? 
-EINVAL : rc; + + mutex_lock(&the_lnet.ln_api_mutex); + + if (rc > 1) { + rc = -EINVAL; /* only add one interface per call */ + goto failed0; + } + + ni = list_entry(net_head.next, struct lnet_ni, ni_list); + + lnet_net_lock(LNET_LOCK_EX); + rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid)); + lnet_net_unlock(LNET_LOCK_EX); + /* + * make sure that the net added doesn't invalidate the current + * configuration LNet is keeping + */ + if (rnet) { + CERROR("Adding net %s will invalidate routing configuration\n", + nets); + rc = -EUSERS; + goto failed0; + } + + rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(), + false); + if (rc) + goto failed0; + + list_del_init(&ni->ni_list); + + rc = lnet_startup_lndni(ni, peer_timeout, peer_cr, + peer_buf_cr, credits); + if (rc) + goto failed1; + + if (ni->ni_lnd->lnd_accept) { + rc = lnet_acceptor_start(); + if (rc < 0) { + /* shutdown the ni that we just started */ + CERROR("Failed to start up acceptor thread\n"); + lnet_shutdown_lndni(ni); + goto failed1; + } + } + + lnet_ping_target_update(pinfo, md_handle); + mutex_unlock(&the_lnet.ln_api_mutex); + + return 0; + +failed1: + lnet_ping_md_unlink(pinfo, &md_handle); + lnet_ping_info_free(pinfo); +failed0: + mutex_unlock(&the_lnet.ln_api_mutex); + while (!list_empty(&net_head)) { + ni = list_entry(net_head.next, struct lnet_ni, ni_list); + list_del_init(&ni->ni_list); + lnet_ni_free(ni); + } + return rc; +} + +int +lnet_dyn_del_ni(__u32 net) +{ + lnet_ni_t *ni; + lnet_ping_info_t *pinfo; + lnet_handle_md_t md_handle; + int rc; + + /* don't allow userspace to shutdown the LOLND */ + if (LNET_NETTYP(net) == LOLND) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + /* create and link a new ping info, before removing the old one */ + rc = lnet_ping_info_setup(&pinfo, &md_handle, + lnet_get_ni_count() - 1, false); + if (rc) + goto out; + + ni = lnet_net2ni(net); + if (!ni) { + rc = -EINVAL; + goto failed; + } + + /* decrement the reference counter taken by lnet_net2ni() */ + lnet_ni_decref_locked(ni, 0); + + lnet_shutdown_lndni(ni); + + if (!lnet_count_acceptor_nis()) + lnet_acceptor_stop(); + + lnet_ping_target_update(pinfo, md_handle); + goto out; +failed: + lnet_ping_md_unlink(pinfo, &md_handle); + lnet_ping_info_free(pinfo); +out: + mutex_unlock(&the_lnet.ln_api_mutex); + + return rc; +} + +/** + * LNet ioctl handler. * - * \return Always return 0 when called by users directly (i.e., not via ioctl). */ int LNetCtl(unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; + struct lnet_ioctl_config_data *config; lnet_process_id_t id = {0}; lnet_ni_t *ni; int rc; unsigned long secs_passed; - LASSERT(the_lnet.ln_init); - LASSERT(the_lnet.ln_refcount > 0); - switch (cmd) { case IOC_LIBCFS_GET_NI: rc = LNetGetId(data->ioc_count, &id); @@ -1347,26 +1874,149 @@ LNetCtl(unsigned int cmd, void *arg) return lnet_fail_nid(data->ioc_nid, data->ioc_count); case IOC_LIBCFS_ADD_ROUTE: - rc = lnet_add_route(data->ioc_net, data->ioc_count, - data->ioc_nid, data->ioc_priority); - return (rc != 0) ? 
rc : lnet_check_routes(); + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_add_route(config->cfg_net, + config->cfg_config_u.cfg_route.rtr_hop, + config->cfg_nid, + config->cfg_config_u.cfg_route.rtr_priority); + if (!rc) { + rc = lnet_check_routes(); + if (rc) + lnet_del_route(config->cfg_net, + config->cfg_nid); + } + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; case IOC_LIBCFS_DEL_ROUTE: - return lnet_del_route(data->ioc_net, data->ioc_nid); + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_del_route(config->cfg_net, config->cfg_nid); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; case IOC_LIBCFS_GET_ROUTE: - return lnet_get_route(data->ioc_count, - &data->ioc_net, &data->ioc_count, - &data->ioc_nid, &data->ioc_flags, - &data->ioc_priority); + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + return lnet_get_route(config->cfg_count, + &config->cfg_net, + &config->cfg_config_u.cfg_route.rtr_hop, + &config->cfg_nid, + &config->cfg_config_u.cfg_route.rtr_flags, + &config->cfg_config_u.cfg_route.rtr_priority); + + case IOC_LIBCFS_GET_NET: { + struct lnet_ioctl_net_config *net_config; + size_t total = sizeof(*config) + sizeof(*net_config); + + config = arg; + + if (config->cfg_hdr.ioc_len < total) + return -EINVAL; + + net_config = (struct lnet_ioctl_net_config *) + config->cfg_bulk; + if (!net_config) + return -EINVAL; + + return lnet_get_net_config(config->cfg_count, + &config->cfg_ncpts, + &config->cfg_nid, + &config->cfg_config_u.cfg_net.net_peer_timeout, + &config->cfg_config_u.cfg_net.net_peer_tx_credits, + &config->cfg_config_u.cfg_net.net_peer_rtr_credits, + &config->cfg_config_u.cfg_net.net_max_tx_credits, + net_config); + } + + case IOC_LIBCFS_GET_LNET_STATS: { + struct lnet_ioctl_lnet_stats *lnet_stats = arg; + + if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats)) + return -EINVAL; + + lnet_counters_get(&lnet_stats->st_cntrs); + return 0; + } + + case IOC_LIBCFS_CONFIG_RTR: + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + if (config->cfg_config_u.cfg_buffers.buf_enable) { + rc = lnet_rtrpools_enable(); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; + } + lnet_rtrpools_disable(); + mutex_unlock(&the_lnet.ln_api_mutex); + return 0; + + case IOC_LIBCFS_ADD_BUF: + config = arg; + + if (config->cfg_hdr.ioc_len < sizeof(*config)) + return -EINVAL; + + mutex_lock(&the_lnet.ln_api_mutex); + rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny, + config->cfg_config_u.cfg_buffers.buf_small, + config->cfg_config_u.cfg_buffers.buf_large); + mutex_unlock(&the_lnet.ln_api_mutex); + return rc; + + case IOC_LIBCFS_GET_BUF: { + struct lnet_ioctl_pool_cfg *pool_cfg; + size_t total = sizeof(*config) + sizeof(*pool_cfg); + + config = arg; + + if (config->cfg_hdr.ioc_len < total) + return -EINVAL; + + pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk; + return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg); + } + + case IOC_LIBCFS_GET_PEER_INFO: { + struct lnet_ioctl_peer *peer_info = arg; + + if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info)) + return -EINVAL; + + return lnet_get_peer_info(peer_info->pr_count, + &peer_info->pr_nid, + peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness, + &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt, + 
&peer_info->pr_lnd_u.pr_peer_credits.cr_refcount, + &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits, + &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob); + } + case IOC_LIBCFS_NOTIFY_ROUTER: secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]); - return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, - jiffies - secs_passed * HZ); + secs_passed *= msecs_to_jiffies(MSEC_PER_SEC); - case IOC_LIBCFS_PORTALS_COMPATIBILITY: - /* This can be removed once lustre stops calling it */ - return 0; + return lnet_notify(NULL, data->ioc_nid, data->ioc_flags, + jiffies - secs_passed); case IOC_LIBCFS_LNET_DIST: rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]); @@ -1382,46 +2032,26 @@ LNetCtl(unsigned int cmd, void *arg) lnet_net_unlock(LNET_LOCK_EX); return 0; + case IOC_LIBCFS_LNET_FAULT: + return lnet_fault_ctl(data->ioc_flags, data); + case IOC_LIBCFS_PING: id.nid = data->ioc_nid; id.pid = data->ioc_u32[0]; rc = lnet_ping(id, data->ioc_u32[1], /* timeout */ - (lnet_process_id_t *)data->ioc_pbuf1, - data->ioc_plen1/sizeof(lnet_process_id_t)); + data->ioc_pbuf1, + data->ioc_plen1 / sizeof(lnet_process_id_t)); if (rc < 0) return rc; data->ioc_count = rc; return 0; - case IOC_LIBCFS_DEBUG_PEER: { - /* CAVEAT EMPTOR: this one designed for calling directly; not - * via an ioctl */ - id = *((lnet_process_id_t *) arg); - - lnet_debug_peer(id.nid); - - ni = lnet_net2ni(LNET_NIDNET(id.nid)); - if (ni == NULL) { - CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id)); - } else { - if (ni->ni_lnd->lnd_ctl == NULL) { - CDEBUG(D_WARNING, "No ctl for %s\n", - libcfs_id2str(id)); - } else { - (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg); - } - - lnet_ni_decref(ni); - } - return 0; - } - default: ni = lnet_net2ni(data->ioc_net); - if (ni == NULL) + if (!ni) return -EINVAL; - if (ni->ni_lnd->lnd_ctl == NULL) + if (!ni->ni_lnd->lnd_ctl) rc = -EINVAL; else rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg); @@ -1433,6 +2063,12 @@ LNetCtl(unsigned int cmd, void *arg) } EXPORT_SYMBOL(LNetCtl); +void LNetDebugPeer(lnet_process_id_t id) +{ + lnet_debug_peer(id.nid); +} +EXPORT_SYMBOL(LNetDebugPeer); + /** * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that * all interfaces share a same PID, as requested by LNetNIInit(). @@ -1452,16 +2088,12 @@ LNetGetId(unsigned int index, lnet_process_id_t *id) int cpt; int rc = -ENOENT; - LASSERT(the_lnet.ln_init); - - /* LNetNI initilization failed? 
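The IOC_LIBCFS_NOTIFY_ROUTER branch above converts "seconds since the event" into jiffies by multiplying by msecs_to_jiffies(MSEC_PER_SEC), i.e. by the tick rate, rather than using the raw HZ constant directly. A userspace model; the 250 Hz tick and the sample numbers are assumptions for the demo only.

#include <stdio.h>

#define MSEC_PER_SEC 1000UL
#define HZ 250UL                       /* assumed tick rate for the demo */

static unsigned long msecs_to_jiffies(unsigned long ms)
{
    return ms * HZ / MSEC_PER_SEC;     /* simplified, no rounding up */
}

int main(void)
{
    unsigned long jiffies = 1000000;   /* pretend current jiffy counter */
    unsigned long secs_passed = 42;

    secs_passed *= msecs_to_jiffies(MSEC_PER_SEC);   /* seconds -> jiffies */
    printf("event happened at jiffy %lu\n", jiffies - secs_passed);
    return 0;
}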
*/ - if (the_lnet.ln_refcount == 0) - return rc; + LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_net_lock_current(); list_for_each(tmp, &the_lnet.ln_nis) { - if (index-- != 0) + if (index--) continue; ni = list_entry(tmp, lnet_ni_t, ni_list); @@ -1488,192 +2120,8 @@ LNetSnprintHandle(char *str, int len, lnet_handle_any_t h) } EXPORT_SYMBOL(LNetSnprintHandle); -static int -lnet_create_ping_info(void) -{ - int i; - int n; - int rc; - unsigned int infosz; - lnet_ni_t *ni; - lnet_process_id_t id; - lnet_ping_info_t *pinfo; - - for (n = 0; ; n++) { - rc = LNetGetId(n, &id); - if (rc == -ENOENT) - break; - - LASSERT(rc == 0); - } - - infosz = offsetof(lnet_ping_info_t, pi_ni[n]); - LIBCFS_ALLOC(pinfo, infosz); - if (pinfo == NULL) { - CERROR("Can't allocate ping info[%d]\n", n); - return -ENOMEM; - } - - pinfo->pi_nnis = n; - pinfo->pi_pid = the_lnet.ln_pid; - pinfo->pi_magic = LNET_PROTO_PING_MAGIC; - pinfo->pi_features = LNET_PING_FEAT_NI_STATUS; - - for (i = 0; i < n; i++) { - lnet_ni_status_t *ns = &pinfo->pi_ni[i]; - - rc = LNetGetId(i, &id); - LASSERT(rc == 0); - - ns->ns_nid = id.nid; - ns->ns_status = LNET_NI_STATUS_UP; - - lnet_net_lock(0); - - ni = lnet_nid2ni_locked(id.nid, 0); - LASSERT(ni != NULL); - - lnet_ni_lock(ni); - LASSERT(ni->ni_status == NULL); - ni->ni_status = ns; - lnet_ni_unlock(ni); - - lnet_ni_decref_locked(ni, 0); - lnet_net_unlock(0); - } - - the_lnet.ln_ping_info = pinfo; - return 0; -} - -static void -lnet_destroy_ping_info(void) -{ - struct lnet_ni *ni; - - lnet_net_lock(0); - - list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) { - lnet_ni_lock(ni); - ni->ni_status = NULL; - lnet_ni_unlock(ni); - } - - lnet_net_unlock(0); - - LIBCFS_FREE(the_lnet.ln_ping_info, - offsetof(lnet_ping_info_t, - pi_ni[the_lnet.ln_ping_info->pi_nnis])); - the_lnet.ln_ping_info = NULL; -} - -int -lnet_ping_target_init(void) -{ - lnet_md_t md = { NULL }; - lnet_handle_me_t meh; - lnet_process_id_t id; - int rc; - int rc2; - int infosz; - - rc = lnet_create_ping_info(); - if (rc != 0) - return rc; - - /* We can have a tiny EQ since we only need to see the unlink event on - * teardown, which by definition is the last one! 
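The ping-info buffers in this file are sized with the flexible-array idiom: offsetof(type, pi_ni[n]) yields the header plus n trailing records in a single allocation. A minimal userspace copy with simplified field types; GCC and Clang accept a variable index inside offsetof(), which is what the kernel code relies on.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct ni_status {
    unsigned long long ns_nid;
    unsigned ns_status;
};

struct ping_info {
    unsigned pi_magic;
    unsigned pi_features;
    unsigned pi_pid;
    unsigned pi_nnis;
    struct ni_status pi_ni[];          /* flexible array member */
};

int main(void)
{
    int n = 4;
    size_t infosz = offsetof(struct ping_info, pi_ni[n]);
    struct ping_info *pinfo = calloc(1, infosz);

    if (!pinfo)
        return 1;
    pinfo->pi_nnis = n;
    printf("%zu bytes for %d NIs\n", infosz, n);
    free(pinfo);
    return 0;
}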
*/ - rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq); - if (rc != 0) { - CERROR("Can't allocate ping EQ: %d\n", rc); - goto failed_0; - } - - memset(&id, 0, sizeof(lnet_process_id_t)); - id.nid = LNET_NID_ANY; - id.pid = LNET_PID_ANY; - - rc = LNetMEAttach(LNET_RESERVED_PORTAL, id, - LNET_PROTO_PING_MATCHBITS, 0, - LNET_UNLINK, LNET_INS_AFTER, - &meh); - if (rc != 0) { - CERROR("Can't create ping ME: %d\n", rc); - goto failed_1; - } - - /* initialize md content */ - infosz = offsetof(lnet_ping_info_t, - pi_ni[the_lnet.ln_ping_info->pi_nnis]); - md.start = the_lnet.ln_ping_info; - md.length = infosz; - md.threshold = LNET_MD_THRESH_INF; - md.max_size = 0; - md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE | - LNET_MD_MANAGE_REMOTE; - md.user_ptr = NULL; - md.eq_handle = the_lnet.ln_ping_target_eq; - - rc = LNetMDAttach(meh, md, - LNET_RETAIN, - &the_lnet.ln_ping_target_md); - if (rc != 0) { - CERROR("Can't attach ping MD: %d\n", rc); - goto failed_2; - } - - return 0; - - failed_2: - rc2 = LNetMEUnlink(meh); - LASSERT(rc2 == 0); - failed_1: - rc2 = LNetEQFree(the_lnet.ln_ping_target_eq); - LASSERT(rc2 == 0); - failed_0: - lnet_destroy_ping_info(); - return rc; -} - -void -lnet_ping_target_fini(void) -{ - lnet_event_t event; - int rc; - int which; - int timeout_ms = 1000; - sigset_t blocked = cfs_block_allsigs(); - - LNetMDUnlink(the_lnet.ln_ping_target_md); - /* NB md could be busy; this just starts the unlink */ - - for (;;) { - rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1, - timeout_ms, &event, &which); - - /* I expect overflow... */ - LASSERT(rc >= 0 || rc == -EOVERFLOW); - - if (rc == 0) { - /* timed out: provide a diagnostic */ - CWARN("Still waiting for ping MD to unlink\n"); - timeout_ms *= 2; - continue; - } - - /* Got a valid event */ - if (event.unlinked) - break; - } - - rc = LNetEQFree(the_lnet.ln_ping_target_eq); - LASSERT(rc == 0); - lnet_destroy_ping_info(); - cfs_restore_sigs(blocked); -} - -int -lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids) +static int lnet_ping(lnet_process_id_t id, int timeout_ms, + lnet_process_id_t __user *ids, int n_ids) { lnet_handle_eq_t eqh; lnet_handle_md_t mdh; @@ -1683,7 +2131,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id int unlinked = 0; int replied = 0; const int a_long_time = 60000; /* mS */ - int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]); + int infosz; lnet_ping_info_t *info; lnet_process_id_t tmpid; int i; @@ -1692,6 +2140,8 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id int rc2; sigset_t blocked; + infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]); + if (n_ids <= 0 || id.nid == LNET_NID_ANY || timeout_ms > 500000 || /* arbitrary limit! 
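The removed teardown loop above waits for the ping MD unlink event by polling with a timeout that doubles after every miss. The same backoff shape in isolation; poll_once() is a fake stand-in that reports the event on the third call.

#include <stdio.h>

static int poll_once(int timeout_ms)   /* fake: event arrives on call 3 */
{
    static int calls;

    (void)timeout_ms;
    return ++calls >= 3;
}

static void wait_for_unlink(void)
{
    int timeout_ms = 1000;

    for (;;) {
        if (poll_once(timeout_ms))
            break;                     /* got the unlink event */
        printf("still waiting, next timeout %d ms\n", timeout_ms);
        timeout_ms *= 2;               /* exponential backoff */
    }
}

int main(void)
{
    wait_for_unlink();
    return 0;
}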
*/ @@ -1699,15 +2149,15 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id return -EINVAL; if (id.pid == LNET_PID_ANY) - id.pid = LUSTRE_SRV_LNET_PID; + id.pid = LNET_PID_LUSTRE; LIBCFS_ALLOC(info, infosz); - if (info == NULL) + if (!info) return -ENOMEM; /* NB 2 events max (including any unlink event) */ rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh); - if (rc != 0) { + if (rc) { CERROR("Can't allocate EQ: %d\n", rc); goto out_0; } @@ -1722,7 +2172,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id md.eq_handle = eqh; rc = LNetMDBind(md, LNET_UNLINK, &mdh); - if (rc != 0) { + if (rc) { CERROR("Can't bind MD: %d\n", rc); goto out_1; } @@ -1731,11 +2181,11 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id LNET_RESERVED_PORTAL, LNET_PROTO_PING_MATCHBITS, 0); - if (rc != 0) { + if (rc) { /* Don't CERROR; this could be deliberate! */ rc2 = LNetMDUnlink(mdh); - LASSERT(rc2 == 0); + LASSERT(!rc2); /* NB must wait for the UNLINK event below... */ unlinked = 1; @@ -1759,11 +2209,11 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */ - if (rc2 <= 0 || event.status != 0) { + if (rc2 <= 0 || event.status) { /* timeout or error */ - if (!replied && rc == 0) + if (!replied && !rc) rc = (rc2 < 0) ? rc2 : - (rc2 == 0) ? -ETIMEDOUT : + !rc2 ? -ETIMEDOUT : event.status; if (!unlinked) { @@ -1772,7 +2222,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id /* No assertion (racing with network) */ unlinked = 1; timeout_ms = a_long_time; - } else if (rc2 == 0) { + } else if (!rc2) { /* timed out waiting for unlink */ CWARN("ping %s: late network completion\n", libcfs_id2str(id)); @@ -1812,7 +2262,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id goto out_1; } - if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) { + if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) { CERROR("%s: ping w/o NI status: 0x%x\n", libcfs_id2str(id), info->pi_features); goto out_1; @@ -1846,9 +2296,9 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id out_1: rc2 = LNetEQFree(eqh); - if (rc2 != 0) + if (rc2) CERROR("rc2 %d\n", rc2); - LASSERT(rc2 == 0); + LASSERT(!rc2); out_0: LIBCFS_FREE(info, infosz); diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c index 284a3c271..449069c9e 100644 --- a/drivers/staging/lustre/lnet/lnet/config.c +++ b/drivers/staging/lustre/lnet/lnet/config.c @@ -37,15 +37,15 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" -struct lnet_text_buf_t { /* tmp struct for parsing routes */ +struct lnet_text_buf { /* tmp struct for parsing routes */ struct list_head ltb_list; /* stash on lists */ int ltb_size; /* allocated size */ char ltb_text[0]; /* text buffer */ }; static int lnet_tbnob; /* track text buf allocation */ -#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */ -#define LNET_SINGLE_TEXTBUF_NOB (4<<10) +#define LNET_MAX_TEXTBUF_NOB (64 << 10) /* bound allocation */ +#define LNET_SINGLE_TEXTBUF_NOB (4 << 10) static void lnet_syntax(char *name, char *str, int offset, int width) @@ -54,9 +54,9 @@ lnet_syntax(char *name, char *str, int offset, int width) static char dashes[LNET_SINGLE_TEXTBUF_NOB]; memset(dots, '.', sizeof(dots)); - dots[sizeof(dots)-1] = 0; + dots[sizeof(dots) - 1] = 0; memset(dashes, '-', sizeof(dashes)); - 
dashes[sizeof(dashes)-1] = 0; + dashes[sizeof(dashes) - 1] = 0; LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str); LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n", @@ -77,7 +77,7 @@ lnet_issep(char c) } } -static int +int lnet_net_unique(__u32 net, struct list_head *nilist) { struct list_head *tmp; @@ -96,19 +96,25 @@ lnet_net_unique(__u32 net, struct list_head *nilist) void lnet_ni_free(struct lnet_ni *ni) { - if (ni->ni_refs != NULL) + int i; + + if (ni->ni_refs) cfs_percpt_free(ni->ni_refs); - if (ni->ni_tx_queues != NULL) + if (ni->ni_tx_queues) cfs_percpt_free(ni->ni_tx_queues); - if (ni->ni_cpts != NULL) + if (ni->ni_cpts) cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts); + for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) { + LIBCFS_FREE(ni->ni_interfaces[i], + strlen(ni->ni_interfaces[i]) + 1); + } LIBCFS_FREE(ni, sizeof(*ni)); } -static lnet_ni_t * +lnet_ni_t * lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) { struct lnet_tx_queue *tq; @@ -123,7 +129,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) } LIBCFS_ALLOC(ni, sizeof(*ni)); - if (ni == NULL) { + if (!ni) { CERROR("Out of memory creating network %s\n", libcfs_net2str(net)); return NULL; @@ -133,18 +139,18 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) INIT_LIST_HEAD(&ni->ni_cptlist); ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ni->ni_refs[0])); - if (ni->ni_refs == NULL) + if (!ni->ni_refs) goto failed; ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ni->ni_tx_queues[0])); - if (ni->ni_tx_queues == NULL) + if (!ni->ni_tx_queues) goto failed; cfs_percpt_for_each(tq, i, ni->ni_tx_queues) INIT_LIST_HEAD(&tq->tq_delayed); - if (el == NULL) { + if (!el) { ni->ni_cpts = NULL; ni->ni_ncpts = LNET_CPT_NUMBER; } else { @@ -178,13 +184,19 @@ int lnet_parse_networks(struct list_head *nilist, char *networks) { struct cfs_expr_list *el = NULL; - int tokensize = strlen(networks) + 1; + int tokensize; char *tokens; char *str; char *tmp; struct lnet_ni *ni; __u32 net; int nnets = 0; + struct list_head *temp_node; + + if (!networks) { + CERROR("networks string is undefined\n"); + return -EINVAL; + } if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) { /* _WAY_ conservative */ @@ -193,23 +205,19 @@ lnet_parse_networks(struct list_head *nilist, char *networks) return -EINVAL; } + tokensize = strlen(networks) + 1; + LIBCFS_ALLOC(tokens, tokensize); - if (tokens == NULL) { + if (!tokens) { CERROR("Can't allocate net tokens\n"); return -ENOMEM; } - the_lnet.ln_network_tokens = tokens; - the_lnet.ln_network_tokens_nob = tokensize; memcpy(tokens, networks, tokensize); - str = tmp = tokens; - - /* Add in the loopback network */ - ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist); - if (ni == NULL) - goto failed; + tmp = tokens; + str = tokens; - while (str != NULL && *str != 0) { + while (str && *str) { char *comma = strchr(str, ','); char *bracket = strchr(str, '('); char *square = strchr(str, '['); @@ -217,26 +225,29 @@ lnet_parse_networks(struct list_head *nilist, char *networks) int niface; int rc; - /* NB we don't check interface conflicts here; it's the LNDs - * responsibility (if it cares at all) */ - - if (square != NULL && (comma == NULL || square < comma)) { - /* i.e: o2ib0(ib0)[1,2], number between square - * brackets are CPTs this NI needs to be bond */ - if (bracket != NULL && bracket > square) { + /* + * NB we don't check interface conflicts here; it's the LNDs + 
* responsibility (if it cares at all)
+ */
+ if (square && (!comma || square < comma)) {
+ /*
+ * i.e: o2ib0(ib0)[1,2], number between square
+ * brackets are CPTs this NI needs to be bound
+ */
+ if (bracket && bracket > square) {
 tmp = square;
 goto failed_syntax;
 }
 tmp = strchr(square, ']');
- if (tmp == NULL) {
+ if (!tmp) {
 tmp = square;
 goto failed_syntax;
 }
 rc = cfs_expr_list_parse(square, tmp - square + 1, 0, LNET_CPT_NUMBER - 1, &el);
- if (rc != 0) {
+ if (rc) {
 tmp = square;
 goto failed_syntax;
 }
@@ -245,12 +256,10 @@
 *square++ = ' ';
 }
- if (bracket == NULL ||
- (comma != NULL && comma < bracket)) {
-
+ if (!bracket || (comma && comma < bracket)) {
 /* no interface list specified */
- if (comma != NULL)
+ if (comma)
 *comma++ = 0;
 net = libcfs_str2net(cfs_trimwhite(str));
@@ -262,10 +271,10 @@
 }
 if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
- lnet_ni_alloc(net, el, nilist) == NULL)
+ !lnet_ni_alloc(net, el, nilist))
 goto failed;
- if (el != NULL) {
+ if (el) {
 cfs_expr_list_free(el);
 el = NULL;
 }
@@ -281,12 +290,11 @@
 goto failed_syntax;
 }
- nnets++;
 ni = lnet_ni_alloc(net, el, nilist);
- if (ni == NULL)
+ if (!ni)
 goto failed;
- if (el != NULL) {
+ if (el) {
 cfs_expr_list_free(el);
 el = NULL;
 }
@@ -295,7 +303,7 @@
 iface = bracket + 1;
 bracket = strchr(iface, ')');
- if (bracket == NULL) {
+ if (!bracket) {
 tmp = iface;
 goto failed_syntax;
 }
@@ -303,11 +311,11 @@
 *bracket = 0;
 do {
 comma = strchr(iface, ',');
- if (comma != NULL)
+ if (comma)
 *comma++ = 0;
 iface = cfs_trimwhite(iface);
- if (*iface == 0) {
+ if (!*iface) {
 tmp = iface;
 goto failed_syntax;
 }
@@ -319,16 +327,32 @@
 goto failed;
 }
- ni->ni_interfaces[niface++] = iface;
+ /*
+ * Allocate a separate piece of memory and copy
+ * into it the string, so we don't have
+ * a dependency on the tokens string. This way we
+ * can free the tokens at the end of the function.
+ * The newly allocated ni_interfaces[] can be
+ * freed when freeing the NI
+ */
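The copy made below exists so each interface name outlives the tokens buffer it was parsed from. The same ownership move in plain C; copy_iface_name() is an invented helper, and the kernel code uses LIBCFS_ALLOC()/strncpy() rather than malloc().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *copy_iface_name(const char *iface)
{
    size_t len = strlen(iface) + 1;    /* include the terminator */
    char *owned = malloc(len);

    if (!owned)
        return NULL;                   /* caller unwinds, like "goto failed" */
    memcpy(owned, iface, len);
    return owned;                      /* freed later, with the NI */
}

int main(void)
{
    char tokens[] = "eth0";            /* pretend slice of the token buffer */
    char *owned = copy_iface_name(tokens);

    if (!owned)
        return 1;
    tokens[0] = 'X';                   /* token buffer may now be recycled */
    puts(owned);                       /* still prints "eth0" */
    free(owned);
    return 0;
}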
+ LIBCFS_ALLOC(ni->ni_interfaces[niface],
+ strlen(iface) + 1);
+ if (!ni->ni_interfaces[niface]) {
+ CERROR("Can't allocate net interface name\n");
+ goto failed;
+ }
+ strncpy(ni->ni_interfaces[niface], iface,
+ strlen(iface));
+ niface++;
 iface = comma;
- } while (iface != NULL);
+ } while (iface);
 str = bracket + 1;
 comma = strchr(bracket + 1, ',');
- if (comma != NULL) {
+ if (comma) {
 *comma = 0;
 str = cfs_trimwhite(str);
- if (*str != 0) {
+ if (*str) {
 tmp = str;
 goto failed_syntax;
 }
@@ -337,14 +361,17 @@
 }
 str = cfs_trimwhite(str);
- if (*str != 0) {
+ if (*str) {
 tmp = str;
 goto failed_syntax;
 }
 }
- LASSERT(!list_empty(nilist));
- return 0;
+ list_for_each(temp_node, nilist)
+ nnets++;
+
+ LIBCFS_FREE(tokens, tokensize);
+ return nnets;
 failed_syntax:
 lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
@@ -356,23 +383,22 @@
 lnet_ni_free(ni);
 }
- if (el != NULL)
+ if (el)
 cfs_expr_list_free(el);
 LIBCFS_FREE(tokens, tokensize);
- the_lnet.ln_network_tokens = NULL;
 return -EINVAL;
 }
-static struct lnet_text_buf_t *
+static struct lnet_text_buf *
 lnet_new_text_buf(int str_len)
 {
- struct lnet_text_buf_t *ltb;
 int nob;
 /* NB allocate space for the terminating 0 */
- nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]);
+ nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]);
 if (nob > LNET_SINGLE_TEXTBUF_NOB) {
 /* _way_ conservative for "route net gateway..." */
 CERROR("text buffer too big\n");
@@ -385,7 +411,7 @@ lnet_new_text_buf(int str_len)
 }
 LIBCFS_ALLOC(ltb, nob);
- if (ltb == NULL)
+ if (!ltb)
 return NULL;
 ltb->ltb_size = nob;
@@ -395,7 +421,7 @@ lnet_new_text_buf(int str_len)
 }
 static void
-lnet_free_text_buf(struct lnet_text_buf_t *ltb)
+lnet_free_text_buf(struct lnet_text_buf *ltb)
 {
 lnet_tbnob -= ltb->ltb_size;
 LIBCFS_FREE(ltb, ltb->ltb_size);
@@ -404,10 +430,10 @@ lnet_free_text_buf(struct lnet_text_buf_t *ltb)
 static void
 lnet_free_text_bufs(struct list_head *tbs)
 {
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
 while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
+ ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
 list_del(&ltb->ltb_list);
 lnet_free_text_buf(ltb);
@@ -421,7 +447,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
 char *sep;
 int nob;
 int i;
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
 INIT_LIST_HEAD(&pending);
@@ -432,16 +458,16 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
 str++;
 /* scan for separator or comment */
- for (sep = str; *sep != 0; sep++)
+ for (sep = str; *sep; sep++)
 if (lnet_issep(*sep) || *sep == '#')
 break;
 nob = (int)(sep - str);
 if (nob > 0) {
 ltb = lnet_new_text_buf(nob);
- if (ltb == NULL) {
+ if (!ltb) {
 lnet_free_text_bufs(&pending);
- return -1;
+ return -ENOMEM;
 }
 for (i = 0; i < nob; i++)
@@ -459,10 +485,10 @@
 /* scan for separator */
 do {
 sep++;
- } while (*sep != 0 && !lnet_issep(*sep));
+ } while (*sep && !lnet_issep(*sep));
 }
- if (*sep == 0)
+ if (!*sep)
 break;
 str = sep + 1;
@@ -479,18 +505,18 @@ lnet_expand1tb(struct list_head *list,
 {
 int len1 = (int)(sep1 - str);
 int len2 = strlen(sep2 + 1);
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
 LASSERT(*sep1 == '[');
 LASSERT(*sep2 == ']');
 ltb = lnet_new_text_buf(len1 + itemlen + len2);
- if (ltb == NULL)
+ if (!ltb)
 return -ENOMEM;
 memcpy(ltb->ltb_text, str, len1);
 memcpy(&ltb->ltb_text[len1], item, itemlen);
- memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
+ memcpy(&ltb->ltb_text[len1 + itemlen], sep2 + 1, len2);
 ltb->ltb_text[len1 + itemlen + len2] = 0;
 list_add_tail(&ltb->ltb_list, list);
@@ -516,15 +542,14 @@
 INIT_LIST_HEAD(&pending);
 sep = strchr(str, '[');
- if (sep == NULL) /* nothing to expand */
+ if (!sep) /* nothing to expand */
 return 0;
 sep2 = strchr(sep, ']');
- if (sep2 == NULL)
+ if (!sep2)
 goto failed;
 for (parsed = sep; parsed < sep2; parsed = enditem) {
-
 enditem = ++parsed;
 while (enditem < sep2 && *enditem != ',')
 enditem++;
@@ -534,17 +559,13 @@
 if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi, &stride, &scanned) < 3) {
-
 if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
-
 /* simple string enumeration */
- if (lnet_expand1tb(
- &pending, str, sep, sep2,
- parsed,
- (int)(enditem - parsed)) != 0) {
+ if (lnet_expand1tb(&pending, str, sep, sep2,
+ parsed,
+ (int)(enditem - parsed))) {
 goto failed;
 }
-
 continue;
 }
@@ -557,18 +578,17 @@
 goto failed;
 if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
- (hi - lo) % stride != 0)
+ (hi - lo) % stride)
 goto failed;
 for (i = lo; i <= hi; i += stride) {
-
 snprintf(num, sizeof(num), "%d", i);
 nob = strlen(num);
 if (nob + 1 == sizeof(num))
 goto failed;
 if (lnet_expand1tb(&pending, str, sep, sep2,
- num, nob) != 0)
+ num, nob))
 goto failed;
 }
 }
@@ -578,7 +598,7 @@
 failed:
 lnet_free_text_bufs(&pending);
- return -1;
+ return -EINVAL;
 }
 static int
@@ -602,17 +622,19 @@ lnet_parse_priority(char *str, unsigned int *priority, char **token)
 int len;
 sep = strchr(str, LNET_PRIORITY_SEPARATOR);
- if (sep == NULL) {
+ if (!sep) {
 *priority = 0;
 return 0;
 }
 len = strlen(sep + 1);
- if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
- /* Update the caller's token pointer so it treats the found
- priority as the token to report in the error message. */
+ if ((sscanf((sep + 1), "%u%n", priority, &nob) < 1) || (len != nob)) {
+ /*
+ * Update the caller's token pointer so it treats the found
+ * priority as the token to report in the error message.
+ */
 *token += sep - str + 1;
- return -1;
+ return -EINVAL;
 }
 CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);
@@ -636,13 +658,13 @@ lnet_parse_route(char *str, int *im_a_router)
 struct list_head *tmp2;
 __u32 net;
 lnet_nid_t nid;
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
 int rc;
 char *sep;
 char *token = str;
 int ntokens = 0;
 int myrc = -1;
- unsigned int hops;
+ __u32 hops;
 int got_hops = 0;
 unsigned int priority = 0;
@@ -658,7 +680,7 @@ lnet_parse_route(char *str, int *im_a_router)
 /* scan for token start */
 while (isspace(*sep))
 sep++;
- if (*sep == 0) {
+ if (!*sep) {
 if (ntokens < (got_hops ?
3 : 2)) goto token_error; break; @@ -668,9 +690,9 @@ lnet_parse_route(char *str, int *im_a_router) token = sep++; /* scan for token end */ - while (*sep != 0 && !isspace(*sep)) + while (*sep && !isspace(*sep)) sep++; - if (*sep != 0) + if (*sep) *sep++ = 0; if (ntokens == 1) { @@ -684,7 +706,7 @@ lnet_parse_route(char *str, int *im_a_router) } ltb = lnet_new_text_buf(strlen(token)); - if (ltb == NULL) + if (!ltb) goto out; strcpy(ltb->ltb_text, token); @@ -692,8 +714,7 @@ lnet_parse_route(char *str, int *im_a_router) list_add_tail(tmp1, tmp2); while (tmp1 != tmp2) { - ltb = list_entry(tmp1, struct lnet_text_buf_t, - ltb_list); + ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list); rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text); if (rc < 0) @@ -726,20 +747,23 @@ lnet_parse_route(char *str, int *im_a_router) } } + /** + * if there are no hops set then we want to flag this value as + * unset since hops is an optional parameter + */ if (!got_hops) - hops = 1; + hops = LNET_UNDEFINED_HOPS; LASSERT(!list_empty(&nets)); LASSERT(!list_empty(&gateways)); list_for_each(tmp1, &nets) { - ltb = list_entry(tmp1, struct lnet_text_buf_t, ltb_list); + ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list); net = libcfs_str2net(ltb->ltb_text); LASSERT(net != LNET_NIDNET(LNET_NID_ANY)); list_for_each(tmp2, &gateways) { - ltb = list_entry(tmp2, struct lnet_text_buf_t, - ltb_list); + ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list); nid = libcfs_str2nid(ltb->ltb_text); LASSERT(nid != LNET_NID_ANY); @@ -749,7 +773,7 @@ lnet_parse_route(char *str, int *im_a_router) } rc = lnet_add_route(net, hops, nid, priority); - if (rc != 0) { + if (rc && rc != -EEXIST && rc != -EHOSTUNREACH) { CERROR("Can't create route to %s via %s\n", libcfs_net2str(net), libcfs_nid2str(nid)); @@ -772,10 +796,10 @@ lnet_parse_route(char *str, int *im_a_router) static int lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router) { - struct lnet_text_buf_t *ltb; + struct lnet_text_buf *ltb; while (!list_empty(tbs)) { - ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list); + ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list); if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) { lnet_free_text_bufs(tbs); @@ -806,7 +830,7 @@ lnet_parse_routes(char *routes, int *im_a_router) rc = lnet_parse_route_tbs(&tbs, im_a_router); } - LASSERT(lnet_tbnob == 0); + LASSERT(!lnet_tbnob); return rc; } @@ -818,7 +842,7 @@ lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip) int i; rc = cfs_ip_addr_parse(token, len, &list); - if (rc != 0) + if (rc) return rc; for (rc = i = 0; !rc && i < nip; i++) @@ -851,18 +875,18 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) /* scan for token start */ while (isspace(*sep)) sep++; - if (*sep == 0) + if (!*sep) break; token = sep++; /* scan for token end */ - while (*sep != 0 && !isspace(*sep)) + while (*sep && !isspace(*sep)) sep++; - if (*sep != 0) + if (*sep) *sep++ = 0; - if (ntokens++ == 0) { + if (!ntokens++) { net = token; continue; } @@ -876,7 +900,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) return rc; } - matched |= (rc != 0); + if (rc) + matched |= 1; } if (!matched) @@ -892,12 +917,12 @@ lnet_netspec2net(char *netspec) char *bracket = strchr(netspec, '('); __u32 net; - if (bracket != NULL) + if (bracket) *bracket = 0; net = libcfs_str2net(netspec); - if (bracket != NULL) + if (bracket) *bracket = '('; return net; @@ -909,8 +934,8 @@ lnet_splitnets(char *source, struct list_head *nets) int offset = 0; int 
offset2; int len; - struct lnet_text_buf_t *tb; - struct lnet_text_buf_t *tb2; + struct lnet_text_buf *tb; + struct lnet_text_buf *tb2; struct list_head *t; char *sep; char *bracket; @@ -919,15 +944,13 @@ lnet_splitnets(char *source, struct list_head *nets) LASSERT(!list_empty(nets)); LASSERT(nets->next == nets->prev); /* single entry */ - tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list); + tb = list_entry(nets->next, struct lnet_text_buf, ltb_list); for (;;) { sep = strchr(tb->ltb_text, ','); bracket = strchr(tb->ltb_text, '('); - if (sep != NULL && - bracket != NULL && - bracket < sep) { + if (sep && bracket && bracket < sep) { /* netspec lists interfaces... */ offset2 = offset + (int)(bracket - tb->ltb_text); @@ -935,16 +958,16 @@ lnet_splitnets(char *source, struct list_head *nets) bracket = strchr(bracket + 1, ')'); - if (bracket == NULL || - !(bracket[1] == ',' || bracket[1] == 0)) { + if (!bracket || + !(bracket[1] == ',' || !bracket[1])) { lnet_syntax("ip2nets", source, offset2, len); return -EINVAL; } - sep = (bracket[1] == 0) ? NULL : bracket + 1; + sep = !bracket[1] ? NULL : bracket + 1; } - if (sep != NULL) + if (sep) *sep++ = 0; net = lnet_netspec2net(tb->ltb_text); @@ -955,7 +978,7 @@ lnet_splitnets(char *source, struct list_head *nets) } list_for_each(t, nets) { - tb2 = list_entry(t, struct lnet_text_buf_t, ltb_list); + tb2 = list_entry(t, struct lnet_text_buf, ltb_list); if (tb2 == tb) continue; @@ -968,13 +991,13 @@ lnet_splitnets(char *source, struct list_head *nets) } } - if (sep == NULL) + if (!sep) return 0; offset += (int)(sep - tb->ltb_text); len = strlen(sep); tb2 = lnet_new_text_buf(len); - if (tb2 == NULL) + if (!tb2) return -ENOMEM; strncpy(tb2->ltb_text, sep, len); @@ -996,8 +1019,9 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) struct list_head current_nets; struct list_head *t; struct list_head *t2; - struct lnet_text_buf_t *tb; - struct lnet_text_buf_t *tb2; + struct lnet_text_buf *tb; + struct lnet_text_buf *temp; + struct lnet_text_buf *tb2; __u32 net1; __u32 net2; int len; @@ -1008,7 +1032,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) INIT_LIST_HEAD(&raw_entries); if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) { CERROR("Error parsing ip2nets\n"); - LASSERT(lnet_tbnob == 0); + LASSERT(!lnet_tbnob); return -EINVAL; } @@ -1019,12 +1043,9 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) len = 0; rc = 0; - while (!list_empty(&raw_entries)) { - tb = list_entry(raw_entries.next, struct lnet_text_buf_t, - ltb_list); - + list_for_each_entry_safe(tb, temp, &raw_entries, ltb_list) { strncpy(source, tb->ltb_text, sizeof(source)); - source[sizeof(source)-1] = '\0'; + source[sizeof(source) - 1] = '\0'; /* replace ltb_text with the network(s) add on match */ rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip); @@ -1033,7 +1054,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) list_del(&tb->ltb_list); - if (rc == 0) { /* no match */ + if (!rc) { /* no match */ lnet_free_text_buf(tb); continue; } @@ -1047,13 +1068,13 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) dup = 0; list_for_each(t, ¤t_nets) { - tb = list_entry(t, struct lnet_text_buf_t, ltb_list); + tb = list_entry(t, struct lnet_text_buf, ltb_list); net1 = lnet_netspec2net(tb->ltb_text); LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY)); list_for_each(t2, &matched_nets) { - tb2 = list_entry(t2, struct lnet_text_buf_t, - ltb_list); + 
tb2 = list_entry(t2, struct lnet_text_buf, + ltb_list); net2 = lnet_netspec2net(tb2->ltb_text); LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY)); @@ -1073,13 +1094,13 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) } list_for_each_safe(t, t2, ¤t_nets) { - tb = list_entry(t, struct lnet_text_buf_t, ltb_list); + tb = list_entry(t, struct lnet_text_buf, ltb_list); list_del(&tb->ltb_list); list_add_tail(&tb->ltb_list, &matched_nets); len += snprintf(networks + len, sizeof(networks) - len, - "%s%s", (len == 0) ? "" : ",", + "%s%s", !len ? "" : ",", tb->ltb_text); if (len >= sizeof(networks)) { @@ -1096,7 +1117,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) lnet_free_text_bufs(&raw_entries); lnet_free_text_bufs(&matched_nets); lnet_free_text_bufs(¤t_nets); - LASSERT(lnet_tbnob == 0); + LASSERT(!lnet_tbnob); if (rc < 0) return rc; @@ -1122,7 +1143,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) return nif; LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs)); - if (ipaddrs == NULL) { + if (!ipaddrs) { CERROR("Can't allocate ipaddrs[%d]\n", nif); lnet_ipif_free_enumeration(ifnames, nif); return -ENOMEM; @@ -1133,7 +1154,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) continue; rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask); - if (rc != 0) { + if (rc) { CWARN("Can't query interface %s: %d\n", ifnames[i], rc); continue; @@ -1155,7 +1176,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) } else { if (nip > 0) { LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2)); - if (ipaddrs2 == NULL) { + if (!ipaddrs2) { CERROR("Can't allocate ipaddrs[%d]\n", nip); nip = -ENOMEM; } else { @@ -1184,7 +1205,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets) return nip; } - if (nip == 0) { + if (!nip) { LCONSOLE_ERROR_MSG(0x118, "No local IP interfaces for ip2nets to match\n"); return -ENOENT; @@ -1198,7 +1219,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets) return rc; } - if (rc == 0) { + if (!rc) { LCONSOLE_ERROR_MSG(0x11a, "ip2nets does not match any local IP interfaces\n"); return -ENOENT; diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c index 64f94a690..adbcadbab 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-eq.c +++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c @@ -72,33 +72,38 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, { lnet_eq_t *eq; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); - /* We need count to be a power of 2 so that when eq_{enq,deq}_seq + /* + * We need count to be a power of 2 so that when eq_{enq,deq}_seq * overflow, they don't skip entries, so the queue has the same - * apparent capacity at all times */ + * apparent capacity at all times + */ + if (count) + count = roundup_pow_of_two(count); - count = roundup_pow_of_two(count); - - if (callback != LNET_EQ_HANDLER_NONE && count != 0) + if (callback != LNET_EQ_HANDLER_NONE && count) CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? 
Please contact with developer to confirm\n", count); - /* count can be 0 if only need callback, we can eliminate - * overhead of enqueue event */ - if (count == 0 && callback == LNET_EQ_HANDLER_NONE) + /* + * count can be 0 if only need callback, we can eliminate + * overhead of enqueue event + */ + if (!count && callback == LNET_EQ_HANDLER_NONE) return -EINVAL; eq = lnet_eq_alloc(); - if (eq == NULL) + if (!eq) return -ENOMEM; - if (count != 0) { + if (count) { LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t)); - if (eq->eq_events == NULL) + if (!eq->eq_events) goto failed; - /* NB allocator has set all event sequence numbers to 0, - * so all them should be earlier than eq_deq_seq */ + /* + * NB allocator has set all event sequence numbers to 0, + * so all them should be earlier than eq_deq_seq + */ } eq->eq_deq_seq = 1; @@ -108,13 +113,15 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*eq->eq_refs[0])); - if (eq->eq_refs == NULL) + if (!eq->eq_refs) goto failed; /* MUST hold both exclusive lnet_res_lock */ lnet_res_lock(LNET_LOCK_EX); - /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do - * both EQ lookup and poll event with only lnet_eq_wait_lock */ + /* + * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do + * both EQ lookup and poll event with only lnet_eq_wait_lock + */ lnet_eq_wait_lock(); lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh); @@ -127,10 +134,10 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, return 0; failed: - if (eq->eq_events != NULL) + if (eq->eq_events) LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t)); - if (eq->eq_refs != NULL) + if (eq->eq_refs) cfs_percpt_free(eq->eq_refs); lnet_eq_free(eq); @@ -159,23 +166,24 @@ LNetEQFree(lnet_handle_eq_t eqh) int size = 0; int i; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); lnet_res_lock(LNET_LOCK_EX); - /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do - * both EQ lookup and poll event with only lnet_eq_wait_lock */ + /* + * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do + * both EQ lookup and poll event with only lnet_eq_wait_lock + */ lnet_eq_wait_lock(); eq = lnet_handle2eq(&eqh); - if (eq == NULL) { + if (!eq) { rc = -ENOENT; goto out; } cfs_percpt_for_each(ref, i, eq->eq_refs) { LASSERT(*ref >= 0); - if (*ref == 0) + if (!*ref) continue; CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n", @@ -196,9 +204,9 @@ LNetEQFree(lnet_handle_eq_t eqh) lnet_eq_wait_unlock(); lnet_res_unlock(LNET_LOCK_EX); - if (events != NULL) + if (events) LIBCFS_FREE(events, size * sizeof(lnet_event_t)); - if (refs != NULL) + if (refs) cfs_percpt_free(refs); return rc; @@ -211,7 +219,7 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev) /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */ int index; - if (eq->eq_size == 0) { + if (!eq->eq_size) { LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE); eq->eq_callback(ev); return; @@ -255,8 +263,10 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev) if (eq->eq_deq_seq == new_event->sequence) { rc = 1; } else { - /* don't complain with CERROR: some EQs are sized small - * anyway; if it's important, the caller should complain */ + /* + * don't complain with CERROR: some EQs are sized small + * anyway; if it's important, the caller should complain + */ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n", eq->eq_deq_seq, new_event->sequence); rc = -EOVERFLOW; @@ -309,8 +319,8 @@ 
__must_hold(&the_lnet.ln_eq_wait_lock) wait_queue_t wl; unsigned long now; - if (tms == 0) - return -1; /* don't want to wait and no new event */ + if (!tms) + return -ENXIO; /* don't want to wait and no new event */ init_waitqueue_entry(&wl, current); set_current_state(TASK_INTERRUPTIBLE); @@ -320,7 +330,6 @@ __must_hold(&the_lnet.ln_eq_wait_lock) if (tms < 0) { schedule(); - } else { now = jiffies; schedule_timeout(msecs_to_jiffies(tms)); @@ -329,7 +338,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) tms = 0; } - wait = tms != 0; /* might need to call here again */ + wait = tms; /* might need to call here again */ *timeout_ms = tms; lnet_eq_wait_lock(); @@ -372,7 +381,6 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, int rc; int i; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (neq < 1) @@ -384,20 +392,20 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, for (i = 0; i < neq; i++) { lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]); - if (eq == NULL) { + if (!eq) { lnet_eq_wait_unlock(); return -ENOENT; } rc = lnet_eq_dequeue_event(eq, event); - if (rc != 0) { + if (rc) { lnet_eq_wait_unlock(); *which = i; return rc; } } - if (wait == 0) + if (!wait) break; /* diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c index 758f5bede..75d31217b 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-md.c +++ b/drivers/staging/lustre/lnet/lnet/lib-md.c @@ -46,16 +46,18 @@ void lnet_md_unlink(lnet_libmd_t *md) { - if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) { + if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) { /* first unlink attempt... */ lnet_me_t *me = md->md_me; md->md_flags |= LNET_MD_FLAG_ZOMBIE; - /* Disassociate from ME (if any), + /* + * Disassociate from ME (if any), * and unlink it if it was created - * with LNET_UNLINK */ - if (me != NULL) { + * with LNET_UNLINK + */ + if (me) { /* detach MD from portal */ lnet_ptl_detach_md(me, md); if (me->me_unlink == LNET_UNLINK) @@ -66,14 +68,14 @@ lnet_md_unlink(lnet_libmd_t *md) lnet_res_lh_invalidate(&md->md_lh); } - if (md->md_refcount != 0) { + if (md->md_refcount) { CDEBUG(D_NET, "Queueing unlink of md %p\n", md); return; } CDEBUG(D_NET, "Unlinking md %p\n", md); - if (md->md_eq != NULL) { + if (md->md_eq) { int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); LASSERT(*md->md_eq->eq_refs[cpt] > 0); @@ -103,12 +105,12 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) lmd->md_refcount = 0; lmd->md_flags = (unlink == LNET_UNLINK) ? 
LNET_MD_FLAG_AUTO_UNLINK : 0; - if ((umd->options & LNET_MD_IOVEC) != 0) { - - if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */ + if (umd->options & LNET_MD_IOVEC) { + if (umd->options & LNET_MD_KIOV) /* Can't specify both */ return -EINVAL; - lmd->md_niov = niov = umd->length; + niov = umd->length; + lmd->md_niov = umd->length; memcpy(lmd->md_iov.iov, umd->start, niov * sizeof(lmd->md_iov.iov[0])); @@ -123,20 +125,21 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) lmd->md_length = total_length; - if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */ + if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */ (umd->max_size < 0 || umd->max_size > total_length)) /* illegal max_size */ return -EINVAL; - } else if ((umd->options & LNET_MD_KIOV) != 0) { - lmd->md_niov = niov = umd->length; + } else if (umd->options & LNET_MD_KIOV) { + niov = umd->length; + lmd->md_niov = umd->length; memcpy(lmd->md_iov.kiov, umd->start, niov * sizeof(lmd->md_iov.kiov[0])); for (i = 0; i < (int)niov; i++) { /* We take the page pointer on trust */ if (lmd->md_iov.kiov[i].kiov_offset + - lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) + lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE) return -EINVAL; /* invalid length */ total_length += lmd->md_iov.kiov[i].kiov_len; @@ -144,17 +147,18 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) lmd->md_length = total_length; - if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */ + if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */ (umd->max_size < 0 || umd->max_size > total_length)) /* illegal max_size */ return -EINVAL; } else { /* contiguous */ lmd->md_length = umd->length; - lmd->md_niov = niov = 1; + niov = 1; + lmd->md_niov = 1; lmd->md_iov.iov[0].iov_base = umd->start; lmd->md_iov.iov[0].iov_len = umd->length; - if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */ + if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */ (umd->max_size < 0 || umd->max_size > (int)umd->length)) /* illegal max_size */ return -EINVAL; @@ -169,22 +173,26 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt) { struct lnet_res_container *container = the_lnet.ln_md_containers[cpt]; - /* NB we are passed an allocated, but inactive md. + /* + * NB we are passed an allocated, but inactive md. * if we return success, caller may lnet_md_unlink() it. * otherwise caller may only lnet_md_free() it. */ - /* This implementation doesn't know how to create START events or + /* + * This implementation doesn't know how to create START events or * disable END events. Best to LASSERT our caller is compliant so - * we find out quickly... */ - /* TODO - reevaluate what should be here in light of + * we find out quickly... + */ + /* + * TODO - reevaluate what should be here in light of * the removal of the start and end events * maybe there we shouldn't even allow LNET_EQ_NONE!) - * LASSERT (eq == NULL); + * LASSERT(!eq); */ if (!LNetHandleIsInvalid(eq_handle)) { md->md_eq = lnet_handle2eq(&eq_handle); - if (md->md_eq == NULL) + if (!md->md_eq) return -ENOENT; (*md->md_eq->eq_refs[cpt])++; @@ -208,8 +216,8 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd) * and that's all. */ umd->start = lmd->md_start; - umd->length = ((lmd->md_options & - (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ? + umd->length = !(lmd->md_options & + (LNET_MD_IOVEC | LNET_MD_KIOV)) ? 
lmd->md_length : lmd->md_niov; umd->threshold = lmd->md_threshold; umd->max_size = lmd->md_max_size; @@ -221,13 +229,13 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd) static int lnet_md_validate(lnet_md_t *umd) { - if (umd->start == NULL && umd->length != 0) { + if (!umd->start && umd->length) { CERROR("MD start pointer can not be NULL with length %u\n", umd->length); return -EINVAL; } - if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 && + if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) && umd->length > LNET_MAX_IOV) { CERROR("Invalid option: too many fragments %u, %d max\n", umd->length, LNET_MAX_IOV); @@ -273,41 +281,42 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd, int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); - if (lnet_md_validate(&umd) != 0) + if (lnet_md_validate(&umd)) return -EINVAL; - if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) { + if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) { CERROR("Invalid option: no MD_OP set\n"); return -EINVAL; } md = lnet_md_alloc(&umd); - if (md == NULL) + if (!md) return -ENOMEM; rc = lnet_md_build(md, &umd, unlink); cpt = lnet_cpt_of_cookie(meh.cookie); lnet_res_lock(cpt); - if (rc != 0) + if (rc) goto failed; me = lnet_handle2me(&meh); - if (me == NULL) + if (!me) rc = -ENOENT; - else if (me->me_md != NULL) + else if (me->me_md) rc = -EBUSY; else rc = lnet_md_link(md, umd.eq_handle, cpt); - if (rc != 0) + if (rc) goto failed; - /* attach this MD to portal of ME and check if it matches any - * blocked msgs on this portal */ + /* + * attach this MD to portal of ME and check if it matches any + * blocked msgs on this portal + */ lnet_ptl_attach_md(me, md, &matches, &drops); lnet_md2handle(handle, md); @@ -350,29 +359,28 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle) int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); - if (lnet_md_validate(&umd) != 0) + if (lnet_md_validate(&umd)) return -EINVAL; - if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) { + if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) { CERROR("Invalid option: GET|PUT illegal on active MDs\n"); return -EINVAL; } md = lnet_md_alloc(&umd); - if (md == NULL) + if (!md) return -ENOMEM; rc = lnet_md_build(md, &umd, unlink); cpt = lnet_res_lock_current(); - if (rc != 0) + if (rc) goto failed; rc = lnet_md_link(md, umd.eq_handle, cpt); - if (rc != 0) + if (rc) goto failed; lnet_md2handle(handle, md); @@ -425,23 +433,24 @@ LNetMDUnlink(lnet_handle_md_t mdh) lnet_libmd_t *md; int cpt; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_cpt_of_cookie(mdh.cookie); lnet_res_lock(cpt); md = lnet_handle2md(&mdh); - if (md == NULL) { + if (!md) { lnet_res_unlock(cpt); return -ENOENT; } md->md_flags |= LNET_MD_FLAG_ABORTED; - /* If the MD is busy, lnet_md_unlink just marks it for deletion, and + /* + * If the MD is busy, lnet_md_unlink just marks it for deletion, and * when the LND is done, the completion event flags that the MD was - * unlinked. Otherwise, we enqueue an event now... */ - if (md->md_eq != NULL && md->md_refcount == 0) { + * unlinked. Otherwise, we enqueue an event now... 
+ */ + if (md->md_eq && !md->md_refcount) { lnet_build_unlink_event(md, &ev); lnet_eq_enqueue_event(md->md_eq, &ev); } diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c index 42fc99ef9..e671aed37 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-me.c +++ b/drivers/staging/lustre/lnet/lnet/lib-me.c @@ -83,7 +83,6 @@ LNetMEAttach(unsigned int portal, struct lnet_me *me; struct list_head *head; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if ((int)portal >= the_lnet.ln_nportals) @@ -91,11 +90,11 @@ LNetMEAttach(unsigned int portal, mtable = lnet_mt_of_attach(portal, match_id, match_bits, ignore_bits, pos); - if (mtable == NULL) /* can't match portal type */ + if (!mtable) /* can't match portal type */ return -EPERM; me = lnet_me_alloc(); - if (me == NULL) + if (!me) return -ENOMEM; lnet_res_lock(mtable->mt_cpt); @@ -109,7 +108,7 @@ LNetMEAttach(unsigned int portal, lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt], &me->me_lh); - if (ignore_bits != 0) + if (ignore_bits) head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE]; else head = lnet_mt_match_head(mtable, match_id, match_bits); @@ -156,14 +155,13 @@ LNetMEInsert(lnet_handle_me_t current_meh, struct lnet_portal *ptl; int cpt; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (pos == LNET_INS_LOCAL) return -EPERM; new_me = lnet_me_alloc(); - if (new_me == NULL) + if (!new_me) return -ENOMEM; cpt = lnet_cpt_of_cookie(current_meh.cookie); @@ -171,7 +169,7 @@ LNetMEInsert(lnet_handle_me_t current_meh, lnet_res_lock(cpt); current_me = lnet_handle2me(¤t_meh); - if (current_me == NULL) { + if (!current_me) { lnet_me_free(new_me); lnet_res_unlock(cpt); @@ -233,22 +231,21 @@ LNetMEUnlink(lnet_handle_me_t meh) lnet_event_t ev; int cpt; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_cpt_of_cookie(meh.cookie); lnet_res_lock(cpt); me = lnet_handle2me(&meh); - if (me == NULL) { + if (!me) { lnet_res_unlock(cpt); return -ENOENT; } md = me->me_md; - if (md != NULL) { + if (md) { md->md_flags |= LNET_MD_FLAG_ABORTED; - if (md->md_eq != NULL && md->md_refcount == 0) { + if (md->md_eq && !md->md_refcount) { lnet_build_unlink_event(md, &ev); lnet_eq_enqueue_event(md->md_eq, &ev); } @@ -267,7 +264,7 @@ lnet_me_unlink(lnet_me_t *me) { list_del(&me->me_list); - if (me->me_md != NULL) { + if (me->me_md) { lnet_libmd_t *md = me->me_md; /* detach MD from portal of this ME */ diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c index fb8f7be04..f19aa9320 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ b/drivers/staging/lustre/lnet/lnet/lib-move.c @@ -50,17 +50,16 @@ int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) { lnet_test_peer_t *tp; + lnet_test_peer_t *temp; struct list_head *el; struct list_head *next; struct list_head cull; - LASSERT(the_lnet.ln_init); - /* NB: use lnet_net_lock(0) to serialize operations on test peers */ - if (threshold != 0) { + if (threshold) { /* Adding a new entry */ LIBCFS_ALLOC(tp, sizeof(*tp)); - if (tp == NULL) + if (!tp) return -ENOMEM; tp->tp_nid = nid; @@ -80,7 +79,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) list_for_each_safe(el, next, &the_lnet.ln_test_peers) { tp = list_entry(el, lnet_test_peer_t, tp_list); - if (tp->tp_threshold == 0 || /* needs culling anyway */ + if (!tp->tp_threshold || /* needs culling anyway */ nid == LNET_NID_ANY || /* removing all entries */ tp->tp_nid == nid) { /* matched this one */ 
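
/*
 * Editor's sketch, not part of the patch: the deferred-unlink pattern
 * that lnet_md_unlink() above relies on - the first unlink marks the
 * descriptor as a zombie, and the actual free is deferred until the
 * last in-flight reference drops.  Reduced to a standalone user-space
 * example; all names (demo_md, DEMO_FLAG_ZOMBIE, ...) are illustrative,
 * not LNet API.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_FLAG_ZOMBIE 0x01

struct demo_md {
	int refcount;	/* in-flight operations using this MD */
	int flags;
};

/* First call marks the MD dead; it is only freed once idle. */
static void demo_md_unlink(struct demo_md *md)
{
	md->flags |= DEMO_FLAG_ZOMBIE;

	if (md->refcount) {
		printf("busy (%d refs): queueing unlink\n", md->refcount);
		return;
	}

	printf("idle: freeing MD\n");
	free(md);
}

/* The completion path drops its ref and retries the unlink. */
static void demo_md_put(struct demo_md *md)
{
	assert(md->refcount > 0);
	md->refcount--;
	if (md->flags & DEMO_FLAG_ZOMBIE)
		demo_md_unlink(md);
}

int main(void)
{
	struct demo_md *md = calloc(1, sizeof(*md));

	md->refcount = 1;	/* one operation in flight */
	demo_md_unlink(md);	/* marks zombie, defers the free */
	demo_md_put(md);	/* last ref gone: actually freed */
	return 0;
}
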
list_del(&tp->tp_list); @@ -90,9 +89,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) lnet_net_unlock(0); - while (!list_empty(&cull)) { - tp = list_entry(cull.next, lnet_test_peer_t, tp_list); - + list_for_each_entry_safe(tp, temp, &cull, tp_list) { list_del(&tp->tp_list); LIBCFS_FREE(tp, sizeof(*tp)); } @@ -103,6 +100,7 @@ static int fail_peer(lnet_nid_t nid, int outgoing) { lnet_test_peer_t *tp; + lnet_test_peer_t *temp; struct list_head *el; struct list_head *next; struct list_head cull; @@ -116,12 +114,14 @@ fail_peer(lnet_nid_t nid, int outgoing) list_for_each_safe(el, next, &the_lnet.ln_test_peers) { tp = list_entry(el, lnet_test_peer_t, tp_list); - if (tp->tp_threshold == 0) { + if (!tp->tp_threshold) { /* zombie entry */ if (outgoing) { - /* only cull zombies on outgoing tests, + /* + * only cull zombies on outgoing tests, * since we may be at interrupt priority on - * incoming messages. */ + * incoming messages. + */ list_del(&tp->tp_list); list_add(&tp->tp_list, &cull); } @@ -135,7 +135,7 @@ fail_peer(lnet_nid_t nid, int outgoing) if (tp->tp_threshold != LNET_MD_THRESH_INF) { tp->tp_threshold--; if (outgoing && - tp->tp_threshold == 0) { + !tp->tp_threshold) { /* see above */ list_del(&tp->tp_list); list_add(&tp->tp_list, &cull); @@ -147,8 +147,7 @@ fail_peer(lnet_nid_t nid, int outgoing) lnet_net_unlock(0); - while (!list_empty(&cull)) { - tp = list_entry(cull.next, lnet_test_peer_t, tp_list); + list_for_each_entry_safe(tp, temp, &cull, tp_list) { list_del(&tp->tp_list); LIBCFS_FREE(tp, sizeof(*tp)); @@ -162,6 +161,7 @@ lnet_iov_nob(unsigned int niov, struct kvec *iov) { unsigned int nob = 0; + LASSERT(!niov || iov); while (niov-- > 0) nob += (iov++)->iov_len; @@ -171,13 +171,13 @@ EXPORT_SYMBOL(lnet_iov_nob); void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, - unsigned int nsiov, struct kvec *siov, unsigned int soffset, - unsigned int nob) + unsigned int nsiov, struct kvec *siov, unsigned int soffset, + unsigned int nob) { /* NB diov, siov are READ-ONLY */ unsigned int this_nob; - if (nob == 0) + if (!nob) return; /* skip complete frags before 'doffset' */ @@ -206,7 +206,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, this_nob = min(this_nob, nob); memcpy((char *)diov->iov_base + doffset, - (char *)siov->iov_base + soffset, this_nob); + (char *)siov->iov_base + soffset, this_nob); nob -= this_nob; if (diov->iov_len > doffset + this_nob) { @@ -230,16 +230,18 @@ EXPORT_SYMBOL(lnet_copy_iov2iov); int lnet_extract_iov(int dst_niov, struct kvec *dst, - int src_niov, struct kvec *src, - unsigned int offset, unsigned int len) + int src_niov, struct kvec *src, + unsigned int offset, unsigned int len) { - /* Initialise 'dst' to the subset of 'src' starting at 'offset', + /* + * Initialise 'dst' to the subset of 'src' starting at 'offset', * for exactly 'len' bytes, and return the number of entries. 
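
/*
 * Editor's sketch, not part of the patch: why the cull loops above are
 * converted to list_for_each_entry_safe().  Freeing the current node
 * invalidates it, so the cursor for the next iteration must be saved
 * before the free - exactly what the _safe variant's extra "temp"
 * argument provides.  Plain user-space singly-linked list; names are
 * illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *n, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/* "safe" traversal: grab ->next before the node is freed */
	for (n = head; n; n = tmp) {
		tmp = n->next;
		printf("culling %d\n", n->val);
		free(n);
	}
	return 0;
}
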
- * NB not destructive to 'src' */ + * NB not destructive to 'src' + */ unsigned int frag_len; unsigned int niov; - if (len == 0) /* no data => */ + if (!len) /* no data => */ return 0; /* no frags */ LASSERT(src_niov > 0); @@ -280,6 +282,7 @@ lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov) { unsigned int nob = 0; + LASSERT(!niov || kiov); while (niov-- > 0) nob += (kiov++)->kiov_len; @@ -297,7 +300,7 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, char *daddr = NULL; char *saddr = NULL; - if (nob == 0) + if (!nob) return; LASSERT(!in_interrupt()); @@ -325,17 +328,18 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, siov->kiov_len - soffset); this_nob = min(this_nob, nob); - if (daddr == NULL) + if (!daddr) daddr = ((char *)kmap(diov->kiov_page)) + diov->kiov_offset + doffset; - if (saddr == NULL) + if (!saddr) saddr = ((char *)kmap(siov->kiov_page)) + siov->kiov_offset + soffset; - /* Vanishing risk of kmap deadlock when mapping 2 pages. + /* + * Vanishing risk of kmap deadlock when mapping 2 pages. * However in practice at least one of the kiovs will be mapped - * kernel pages and the map/unmap will be NOOPs */ - + * kernel pages and the map/unmap will be NOOPs + */ memcpy(daddr, saddr, this_nob); nob -= this_nob; @@ -362,9 +366,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, } } while (nob > 0); - if (daddr != NULL) + if (daddr) kunmap(diov->kiov_page); - if (saddr != NULL) + if (saddr) kunmap(siov->kiov_page); } EXPORT_SYMBOL(lnet_copy_kiov2kiov); @@ -378,7 +382,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int this_nob; char *addr = NULL; - if (nob == 0) + if (!nob) return; LASSERT(!in_interrupt()); @@ -406,7 +410,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, (__kernel_size_t) kiov->kiov_len - kiovoffset); this_nob = min(this_nob, nob); - if (addr == NULL) + if (!addr) addr = ((char *)kmap(kiov->kiov_page)) + kiov->kiov_offset + kiovoffset; @@ -434,7 +438,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, } while (nob > 0); - if (addr != NULL) + if (addr) kunmap(kiov->kiov_page); } EXPORT_SYMBOL(lnet_copy_kiov2iov); @@ -449,7 +453,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int this_nob; char *addr = NULL; - if (nob == 0) + if (!nob) return; LASSERT(!in_interrupt()); @@ -477,7 +481,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, iov->iov_len - iovoffset); this_nob = min(this_nob, nob); - if (addr == NULL) + if (!addr) addr = ((char *)kmap(kiov->kiov_page)) + kiov->kiov_offset + kiovoffset; @@ -504,23 +508,25 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, } } while (nob > 0); - if (addr != NULL) + if (addr) kunmap(kiov->kiov_page); } EXPORT_SYMBOL(lnet_copy_iov2kiov); int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, - int src_niov, lnet_kiov_t *src, - unsigned int offset, unsigned int len) + int src_niov, lnet_kiov_t *src, + unsigned int offset, unsigned int len) { - /* Initialise 'dst' to the subset of 'src' starting at 'offset', + /* + * Initialise 'dst' to the subset of 'src' starting at 'offset', * for exactly 'len' bytes, and return the number of entries. 
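
/*
 * Editor's sketch, not part of the patch: the fragment-extraction logic
 * of lnet_extract_iov()/lnet_extract_kiov() above in plain user-space C
 * - skip whole fragments that lie before 'offset', then emit trimmed
 * fragments until 'len' is consumed, without modifying the source.
 * Error handling here returns -1 where the kernel code LASSERTs; names
 * are illustrative.
 */
#include <stdio.h>
#include <sys/uio.h>

static int extract_iov(int dst_niov, struct iovec *dst,
		       int src_niov, const struct iovec *src,
		       size_t offset, size_t len)
{
	int niov = 0;

	if (!len)			/* no data => no fragments */
		return 0;

	/* skip whole source fragments before 'offset' */
	while (offset >= src->iov_len) {
		offset -= src->iov_len;
		src++;
		if (--src_niov <= 0)
			return -1;	/* offset beyond source */
	}

	while (len) {
		size_t frag = src->iov_len - offset;

		if (niov >= dst_niov)
			return -1;	/* destination too small */
		if (frag > len)
			frag = len;
		dst[niov].iov_base = (char *)src->iov_base + offset;
		dst[niov].iov_len = frag;
		niov++;
		len -= frag;
		offset = 0;
		if (len) {
			src++;
			if (--src_niov <= 0)
				return -1;	/* len beyond source */
		}
	}
	return niov;
}

int main(void)
{
	char a[10], b[10];
	struct iovec src[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct iovec dst[2];
	int n = extract_iov(2, dst, 2, src, 5, 10);

	/* expect 2 fragments: bytes 5..9 of 'a', bytes 0..4 of 'b' */
	printf("%d fragments, first len %zu\n", n, dst[0].iov_len);
	return 0;
}
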
- * NB not destructive to 'src' */ + * NB not destructive to 'src' + */ unsigned int frag_len; unsigned int niov; - if (len == 0) /* no data => */ + if (!len) /* no data => */ return 0; /* no frags */ LASSERT(src_niov > 0); @@ -543,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, if (len <= frag_len) { dst->kiov_len = len; LASSERT(dst->kiov_offset + dst->kiov_len - <= PAGE_CACHE_SIZE); + <= PAGE_SIZE); return niov; } dst->kiov_len = frag_len; - LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); + LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); len -= frag_len; dst++; @@ -560,7 +566,7 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, } EXPORT_SYMBOL(lnet_extract_kiov); -static void +void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int offset, unsigned int mlen, unsigned int rlen) { @@ -570,9 +576,9 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, int rc; LASSERT(!in_interrupt()); - LASSERT(mlen == 0 || msg != NULL); + LASSERT(!mlen || msg); - if (msg != NULL) { + if (msg) { LASSERT(msg->msg_receiving); LASSERT(!msg->msg_sending); LASSERT(rlen == msg->msg_len); @@ -582,18 +588,18 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, msg->msg_receiving = 0; - if (mlen != 0) { + if (mlen) { niov = msg->msg_niov; iov = msg->msg_iov; kiov = msg->msg_kiov; LASSERT(niov > 0); - LASSERT((iov == NULL) != (kiov == NULL)); + LASSERT(!iov != !kiov); } } - rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed, - niov, iov, kiov, offset, mlen, rlen); + rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed, + niov, iov, kiov, offset, mlen, rlen); if (rc < 0) lnet_finalize(ni, msg, rc); } @@ -605,13 +611,13 @@ lnet_setpayloadbuffer(lnet_msg_t *msg) LASSERT(msg->msg_len > 0); LASSERT(!msg->msg_routing); - LASSERT(md != NULL); - LASSERT(msg->msg_niov == 0); - LASSERT(msg->msg_iov == NULL); - LASSERT(msg->msg_kiov == NULL); + LASSERT(md); + LASSERT(!msg->msg_niov); + LASSERT(!msg->msg_iov); + LASSERT(!msg->msg_kiov); msg->msg_niov = md->md_niov; - if ((md->md_options & LNET_MD_KIOV) != 0) + if (md->md_options & LNET_MD_KIOV) msg->msg_kiov = md->md_iov.kiov; else msg->msg_iov = md->md_iov.iov; @@ -626,7 +632,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target, msg->msg_len = len; msg->msg_offset = offset; - if (len != 0) + if (len) lnet_setpayloadbuffer(msg); memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr)); @@ -646,9 +652,9 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg) LASSERT(!in_interrupt()); LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND || - (msg->msg_txcredit && msg->msg_peertxcredit)); + (msg->msg_txcredit && msg->msg_peertxcredit)); - rc = (ni->ni_lnd->lnd_send)(ni, priv, msg); + rc = ni->ni_lnd->lnd_send(ni, priv, msg); if (rc < 0) lnet_finalize(ni, msg, rc); } @@ -661,12 +667,12 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg) LASSERT(!msg->msg_sending); LASSERT(msg->msg_receiving); LASSERT(!msg->msg_rx_ready_delay); - LASSERT(ni->ni_lnd->lnd_eager_recv != NULL); + LASSERT(ni->ni_lnd->lnd_eager_recv); msg->msg_rx_ready_delay = 1; - rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg, - &msg->msg_private); - if (rc != 0) { + rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg, + &msg->msg_private); + if (rc) { CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n", libcfs_nid2str(msg->msg_rxpeer->lp_nid), libcfs_id2str(msg->msg_target), rc); @@ -683,15 +689,15 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp) unsigned 
long last_alive = 0; LASSERT(lnet_peer_aliveness_enabled(lp)); - LASSERT(ni->ni_lnd->lnd_query != NULL); + LASSERT(ni->ni_lnd->lnd_query); lnet_net_unlock(lp->lp_cpt); - (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive); + ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive); lnet_net_lock(lp->lp_cpt); lp->lp_last_query = cfs_time_current(); - if (last_alive != 0) /* NI has updated timestamp */ + if (last_alive) /* NI has updated timestamp */ lp->lp_last_alive = last_alive; } @@ -720,14 +726,16 @@ lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now) * case, and moreover lp_last_alive at peer creation is assumed. */ if (alive && !lp->lp_alive && - !(lnet_isrouter(lp) && lp->lp_alive_count == 0)) + !(lnet_isrouter(lp) && !lp->lp_alive_count)) lnet_notify_locked(lp, 0, 1, lp->lp_last_alive); return alive; } -/* NB: returns 1 when alive, 0 when dead, negative when error; - * may drop the lnet_net_lock */ +/* + * NB: returns 1 when alive, 0 when dead, negative when error; + * may drop the lnet_net_lock + */ static int lnet_peer_alive_locked(lnet_peer_t *lp) { @@ -739,9 +747,11 @@ lnet_peer_alive_locked(lnet_peer_t *lp) if (lnet_peer_is_alive(lp, now)) return 1; - /* Peer appears dead, but we should avoid frequent NI queries (at - * most once per lnet_queryinterval seconds). */ - if (lp->lp_last_query != 0) { + /* + * Peer appears dead, but we should avoid frequent NI queries (at + * most once per lnet_queryinterval seconds). + */ + if (lp->lp_last_query) { static const int lnet_queryinterval = 1; unsigned long next_query = @@ -775,10 +785,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp) * lnet_send() is going to lnet_net_unlock immediately after this, so * it sets do_send FALSE and I don't do the unlock/send/lock bit. * - * \retval 0 If \a msg sent or OK to send. - * \retval EAGAIN If \a msg blocked for credit. - * \retval EHOSTUNREACH If the next hop of the message appears dead. - * \retval ECANCELED If the MD of the message has been unlinked. + * \retval LNET_CREDIT_OK If \a msg sent or OK to send. + * \retval LNET_CREDIT_WAIT If \a msg blocked for credit. + * \retval -EHOSTUNREACH If the next hop of the message appears dead. + * \retval -ECANCELED If the MD of the message has been unlinked. 
*/ static int lnet_post_send_locked(lnet_msg_t *msg, int do_send) @@ -794,8 +804,8 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) LASSERT(msg->msg_tx_committed); /* NB 'lp' is always the next hop */ - if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 && - lnet_peer_alive_locked(lp) == 0) { + if (!(msg->msg_target.pid & LNET_PID_USERFLAG) && + !lnet_peer_alive_locked(lp)) { the_lnet.ln_counters[cpt]->drop_count++; the_lnet.ln_counters[cpt]->drop_length += msg->msg_len; lnet_net_unlock(cpt); @@ -806,11 +816,11 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_finalize(ni, msg, -EHOSTUNREACH); lnet_net_lock(cpt); - return EHOSTUNREACH; + return -EHOSTUNREACH; } - if (msg->msg_md != NULL && - (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) { + if (msg->msg_md && + (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) { lnet_net_unlock(cpt); CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n", @@ -819,12 +829,12 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_finalize(ni, msg, -ECANCELED); lnet_net_lock(cpt); - return ECANCELED; + return -ECANCELED; } if (!msg->msg_peertxcredit) { LASSERT((lp->lp_txcredits < 0) == - !list_empty(&lp->lp_txq)); + !list_empty(&lp->lp_txq)); msg->msg_peertxcredit = 1; lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t); @@ -836,7 +846,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) if (lp->lp_txcredits < 0) { msg->msg_tx_delayed = 1; list_add_tail(&msg->msg_list, &lp->lp_txq); - return EAGAIN; + return LNET_CREDIT_WAIT; } } @@ -853,7 +863,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) if (tq->tq_credits < 0) { msg->msg_tx_delayed = 1; list_add_tail(&msg->msg_list, &tq->tq_delayed); - return EAGAIN; + return LNET_CREDIT_WAIT; } } @@ -862,7 +872,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) lnet_ni_send(ni, msg); lnet_net_lock(cpt); } - return 0; + return LNET_CREDIT_OK; } static lnet_rtrbufpool_t * @@ -877,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg) rbp = &the_lnet.ln_rtrpools[cpt][0]; LASSERT(msg->msg_len <= LNET_MTU); - while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { + while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) { rbp++; LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); } @@ -888,16 +898,19 @@ lnet_msg2bufpool(lnet_msg_t *msg) static int lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) { - /* lnet_parse is going to lnet_net_unlock immediately after this, so it - * sets do_recv FALSE and I don't do the unlock/send/lock bit. I - * return EAGAIN if msg blocked and 0 if received or OK to receive */ + /* + * lnet_parse is going to lnet_net_unlock immediately after this, so it + * sets do_recv FALSE and I don't do the unlock/send/lock bit. 
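
/*
 * Editor's sketch, not part of the patch: the credit scheme behind the
 * LNET_CREDIT_OK/LNET_CREDIT_WAIT return values introduced above.  A
 * sender takes a credit; if the counter goes negative it parks on a
 * queue, and each credit returned resumes exactly one parked sender.
 * Standalone user-space model; names are illustrative.
 */
#include <stdio.h>

#define CREDIT_OK	0
#define CREDIT_WAIT	1

static int credits = 2;		/* e.g. per-peer tx credits */
static int queued;		/* stand-in for the lp_txq list */

static int post_send(int msg)
{
	if (--credits < 0) {
		queued++;	/* list_add_tail() onto the tx queue */
		printf("msg %d: blocked for credit\n", msg);
		return CREDIT_WAIT;
	}
	printf("msg %d: sent\n", msg);
	return CREDIT_OK;
}

static void return_credit(void)
{
	/* still <= 0 after the increment means someone is waiting */
	if (++credits <= 0 && queued) {
		queued--;
		printf("credit back: resuming one queued msg\n");
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		post_send(i);	/* msgs 2 and 3 block */
	return_credit();	/* resumes msg 2 */
	return_credit();	/* resumes msg 3 */
	return 0;
}
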
+ * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if + * received or OK to receive + */ lnet_peer_t *lp = msg->msg_rxpeer; lnet_rtrbufpool_t *rbp; lnet_rtrbuf_t *rb; - LASSERT(msg->msg_iov == NULL); - LASSERT(msg->msg_kiov == NULL); - LASSERT(msg->msg_niov == 0); + LASSERT(!msg->msg_iov); + LASSERT(!msg->msg_kiov); + LASSERT(!msg->msg_niov); LASSERT(msg->msg_routing); LASSERT(msg->msg_receiving); LASSERT(!msg->msg_sending); @@ -907,7 +920,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) if (!msg->msg_peerrtrcredit) { LASSERT((lp->lp_rtrcredits < 0) == - !list_empty(&lp->lp_rtrq)); + !list_empty(&lp->lp_rtrq)); msg->msg_peerrtrcredit = 1; lp->lp_rtrcredits--; @@ -919,16 +932,13 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) LASSERT(msg->msg_rx_ready_delay); msg->msg_rx_delayed = 1; list_add_tail(&msg->msg_list, &lp->lp_rtrq); - return EAGAIN; + return LNET_CREDIT_WAIT; } } rbp = lnet_msg2bufpool(msg); if (!msg->msg_rtrcredit) { - LASSERT((rbp->rbp_credits < 0) == - !list_empty(&rbp->rbp_msgs)); - msg->msg_rtrcredit = 1; rbp->rbp_credits--; if (rbp->rbp_credits < rbp->rbp_mincredits) @@ -939,7 +949,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) LASSERT(msg->msg_rx_ready_delay); msg->msg_rx_delayed = 1; list_add_tail(&msg->msg_list, &rbp->rbp_msgs); - return EAGAIN; + return LNET_CREDIT_WAIT; } } @@ -958,7 +968,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) 0, msg->msg_len, msg->msg_len); lnet_net_lock(cpt); } - return 0; + return LNET_CREDIT_OK; } void @@ -980,7 +990,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) tq->tq_credits++; if (tq->tq_credits <= 0) { msg2 = list_entry(tq->tq_delayed.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg2->msg_list); LASSERT(msg2->msg_txpeer->lp_ni == ni); @@ -1003,7 +1013,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) txpeer->lp_txcredits++; if (txpeer->lp_txcredits <= 0) { msg2 = list_entry(txpeer->lp_txq.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg2->msg_list); LASSERT(msg2->msg_txpeer == txpeer); @@ -1013,12 +1023,49 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) } } - if (txpeer != NULL) { + if (txpeer) { msg->msg_txpeer = NULL; lnet_peer_decref_locked(txpeer); } } +void +lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp) +{ + lnet_msg_t *msg; + + if (list_empty(&rbp->rbp_msgs)) + return; + msg = list_entry(rbp->rbp_msgs.next, + lnet_msg_t, msg_list); + list_del(&msg->msg_list); + + (void)lnet_post_routed_recv_locked(msg, 1); +} + +void +lnet_drop_routed_msgs_locked(struct list_head *list, int cpt) +{ + struct list_head drop; + lnet_msg_t *msg; + lnet_msg_t *tmp; + + INIT_LIST_HEAD(&drop); + + list_splice_init(list, &drop); + + lnet_net_unlock(cpt); + + list_for_each_entry_safe(msg, tmp, &drop, msg_list) { + lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL, + 0, 0, 0, msg->msg_hdr.payload_length); + list_del_init(&msg->msg_list); + lnet_finalize(NULL, msg, -ECANCELED); + } + + lnet_net_lock(cpt); +} + void lnet_return_rx_credits_locked(lnet_msg_t *msg) { @@ -1030,34 +1077,51 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg) lnet_rtrbuf_t *rb; lnet_rtrbufpool_t *rbp; - /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays + /* + * NB If a msg ever blocks for a buffer in rbp_msgs, it stays * there until it gets one allocated, or aborts the wait - * itself */ - LASSERT(msg->msg_kiov != NULL); + * itself + */ + LASSERT(msg->msg_kiov); rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]); rbp = 
rb->rb_pool; - LASSERT(rbp == lnet_msg2bufpool(msg)); msg->msg_kiov = NULL; msg->msg_rtrcredit = 0; - LASSERT((rbp->rbp_credits < 0) == - !list_empty(&rbp->rbp_msgs)); + LASSERT(rbp == lnet_msg2bufpool(msg)); + LASSERT((rbp->rbp_credits > 0) == !list_empty(&rbp->rbp_bufs)); - list_add(&rb->rb_list, &rbp->rbp_bufs); - rbp->rbp_credits++; - if (rbp->rbp_credits <= 0) { - msg2 = list_entry(rbp->rbp_msgs.next, - lnet_msg_t, msg_list); - list_del(&msg2->msg_list); + /* + * If routing is now turned off, we just drop this buffer and + * don't bother trying to return credits. + */ + if (!the_lnet.ln_routing) { + lnet_destroy_rtrbuf(rb, rbp->rbp_npages); + goto routing_off; + } - (void) lnet_post_routed_recv_locked(msg2, 1); + /* + * It is possible that a user has lowered the desired number of + * buffers in this pool. Make sure we never put back + * more buffers than the stated number. + */ + if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) { + /* Discard this buffer so we don't have too many. */ + lnet_destroy_rtrbuf(rb, rbp->rbp_npages); + rbp->rbp_nbuffers--; + } else { + list_add(&rb->rb_list, &rbp->rbp_bufs); + rbp->rbp_credits++; + if (rbp->rbp_credits <= 0) + lnet_schedule_blocked_locked(rbp); } } +routing_off: if (msg->msg_peerrtrcredit) { /* give back peer router credits */ msg->msg_peerrtrcredit = 0; @@ -1066,15 +1130,22 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg) !list_empty(&rxpeer->lp_rtrq)); rxpeer->lp_rtrcredits++; - if (rxpeer->lp_rtrcredits <= 0) { + /* + * drop all messages which are queued to be routed on that + * peer. + */ + if (!the_lnet.ln_routing) { + lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq, + msg->msg_rx_cpt); + } else if (rxpeer->lp_rtrcredits <= 0) { msg2 = list_entry(rxpeer->lp_rtrq.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg2->msg_list); (void) lnet_post_routed_recv_locked(msg2, 1); } } - if (rxpeer != NULL) { + if (rxpeer) { msg->msg_rxpeer = NULL; lnet_peer_decref_locked(rxpeer); } @@ -1085,94 +1156,99 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2) { lnet_peer_t *p1 = r1->lr_gateway; lnet_peer_t *p2 = r2->lr_gateway; + int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops; + int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 
1 : r2->lr_hops; if (r1->lr_priority < r2->lr_priority) return 1; if (r1->lr_priority > r2->lr_priority) - return -1; + return -ERANGE; - if (r1->lr_hops < r2->lr_hops) + if (r1_hops < r2_hops) return 1; - if (r1->lr_hops > r2->lr_hops) - return -1; + if (r1_hops > r2_hops) + return -ERANGE; if (p1->lp_txqnob < p2->lp_txqnob) return 1; if (p1->lp_txqnob > p2->lp_txqnob) - return -1; + return -ERANGE; if (p1->lp_txcredits > p2->lp_txcredits) return 1; if (p1->lp_txcredits < p2->lp_txcredits) - return -1; + return -ERANGE; if (r1->lr_seq - r2->lr_seq <= 0) return 1; - return -1; + return -ERANGE; } static lnet_peer_t * lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid) { lnet_remotenet_t *rnet; - lnet_route_t *rtr; - lnet_route_t *rtr_best; - lnet_route_t *rtr_last; + lnet_route_t *route; + lnet_route_t *best_route; + lnet_route_t *last_route; struct lnet_peer *lp_best; struct lnet_peer *lp; int rc; - /* If @rtr_nid is not LNET_NID_ANY, return the gateway with - * rtr_nid nid, otherwise find the best gateway I can use */ - + /* + * If @rtr_nid is not LNET_NID_ANY, return the gateway with + * rtr_nid nid, otherwise find the best gateway I can use + */ rnet = lnet_find_net_locked(LNET_NIDNET(target)); - if (rnet == NULL) + if (!rnet) return NULL; lp_best = NULL; - rtr_best = rtr_last = NULL; - list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) { - lp = rtr->lr_gateway; + best_route = NULL; + last_route = NULL; + list_for_each_entry(route, &rnet->lrn_routes, lr_list) { + lp = route->lr_gateway; - if (!lp->lp_alive || /* gateway is down */ - ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 && - rtr->lr_downis != 0)) /* NI to target is down */ + if (!lnet_is_route_alive(route)) continue; - if (ni != NULL && lp->lp_ni != ni) + if (ni && lp->lp_ni != ni) continue; if (lp->lp_nid == rtr_nid) /* it's pre-determined router */ return lp; - if (lp_best == NULL) { - rtr_best = rtr_last = rtr; + if (!lp_best) { + best_route = route; + last_route = route; lp_best = lp; continue; } /* no protection on below fields, but it's harmless */ - if (rtr_last->lr_seq - rtr->lr_seq < 0) - rtr_last = rtr; + if (last_route->lr_seq - route->lr_seq < 0) + last_route = route; - rc = lnet_compare_routes(rtr, rtr_best); + rc = lnet_compare_routes(route, best_route); if (rc < 0) continue; - rtr_best = rtr; + best_route = route; lp_best = lp; } - /* set sequence number on the best router to the latest sequence + 1 + /* + * set sequence number on the best router to the latest sequence + 1 * so we can round-robin all routers, it's race and inaccurate but - * harmless and functional */ - if (rtr_best != NULL) - rtr_best->lr_seq = rtr_last->lr_seq + 1; + * harmless and functional + */ + if (best_route) + best_route->lr_seq = last_route->lr_seq + 1; return lp_best; } @@ -1187,11 +1263,13 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) int cpt2; int rc; - /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, + /* + * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, * but we might want to use pre-determined router for ACK/REPLY - * in the future */ - /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */ - LASSERT(msg->msg_txpeer == NULL); + * in the future + */ + /* NB: ni == interface pre-determined (ACK/REPLY) */ + LASSERT(!msg->msg_txpeer); LASSERT(!msg->msg_sending); LASSERT(!msg->msg_target_is_router); LASSERT(!msg->msg_receiving); @@ -1212,7 +1290,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) src_ni = NULL; } else { 
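
/*
 * Editor's sketch, not part of the patch: the tie-breaking order that
 * lnet_compare_routes() above applies - priority, then hop count (an
 * undefined hop count scores as 1, as in the r1_hops/r2_hops change),
 * then queued bytes, then available credits, then the round-robin
 * sequence.  Standalone model with illustrative names and a plain
 * -1/1 result instead of -ERANGE.
 */
#include <stdio.h>

#define UNDEFINED_HOPS ((unsigned int)-1)

struct demo_route {
	unsigned int priority;	/* lower is preferred */
	unsigned int hops;	/* fewer is preferred */
	long	     txqnob;	/* queued bytes; fewer is preferred */
	int	     credits;	/* more is preferred */
	long	     seq;	/* round-robin; older is preferred */
};

static int compare_routes(const struct demo_route *r1,
			  const struct demo_route *r2)
{
	unsigned int h1 = r1->hops == UNDEFINED_HOPS ? 1 : r1->hops;
	unsigned int h2 = r2->hops == UNDEFINED_HOPS ? 1 : r2->hops;

	if (r1->priority != r2->priority)
		return r1->priority < r2->priority ? 1 : -1;
	if (h1 != h2)
		return h1 < h2 ? 1 : -1;
	if (r1->txqnob != r2->txqnob)
		return r1->txqnob < r2->txqnob ? 1 : -1;
	if (r1->credits != r2->credits)
		return r1->credits > r2->credits ? 1 : -1;
	/* least-recently-used route wins the final tie-break */
	return r1->seq - r2->seq <= 0 ? 1 : -1;
}

int main(void)
{
	struct demo_route a = { 0, UNDEFINED_HOPS, 0, 8, 5 };
	struct demo_route b = { 0, 2, 0, 8, 3 };

	/* a's undefined hop count counts as 1, so a beats b: prints 1 */
	printf("%d\n", compare_routes(&a, &b));
	return 0;
}
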
src_ni = lnet_nid2ni_locked(src_nid, cpt); - if (src_ni == NULL) { + if (!src_ni) { lnet_net_unlock(cpt); LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n", libcfs_nid2str(dst_nid), @@ -1225,8 +1303,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) /* Is this for someone on a local network? */ local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt); - if (local_ni != NULL) { - if (src_ni == NULL) { + if (local_ni) { + if (!src_ni) { src_ni = local_ni; src_nid = src_ni->ni_nid; } else if (src_ni == local_ni) { @@ -1261,7 +1339,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) rc = lnet_nid2peer_locked(&lp, dst_nid, cpt); /* lp has ref on src_ni; lose mine */ lnet_ni_decref_locked(src_ni, cpt); - if (rc != 0) { + if (rc) { lnet_net_unlock(cpt); LCONSOLE_WARN("Error %d finding peer %s\n", rc, libcfs_nid2str(dst_nid)); @@ -1272,8 +1350,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) } else { /* sending to a remote network */ lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid); - if (lp == NULL) { - if (src_ni != NULL) + if (!lp) { + if (src_ni) lnet_ni_decref_locked(src_ni, cpt); lnet_net_unlock(cpt); @@ -1283,14 +1361,16 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) return -EHOSTUNREACH; } - /* rtr_nid is LNET_NID_ANY or NID of pre-determined router, + /* + * rtr_nid is LNET_NID_ANY or NID of pre-determined router, * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't * pre-determined router, this can happen if router table - * was changed when we release the lock */ + * was changed when we release the lock + */ if (rtr_nid != lp->lp_nid) { cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid); if (cpt2 != cpt) { - if (src_ni != NULL) + if (src_ni) lnet_ni_decref_locked(src_ni, cpt); lnet_net_unlock(cpt); @@ -1304,7 +1384,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid), lnet_msgtyp2str(msg->msg_type), msg->msg_len); - if (src_ni == NULL) { + if (!src_ni) { src_ni = lp->lp_ni; src_nid = src_ni->ni_nid; } else { @@ -1324,30 +1404,30 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) msg->msg_target_is_router = 1; msg->msg_target.nid = lp->lp_nid; - msg->msg_target.pid = LUSTRE_SRV_LNET_PID; + msg->msg_target.pid = LNET_PID_LUSTRE; } /* 'lp' is our best choice of peer */ LASSERT(!msg->msg_peertxcredit); LASSERT(!msg->msg_txcredit); - LASSERT(msg->msg_txpeer == NULL); + LASSERT(!msg->msg_txpeer); msg->msg_txpeer = lp; /* msg takes my ref on lp */ rc = lnet_post_send_locked(msg, 0); lnet_net_unlock(cpt); - if (rc == EHOSTUNREACH || rc == ECANCELED) - return -rc; + if (rc < 0) + return rc; - if (rc == 0) + if (rc == LNET_CREDIT_OK) lnet_ni_send(src_ni, msg); - return 0; /* rc == 0 or EAGAIN */ + return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */ } -static void +void lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob) { lnet_net_lock(cpt); @@ -1363,15 +1443,17 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg) { lnet_hdr_t *hdr = &msg->msg_hdr; - if (msg->msg_wanted != 0) + if (msg->msg_wanted) lnet_setpayloadbuffer(msg); lnet_build_msg_event(msg, LNET_EVENT_PUT); - /* Must I ACK? If so I'll grab the ack_wmd out of the header and put - * it back into the ACK during lnet_finalize() */ - msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) && - (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0); + /* + * Must I ACK? 
If so I'll grab the ack_wmd out of the header and put + * it back into the ACK during lnet_finalize() + */ + msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) && + !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE); lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed, msg->msg_offset, msg->msg_wanted, hdr->payload_length); @@ -1382,6 +1464,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) { lnet_hdr_t *hdr = &msg->msg_hdr; struct lnet_match_info info; + bool ready_delay; int rc; /* Convert put fields to host byte order */ @@ -1397,7 +1480,8 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) info.mi_roffset = hdr->msg.put.offset; info.mi_mbits = hdr->msg.put.match_bits; - msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL; + msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv; + ready_delay = msg->msg_rx_ready_delay; again: rc = lnet_ptl_match_md(&info, msg); @@ -1410,12 +1494,18 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) return 0; case LNET_MATCHMD_NONE: - if (msg->msg_rx_delayed) /* attached on delayed list */ + /** + * no eager_recv or has already called it, should + * have been attached on delayed list + */ + if (ready_delay) return 0; rc = lnet_ni_eager_recv(ni, msg); - if (rc == 0) + if (!rc) { + ready_delay = true; goto again; + } /* fall through */ case LNET_MATCHMD_DROP: @@ -1423,7 +1513,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) libcfs_id2str(info.mi_id), info.mi_portal, info.mi_mbits, info.mi_roffset, info.mi_rlength, rc); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } } @@ -1454,7 +1544,7 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get) CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n", libcfs_id2str(info.mi_id), info.mi_portal, info.mi_mbits, info.mi_roffset, info.mi_rlength); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } LASSERT(rc == LNET_MATCHMD_OK); @@ -1510,33 +1600,33 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) /* NB handles only looked up by creator (no flips) */ md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - (md == NULL) ? "invalid" : "inactive", + !md ? 
"invalid" : "inactive", hdr->msg.reply.dst_wmd.wh_interface_cookie, hdr->msg.reply.dst_wmd.wh_object_cookie); - if (md != NULL && md->md_me != NULL) + if (md && md->md_me) CERROR("REPLY MD also attached to portal %d\n", md->md_me->me_portal); lnet_res_unlock(cpt); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } - LASSERT(md->md_offset == 0); + LASSERT(!md->md_offset); rlength = hdr->payload_length; mlength = min_t(uint, rlength, md->md_length); if (mlength < rlength && - (md->md_options & LNET_MD_TRUNCATE) == 0) { + !(md->md_options & LNET_MD_TRUNCATE)) { CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), rlength, hdr->msg.reply.dst_wmd.wh_object_cookie, mlength); lnet_res_unlock(cpt); - return ENOENT; /* +ve: OK but no match */ + return -ENOENT; /* -ve: OK but no match */ } CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n", @@ -1545,7 +1635,7 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) lnet_msg_attach_md(msg, md, 0, mlength); - if (mlength != 0) + if (mlength) lnet_setpayloadbuffer(msg); lnet_res_unlock(cpt); @@ -1576,20 +1666,20 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) /* NB handles only looked up by creator (no flips) */ md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { /* Don't moan; this is expected */ CDEBUG(D_NET, "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(src), - (md == NULL) ? "invalid" : "inactive", + !md ? "invalid" : "inactive", hdr->msg.ack.dst_wmd.wh_interface_cookie, hdr->msg.ack.dst_wmd.wh_object_cookie); - if (md != NULL && md->md_me != NULL) + if (md && md->md_me) CERROR("Source MD also attached to portal %d\n", md->md_me->me_portal); lnet_res_unlock(cpt); - return ENOENT; /* +ve! */ + return -ENOENT; /* -ve! 
*/ } CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n", @@ -1606,14 +1696,22 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) return 0; } -static int +/** + * \retval LNET_CREDIT_OK If \a msg is forwarded + * \retval LNET_CREDIT_WAIT If \a msg is blocked because w/o buffer + * \retval -ve error code + */ +int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg) { int rc = 0; + if (!the_lnet.ln_routing) + return -ECANCELED; + if (msg->msg_rxpeer->lp_rtrcredits <= 0 || lnet_msg2bufpool(msg)->rbp_credits <= 0) { - if (ni->ni_lnd->lnd_eager_recv == NULL) { + if (!ni->ni_lnd->lnd_eager_recv) { msg->msg_rx_ready_delay = 1; } else { lnet_net_unlock(msg->msg_rx_cpt); @@ -1622,11 +1720,38 @@ lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg) } } - if (rc == 0) + if (!rc) rc = lnet_post_routed_recv_locked(msg, 0); return rc; } +int +lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg) +{ + int rc; + + switch (msg->msg_type) { + case LNET_MSG_ACK: + rc = lnet_parse_ack(ni, msg); + break; + case LNET_MSG_PUT: + rc = lnet_parse_put(ni, msg); + break; + case LNET_MSG_GET: + rc = lnet_parse_get(ni, msg, msg->msg_rdma_get); + break; + case LNET_MSG_REPLY: + rc = lnet_parse_reply(ni, msg); + break; + default: /* prevent an unused label if !kernel */ + LASSERT(0); + return -EPROTO; + } + + LASSERT(!rc || rc == -ENOENT); + return rc; +} + char * lnet_msgtyp2str(int type) { @@ -1702,7 +1827,6 @@ lnet_print_hdr(lnet_hdr_t *hdr) hdr->msg.reply.dst_wmd.wh_object_cookie, hdr->payload_length); } - } int @@ -1765,20 +1889,20 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, if (the_lnet.ln_routing && ni->ni_last_alive != ktime_get_real_seconds()) { - lnet_ni_lock(ni); - /* NB: so far here is the only place to set NI status to "up */ + lnet_ni_lock(ni); ni->ni_last_alive = ktime_get_real_seconds(); - if (ni->ni_status != NULL && + if (ni->ni_status && ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) ni->ni_status->ns_status = LNET_NI_STATUS_UP; lnet_ni_unlock(ni); } - /* Regard a bad destination NID as a protocol error. Senders should + /* + * Regard a bad destination NID as a protocol error. Senders should * know what they're doing; if they don't they're misconfigured, buggy - * or malicious so we chop them off at the knees :) */ - + * or malicious so we chop them off at the knees :) + */ if (!for_me) { if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) { /* should have gone direct */ @@ -1790,8 +1914,10 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, } if (lnet_islocalnid(dest_nid)) { - /* dest is another local NI; sender should have used - * this node's NID on its own network */ + /* + * dest is another local NI; sender should have used + * this node's NID on its own network + */ CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), @@ -1816,9 +1942,10 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, } } - /* Message looks OK; we're not going to return an error, so we MUST - * call back lnd_recv() come what may... */ - + /* + * Message looks OK; we're not going to return an error, so we MUST + * call back lnd_recv() come what may... + */ if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ fail_peer(src_nid, 0)) { /* shall we now? 
*/ CERROR("%s, src %s: Dropping %s to simulate failure\n", @@ -1827,8 +1954,16 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, goto drop; } + if (!list_empty(&the_lnet.ln_drop_rules) && + lnet_drop_rule_match(hdr)) { + CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n", + libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), + libcfs_nid2str(dest_nid), lnet_msgtyp2str(type)); + goto drop; + } + msg = lnet_msg_alloc(); - if (msg == NULL) { + if (!msg) { CERROR("%s, src %s: Dropping %s (out of memory)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), lnet_msgtyp2str(type)); @@ -1838,11 +1973,12 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, /* msg zeroed in lnet_msg_alloc; * i.e. flags all clear, pointers NULL etc */ - msg->msg_type = type; msg->msg_private = private; msg->msg_receiving = 1; - msg->msg_len = msg->msg_wanted = payload_length; + msg->msg_rdma_get = rdma_req; + msg->msg_wanted = payload_length; + msg->msg_len = payload_length; msg->msg_offset = 0; msg->msg_hdr = *hdr; /* for building message event */ @@ -1864,7 +2000,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_net_lock(cpt); rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt); - if (rc != 0) { + if (rc) { lnet_net_unlock(cpt); CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n", libcfs_nid2str(from_nid), libcfs_nid2str(src_nid), @@ -1888,13 +2024,21 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_msg_commit(msg, cpt); + /* message delay simulation */ + if (unlikely(!list_empty(&the_lnet.ln_delay_rules) && + lnet_delay_rule_match_locked(hdr, msg))) { + lnet_net_unlock(cpt); + return 0; + } + if (!for_me) { rc = lnet_parse_forward_locked(ni, msg); lnet_net_unlock(cpt); if (rc < 0) goto free_drop; - if (rc == 0) { + + if (rc == LNET_CREDIT_OK) { lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, payload_length, payload_length); } @@ -1903,32 +2047,13 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, lnet_net_unlock(cpt); - switch (type) { - case LNET_MSG_ACK: - rc = lnet_parse_ack(ni, msg); - break; - case LNET_MSG_PUT: - rc = lnet_parse_put(ni, msg); - break; - case LNET_MSG_GET: - rc = lnet_parse_get(ni, msg, rdma_req); - break; - case LNET_MSG_REPLY: - rc = lnet_parse_reply(ni, msg); - break; - default: - LASSERT(0); - rc = -EPROTO; - goto free_drop; /* prevent an unused label if !kernel */ - } - - if (rc == 0) - return 0; - - LASSERT(rc == ENOENT); + rc = lnet_parse_local(ni, msg); + if (rc) + goto free_drop; + return 0; free_drop: - LASSERT(msg->msg_md == NULL); + LASSERT(!msg->msg_md); lnet_finalize(ni, msg, rc); drop: @@ -1950,9 +2075,9 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason) id.nid = msg->msg_hdr.src_nid; id.pid = msg->msg_hdr.src_pid; - LASSERT(msg->msg_md == NULL); + LASSERT(!msg->msg_md); LASSERT(msg->msg_rx_delayed); - LASSERT(msg->msg_rxpeer != NULL); + LASSERT(msg->msg_rxpeer); LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n", @@ -1962,10 +2087,11 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason) msg->msg_hdr.msg.put.offset, msg->msg_hdr.payload_length, reason); - /* NB I can't drop msg's ref on msg_rxpeer until after I've + /* + * NB I can't drop msg's ref on msg_rxpeer until after I've * called lnet_drop_message(), so I just hang onto msg as well - * until that's done */ - + * until that's done + */ 
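
/*
 * Editor's sketch, not part of the patch: the drain pattern used by
 * lnet_drop_routed_msgs_locked() and the delayed-message drops above -
 * detach the whole queue onto a private list while holding the lock,
 * then do the slow per-message teardown with the lock dropped.
 * Standalone user-space model; names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
	int id;
	struct msg *next;
};

static pthread_mutex_t net_lock = PTHREAD_MUTEX_INITIALIZER;
static struct msg *queue;	/* shared, protected by net_lock */

static void drop_all(void)
{
	struct msg *drop, *m, *tmp;

	/* splice: steal the whole list in O(1) under the lock */
	pthread_mutex_lock(&net_lock);
	drop = queue;
	queue = NULL;
	pthread_mutex_unlock(&net_lock);

	/* per-message work happens with the lock dropped */
	for (m = drop; m; m = tmp) {
		tmp = m->next;
		printf("dropping msg %d\n", m->id);
		free(m);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		struct msg *m = malloc(sizeof(*m));

		m->id = i;
		m->next = queue;
		queue = m;
	}
	drop_all();
	return 0;
}
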
lnet_drop_message(msg->msg_rxpeer->lp_ni, msg->msg_rxpeer->lp_cpt, msg->msg_private, msg->msg_len); @@ -1988,15 +2114,16 @@ lnet_recv_delayed_msg_list(struct list_head *head) msg = list_entry(head->next, lnet_msg_t, msg_list); list_del(&msg->msg_list); - /* md won't disappear under me, since each msg - * holds a ref on it */ - + /* + * md won't disappear under me, since each msg + * holds a ref on it + */ id.nid = msg->msg_hdr.src_nid; id.pid = msg->msg_hdr.src_pid; LASSERT(msg->msg_rx_delayed); - LASSERT(msg->msg_md != NULL); - LASSERT(msg->msg_rxpeer != NULL); + LASSERT(msg->msg_md); + LASSERT(msg->msg_rxpeer); LASSERT(msg->msg_hdr.type == LNET_MSG_PUT); CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n", @@ -2064,7 +2191,6 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ @@ -2075,7 +2201,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, } msg = lnet_msg_alloc(); - if (msg == NULL) { + if (!msg) { CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n", libcfs_id2str(target)); return -ENOMEM; @@ -2086,11 +2212,11 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, lnet_res_lock(cpt); md = lnet_handle2md(&mdh); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n", match_bits, portal, libcfs_id2str(target), - md == NULL ? -1 : md->md_threshold); - if (md != NULL && md->md_me != NULL) + !md ? -1 : md->md_threshold); + if (md && md->md_me) CERROR("Source MD also attached to portal %d\n", md->md_me->me_portal); lnet_res_unlock(cpt); @@ -2128,9 +2254,9 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, lnet_build_msg_event(msg, LNET_EVENT_SEND); rc = lnet_send(self, msg, LNET_NID_ANY); - if (rc != 0) { + if (rc) { CNETERR("Error sending PUT to %s: %d\n", - libcfs_id2str(target), rc); + libcfs_id2str(target), rc); lnet_finalize(NULL, msg, rc); } @@ -2142,13 +2268,14 @@ EXPORT_SYMBOL(LNetPut); lnet_msg_t * lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) { - /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This + /* + * The LND can DMA direct to the GET md (i.e. no REPLY msg). This * returns a msg for the LND to pass to lnet_finalize() when the sink * data has been received. 
* * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when - * lnet_finalize() is called on it, so the LND must call this first */ - + * lnet_finalize() is called on it, so the LND must call this first + */ struct lnet_msg *msg = lnet_msg_alloc(); struct lnet_libmd *getmd = getmsg->msg_md; lnet_process_id_t peer_id = getmsg->msg_target; @@ -2157,26 +2284,26 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) LASSERT(!getmsg->msg_target_is_router); LASSERT(!getmsg->msg_routing); + if (!msg) { + CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", + libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id)); + goto drop; + } + cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie); lnet_res_lock(cpt); LASSERT(getmd->md_refcount > 0); - if (msg == NULL) { - CERROR("%s: Dropping REPLY from %s: can't allocate msg\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id)); - goto drop; - } - - if (getmd->md_threshold == 0) { + if (!getmd->md_threshold) { CERROR("%s: Dropping REPLY from %s for inactive MD %p\n", - libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), - getmd); + libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), + getmd); lnet_res_unlock(cpt); goto drop; } - LASSERT(getmd->md_offset == 0); + LASSERT(!getmd->md_offset); CDEBUG(D_NET, "%s: Reply from %s md %p\n", libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd); @@ -2209,7 +2336,7 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) the_lnet.ln_counters[cpt]->drop_length += getmd->md_length; lnet_net_unlock(cpt); - if (msg != NULL) + if (msg) lnet_msg_free(msg); return NULL; @@ -2219,14 +2346,18 @@ EXPORT_SYMBOL(lnet_create_reply_msg); void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len) { - /* Set the REPLY length, now the RDMA that elides the REPLY message has - * completed and I know it. */ - LASSERT(reply != NULL); + /* + * Set the REPLY length, now the RDMA that elides the REPLY message has + * completed and I know it. + */ + LASSERT(reply); LASSERT(reply->msg_type == LNET_MSG_GET); LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY); - /* NB I trusted my peer to RDMA. If she tells me she's written beyond - * the end of my buffer, I might as well be dead. */ + /* + * NB I trusted my peer to RDMA. If she tells me she's written beyond + * the end of my buffer, I might as well be dead. + */ LASSERT(len <= reply->msg_ev.mlength); reply->msg_ev.mlength = len; @@ -2264,7 +2395,6 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, int cpt; int rc; - LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */ @@ -2275,7 +2405,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, } msg = lnet_msg_alloc(); - if (msg == NULL) { + if (!msg) { CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n", libcfs_id2str(target)); return -ENOMEM; @@ -2285,11 +2415,11 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, lnet_res_lock(cpt); md = lnet_handle2md(&mdh); - if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) { + if (!md || !md->md_threshold || md->md_me) { CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n", match_bits, portal, libcfs_id2str(target), - md == NULL ? -1 : md->md_threshold); - if (md != NULL && md->md_me != NULL) + !md ? 
-1 : md->md_threshold); + if (md && md->md_me) CERROR("REPLY MD also attached to portal %d\n", md->md_me->me_portal); @@ -2323,7 +2453,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, rc = lnet_send(self, msg, LNET_NID_ANY); if (rc < 0) { CNETERR("Error sending GET to %s: %d\n", - libcfs_id2str(target), rc); + libcfs_id2str(target), rc); lnet_finalize(NULL, msg, rc); } @@ -2358,12 +2488,12 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) __u32 order = 2; struct list_head *rn_list; - /* if !local_nid_dist_zero, I don't return a distance of 0 ever + /* + * if !local_nid_dist_zero, I don't return a distance of 0 ever * (when lustre sees a distance of 0, it substitutes 0@lo), so I * keep order 0 free for 0@lo and order 1 free for a local NID - * match */ - - LASSERT(the_lnet.ln_init); + * match + */ LASSERT(the_lnet.ln_refcount > 0); cpt = lnet_net_lock_current(); @@ -2372,9 +2502,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) ni = list_entry(e, lnet_ni_t, ni_list); if (ni->ni_nid == dstnid) { - if (srcnidp != NULL) + if (srcnidp) *srcnidp = dstnid; - if (orderp != NULL) { + if (orderp) { if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND) *orderp = 0; else @@ -2386,9 +2516,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) } if (LNET_NIDNET(ni->ni_nid) == dstnet) { - if (srcnidp != NULL) + if (srcnidp) *srcnidp = ni->ni_nid; - if (orderp != NULL) + if (orderp) *orderp = order; lnet_net_unlock(cpt); return 1; @@ -2404,21 +2534,28 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) if (rnet->lrn_net == dstnet) { lnet_route_t *route; lnet_route_t *shortest = NULL; + __u32 shortest_hops = LNET_UNDEFINED_HOPS; + __u32 route_hops; LASSERT(!list_empty(&rnet->lrn_routes)); list_for_each_entry(route, &rnet->lrn_routes, - lr_list) { - if (shortest == NULL || - route->lr_hops < shortest->lr_hops) + lr_list) { + route_hops = route->lr_hops; + if (route_hops == LNET_UNDEFINED_HOPS) + route_hops = 1; + if (!shortest || + route_hops < shortest_hops) { shortest = route; + shortest_hops = route_hops; + } } - LASSERT(shortest != NULL); - hops = shortest->lr_hops; - if (srcnidp != NULL) + LASSERT(shortest); + hops = shortest_hops; + if (srcnidp) *srcnidp = shortest->lr_gateway->lp_ni->ni_nid; - if (orderp != NULL) + if (orderp) *orderp = order; lnet_net_unlock(cpt); return hops + 1; diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c index 43977e8df..f879d7f28 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-msg.c +++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c @@ -74,7 +74,6 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) ev->initiator.nid = LNET_NID_ANY; ev->initiator.pid = the_lnet.ln_pid; ev->sender = LNET_NID_ANY; - } else { /* event for passive message */ ev->target.pid = hdr->dest_pid; @@ -173,7 +172,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) lnet_event_t *ev = &msg->msg_ev; LASSERT(msg->msg_tx_committed); - if (status != 0) + if (status) goto out; counters = the_lnet.ln_counters[msg->msg_tx_cpt]; @@ -181,7 +180,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) default: /* routed message */ LASSERT(msg->msg_routing); LASSERT(msg->msg_rx_committed); - LASSERT(ev->type == 0); + LASSERT(!ev->type); counters->route_length += msg->msg_len; counters->route_count++; @@ -203,8 +202,10 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) case LNET_EVENT_GET: LASSERT(msg->msg_rx_committed); - /* overwritten while sending reply, we should never be - * here 
for optimized GET */ + /* + * overwritten while sending reply, we should never be + * here for optimized GET + */ LASSERT(msg->msg_type == LNET_MSG_REPLY); msg->msg_type = LNET_MSG_GET; /* fix type */ break; @@ -225,13 +226,13 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */ LASSERT(msg->msg_rx_committed); - if (status != 0) + if (status) goto out; counters = the_lnet.ln_counters[msg->msg_rx_cpt]; switch (ev->type) { default: - LASSERT(ev->type == 0); + LASSERT(!ev->type); LASSERT(msg->msg_routing); goto out; @@ -240,10 +241,12 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) break; case LNET_EVENT_GET: - /* type is "REPLY" if it's an optimized GET on passive side, + /* + * type is "REPLY" if it's an optimized GET on passive side, * because optimized GET will never be committed for sending, * so message type wouldn't be changed back to "GET" by - * lnet_msg_decommit_tx(), see details in lnet_parse_get() */ + * lnet_msg_decommit_tx(), see details in lnet_parse_get() + */ LASSERT(msg->msg_type == LNET_MSG_REPLY || msg->msg_type == LNET_MSG_GET); counters->send_length += msg->msg_wanted; @@ -254,8 +257,10 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) break; case LNET_EVENT_REPLY: - /* type is "GET" if it's an optimized GET on active side, - * see details in lnet_create_reply_msg() */ + /* + * type is "GET" if it's an optimized GET on active side, + * see details in lnet_create_reply_msg() + */ LASSERT(msg->msg_type == LNET_MSG_GET || msg->msg_type == LNET_MSG_REPLY); break; @@ -309,10 +314,12 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, unsigned int offset, unsigned int mlen) { /* NB: @offset and @len are only useful for receiving */ - /* Here, we attach the MD on lnet_msg and mark it busy and + /* + * Here, we attach the MD on lnet_msg and mark it busy and * decrementing its threshold. Come what may, the lnet_msg "owns" * the MD until a call to lnet_msg_detach_md or lnet_finalize() - * signals completion. */ + * signals completion. 
+ */ LASSERT(!msg->msg_routing); msg->msg_md = md; @@ -343,7 +350,7 @@ lnet_msg_detach_md(lnet_msg_t *msg, int status) LASSERT(md->md_refcount >= 0); unlink = lnet_md_unlinkable(md); - if (md->md_eq != NULL) { + if (md->md_eq) { msg->msg_ev.status = status; msg->msg_ev.unlinked = unlink; lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev); @@ -364,7 +371,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) LASSERT(msg->msg_onactivelist); - if (status == 0 && msg->msg_ack) { + if (!status && msg->msg_ack) { /* Only send an ACK if the PUT completed successfully */ lnet_msg_decommit(msg, cpt, 0); @@ -383,8 +390,10 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits; msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength); - /* NB: we probably want to use NID of msg::msg_from as 3rd - * parameter (router NID) if it's routed message */ + /* + * NB: we probably want to use NID of msg::msg_from as 3rd + * parameter (router NID) if it's routed message + */ rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY); lnet_net_lock(cpt); @@ -401,7 +410,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) */ return rc; - } else if (status == 0 && /* OK so far */ + } else if (!status && /* OK so far */ (msg->msg_routing && !msg->msg_sending)) { /* not forwarded */ LASSERT(!msg->msg_receiving); /* called back recv already */ @@ -442,7 +451,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) LASSERT(!in_interrupt()); - if (msg == NULL) + if (!msg) return; #if 0 CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n", @@ -458,12 +467,12 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) msg->msg_rtrcredit ? "F" : "", msg->msg_peerrtrcredit ? "f" : "", msg->msg_onactivelist ? "!" : "", - msg->msg_txpeer == NULL ? "" : libcfs_nid2str(msg->msg_txpeer->lp_nid), - msg->msg_rxpeer == NULL ? "" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); + !msg->msg_txpeer ? "" : libcfs_nid2str(msg->msg_txpeer->lp_nid), + !msg->msg_rxpeer ? "" : libcfs_nid2str(msg->msg_rxpeer->lp_nid)); #endif msg->msg_ev.status = status; - if (msg->msg_md != NULL) { + if (msg->msg_md) { cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie); lnet_res_lock(cpt); @@ -491,15 +500,16 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) container = the_lnet.ln_msg_containers[cpt]; list_add_tail(&msg->msg_list, &container->msc_finalizing); - /* Recursion breaker. Don't complete the message here if I am (or - * enough other threads are) already completing messages */ - + /* + * Recursion breaker. 
Don't complete the message here if I am (or + * enough other threads are) already completing messages + */ my_slot = -1; for (i = 0; i < container->msc_nfinalizers; i++) { if (container->msc_finalizers[i] == current) break; - if (my_slot < 0 && container->msc_finalizers[i] == NULL) + if (my_slot < 0 && !container->msc_finalizers[i]) my_slot = i; } @@ -512,21 +522,29 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) while (!list_empty(&container->msc_finalizing)) { msg = list_entry(container->msc_finalizing.next, - lnet_msg_t, msg_list); + lnet_msg_t, msg_list); list_del(&msg->msg_list); - /* NB drops and regains the lnet lock if it actually does - * anything, so my finalizing friends can chomp along too */ + /* + * NB drops and regains the lnet lock if it actually does + * anything, so my finalizing friends can chomp along too + */ rc = lnet_complete_msg_locked(msg, cpt); - if (rc != 0) + if (rc) break; } + if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) { + lnet_net_unlock(cpt); + lnet_delay_rule_check(); + lnet_net_lock(cpt); + } + container->msc_finalizers[my_slot] = NULL; lnet_net_unlock(cpt); - if (rc != 0) + if (rc) goto again; } EXPORT_SYMBOL(lnet_finalize); @@ -536,12 +554,12 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container) { int count = 0; - if (container->msc_init == 0) + if (!container->msc_init) return; while (!list_empty(&container->msc_active)) { lnet_msg_t *msg = list_entry(container->msc_active.next, - lnet_msg_t, msg_activelist); + lnet_msg_t, msg_activelist); LASSERT(msg->msg_onactivelist); msg->msg_onactivelist = 0; @@ -553,41 +571,23 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container) if (count > 0) CERROR("%d active msg on exit\n", count); - if (container->msc_finalizers != NULL) { + if (container->msc_finalizers) { LIBCFS_FREE(container->msc_finalizers, container->msc_nfinalizers * sizeof(*container->msc_finalizers)); container->msc_finalizers = NULL; } -#ifdef LNET_USE_LIB_FREELIST - lnet_freelist_fini(&container->msc_freelist); -#endif container->msc_init = 0; } int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) { - int rc; - container->msc_init = 1; INIT_LIST_HEAD(&container->msc_active); INIT_LIST_HEAD(&container->msc_finalizing); -#ifdef LNET_USE_LIB_FREELIST - memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t)); - - rc = lnet_freelist_init(&container->msc_freelist, - LNET_FL_MAX_MSGS, sizeof(lnet_msg_t)); - if (rc != 0) { - CERROR("Failed to init freelist for message container\n"); - lnet_msg_container_cleanup(container); - return rc; - } -#else - rc = 0; -#endif /* number of CPUs */ container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt); @@ -595,13 +595,13 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) container->msc_nfinalizers * sizeof(*container->msc_finalizers)); - if (container->msc_finalizers == NULL) { + if (!container->msc_finalizers) { CERROR("Failed to allocate message finalizers\n"); lnet_msg_container_cleanup(container); return -ENOMEM; } - return rc; + return 0; } void @@ -610,7 +610,7 @@ lnet_msg_containers_destroy(void) struct lnet_msg_container *container; int i; - if (the_lnet.ln_msg_containers == NULL) + if (!the_lnet.ln_msg_containers) return; cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) @@ -630,14 +630,14 @@ lnet_msg_containers_create(void) the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*container)); - if (the_lnet.ln_msg_containers == NULL) { + if (!the_lnet.ln_msg_containers) 
{ CERROR("Failed to allocate cpu-partition data for network\n"); return -ENOMEM; } cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) { rc = lnet_msg_container_setup(container, i); - if (rc != 0) { + if (rc) { lnet_msg_containers_destroy(); return rc; } diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c index bd7b071b2..3947e8b71 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c +++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -50,7 +45,7 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id, struct lnet_portal *ptl = the_lnet.ln_portals[index]; int unique; - unique = ignore_bits == 0 && + unique = !ignore_bits && match_id.nid != LNET_NID_ANY && match_id.pid != LNET_PID_ANY; @@ -139,8 +134,10 @@ static int lnet_try_match_md(lnet_libmd_t *md, struct lnet_match_info *info, struct lnet_msg *msg) { - /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock; - * lnet_match_blocked_msg() relies on this to avoid races */ + /* + * ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock; + * lnet_match_blocked_msg() relies on this to avoid races + */ unsigned int offset; unsigned int mlength; lnet_me_t *me = md->md_me; @@ -150,7 +147,7 @@ lnet_try_match_md(lnet_libmd_t *md, return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED; /* mismatched MD op */ - if ((md->md_options & info->mi_opc) == 0) + if (!(md->md_options & info->mi_opc)) return LNET_MATCHMD_NONE; /* mismatched ME nid/pid? */ @@ -163,17 +160,17 @@ lnet_try_match_md(lnet_libmd_t *md, return LNET_MATCHMD_NONE; /* mismatched ME matchbits? */ - if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0) + if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) return LNET_MATCHMD_NONE; /* Hurrah! This _is_ a match; check it out... */ - if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0) + if (!(md->md_options & LNET_MD_MANAGE_REMOTE)) offset = md->md_offset; else offset = info->mi_roffset; - if ((md->md_options & LNET_MD_MAX_SIZE) != 0) { + if (md->md_options & LNET_MD_MAX_SIZE) { mlength = md->md_max_size; LASSERT(md->md_offset + mlength <= md->md_length); } else { @@ -182,7 +179,7 @@ lnet_try_match_md(lnet_libmd_t *md, if (info->mi_rlength <= mlength) { /* fits in allowed space */ mlength = info->mi_rlength; - } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) { + } else if (!(md->md_options & LNET_MD_TRUNCATE)) { /* this packet _really_ is too big */ CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n", libcfs_id2str(info->mi_id), info->mi_mbits, @@ -203,10 +200,12 @@ lnet_try_match_md(lnet_libmd_t *md, if (!lnet_md_exhausted(md)) return LNET_MATCHMD_OK; - /* Auto-unlink NOW, so the ME gets unlinked if required. + /* + * Auto-unlink NOW, so the ME gets unlinked if required. * We bumped md->md_refcount above so the MD just gets flagged - * for unlink when it is finalized. */ - if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0) + * for unlink when it is finalized. 
+ */ + if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) lnet_md_unlink(md); return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED; @@ -239,7 +238,7 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id, ptl = the_lnet.ln_portals[index]; mtable = lnet_match2mt(ptl, id, mbits); - if (mtable != NULL) /* unique portal or only one match-table */ + if (mtable) /* unique portal or only one match-table */ return mtable; /* it's a wildcard portal */ @@ -248,8 +247,10 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id, return NULL; case LNET_INS_BEFORE: case LNET_INS_AFTER: - /* posted by no affinity thread, always hash to specific - * match-table to avoid buffer stealing which is heavy */ + /* + * posted by no affinity thread, always hash to specific + * match-table to avoid buffer stealing which is heavy + */ return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER]; case LNET_INS_LOCAL: /* posted by cpu-affinity thread */ @@ -274,7 +275,7 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)); mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits); - if (mtable != NULL) + if (mtable) return mtable; /* it's a wildcard portal */ @@ -298,10 +299,12 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) /* is there any active entry for this portal? */ nmaps = ptl->ptl_mt_nmaps; /* map to an active mtable to avoid heavy "stealing" */ - if (nmaps != 0) { - /* NB: there is possibility that ptl_mt_maps is being + if (nmaps) { + /* + * NB: there is possibility that ptl_mt_maps is being * changed because we are not under protection of - * lnet_ptl_lock, but it shouldn't hurt anything */ + * lnet_ptl_lock, but it shouldn't hurt anything + */ cpt = ptl->ptl_mt_maps[rotor % nmaps]; } } @@ -331,7 +334,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos) bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64]; pos &= (1 << LNET_MT_BITS_U64) - 1; - return ((*bmap) & (1ULL << pos)) != 0; + return (*bmap & (1ULL << pos)); } static void @@ -357,16 +360,15 @@ lnet_mt_match_head(struct lnet_match_table *mtable, lnet_process_id_t id, __u64 mbits) { struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal]; + unsigned long hash = mbits; - if (lnet_ptl_is_wildcard(ptl)) { - return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK]; - } else { - unsigned long hash = mbits + id.nid + id.pid; + if (!lnet_ptl_is_wildcard(ptl)) { + hash += id.nid + id.pid; LASSERT(lnet_ptl_is_unique(ptl)); hash = hash_long(hash, LNET_MT_HASH_BITS); - return &mtable->mt_mhash[hash]; } + return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK]; } int @@ -391,18 +393,20 @@ lnet_mt_match_md(struct lnet_match_table *mtable, list_for_each_entry_safe(me, tmp, head, me_list) { /* ME attached but MD not attached yet */ - if (me->me_md == NULL) + if (!me->me_md) continue; LASSERT(me == me->me_md->md_me); rc = lnet_try_match_md(me->me_md, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) == 0) + if (!(rc & LNET_MATCHMD_EXHAUSTED)) exhausted = 0; /* mlist is not empty */ - if ((rc & LNET_MATCHMD_FINISH) != 0) { - /* don't return EXHAUSTED bit because we don't know - * whether the mlist is empty or not */ + if (rc & LNET_MATCHMD_FINISH) { + /* + * don't return EXHAUSTED bit because we don't know + * whether the mlist is empty or not + */ return rc & ~LNET_MATCHMD_EXHAUSTED; } } @@ -413,7 +417,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable, exhausted = 0; } - if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) { + if 
(!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) { head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits); goto again; /* re-check MEs w/o ignore-bits */ } @@ -430,8 +434,10 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg) { int rc; - /* message arrived before any buffer posting on this portal, - * simply delay or drop this message */ + /* + * message arrived before any buffer posting on this portal, + * simply delay or drop this message + */ if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl))) return 0; @@ -446,7 +452,7 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg) if (msg->msg_rx_ready_delay) { msg->msg_rx_delayed = 1; list_add_tail(&msg->msg_list, - &ptl->ptl_msg_delayed); + &ptl->ptl_msg_delayed); } rc = LNET_MATCHMD_NONE; } else { @@ -465,9 +471,13 @@ lnet_ptl_match_delay(struct lnet_portal *ptl, int rc = 0; int i; - /* steal buffer from other CPTs, and delay it if nothing to steal, - * this function is more expensive than a regular match, but we - * don't expect it can happen a lot */ + /** + * Steal buffer from other CPTs, and delay msg if nothing to + * steal. This function is more expensive than a regular + * match, but we don't expect it can happen a lot. The return + * code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP, or + * LNET_MATCHMD_NONE. + */ LASSERT(lnet_ptl_is_wildcard(ptl)); for (i = 0; i < LNET_CPT_NUMBER; i++) { @@ -476,56 +486,77 @@ lnet_ptl_match_delay(struct lnet_portal *ptl, cpt = (first + i) % LNET_CPT_NUMBER; mtable = ptl->ptl_mtables[cpt]; - if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled) + if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled) continue; lnet_res_lock(cpt); lnet_ptl_lock(ptl); - if (i == 0) { /* the first try, attach on stealing list */ + if (!i) { + /* The first try, add to stealing list. */ list_add_tail(&msg->msg_list, - &ptl->ptl_msg_stealing); + &ptl->ptl_msg_stealing); } - if (!list_empty(&msg->msg_list)) { /* on stealing list */ + if (!list_empty(&msg->msg_list)) { + /* On stealing list. */ rc = lnet_mt_match_md(mtable, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && + if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) lnet_ptl_disable_mt(ptl, cpt); - if ((rc & LNET_MATCHMD_FINISH) != 0) + if (rc & LNET_MATCHMD_FINISH) { + /* Match found, remove from stealing list. */ + list_del_init(&msg->msg_list); + } else if (i == LNET_CPT_NUMBER - 1 || /* (1) */ + !ptl->ptl_mt_nmaps || /* (2) */ + (ptl->ptl_mt_nmaps == 1 && /* (3) */ + ptl->ptl_mt_maps[0] == cpt)) { + /** + * No match found, and this is either + * (1) the last cpt to check, or + * (2) there is no active cpt, or + * (3) this is the only active cpt. + * There is nothing to steal: delay or + * drop the message. + */ list_del_init(&msg->msg_list); - } else { - /* could be matched by lnet_ptl_attach_md() - * which is called by another thread */ - rc = msg->msg_md == NULL ? 
- LNET_MATCHMD_DROP : LNET_MATCHMD_OK; - } - - if (!list_empty(&msg->msg_list) && /* not matched yet */ - (i == LNET_CPT_NUMBER - 1 || /* the last CPT */ - ptl->ptl_mt_nmaps == 0 || /* no active CPT */ - (ptl->ptl_mt_nmaps == 1 && /* the only active CPT */ - ptl->ptl_mt_maps[0] == cpt))) { - /* nothing to steal, delay or drop */ - list_del_init(&msg->msg_list); - - if (lnet_ptl_is_lazy(ptl)) { - msg->msg_rx_delayed = 1; - list_add_tail(&msg->msg_list, - &ptl->ptl_msg_delayed); - rc = LNET_MATCHMD_NONE; + if (lnet_ptl_is_lazy(ptl)) { + msg->msg_rx_delayed = 1; + list_add_tail(&msg->msg_list, + &ptl->ptl_msg_delayed); + rc = LNET_MATCHMD_NONE; + } else { + rc = LNET_MATCHMD_DROP; + } } else { - rc = LNET_MATCHMD_DROP; + /* Do another iteration. */ + rc = 0; } + } else { + /** + * No longer on stealing list: another thread + * matched the message in lnet_ptl_attach_md(). + * We are now expected to handle the message. + */ + rc = !msg->msg_md ? + LNET_MATCHMD_DROP : LNET_MATCHMD_OK; } lnet_ptl_unlock(ptl); lnet_res_unlock(cpt); - if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed) + /** + * Note that test (1) above ensures that we always + * exit the loop through this break statement. + * + * LNET_MATCHMD_NONE means msg was added to the + * delayed queue, and we may no longer reference it + * after lnet_ptl_unlock() and lnet_res_unlock(). + */ + if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE)) break; } @@ -551,7 +582,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) ptl = the_lnet.ln_portals[info->mi_portal]; rc = lnet_ptl_match_early(ptl, msg); - if (rc != 0) /* matched or delayed early message */ + if (rc) /* matched or delayed early message */ return rc; mtable = lnet_mt_of_match(info, msg); @@ -563,13 +594,13 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) } rc = lnet_mt_match_md(mtable, info, msg); - if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) { + if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) { lnet_ptl_lock(ptl); lnet_ptl_disable_mt(ptl, mtable->mt_cpt); lnet_ptl_unlock(ptl); } - if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */ + if (rc & LNET_MATCHMD_FINISH) /* matched or dropping */ goto out1; if (!msg->msg_rx_ready_delay) @@ -587,13 +618,14 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) lnet_ptl_unlock(ptl); lnet_res_unlock(mtable->mt_cpt); - + rc = LNET_MATCHMD_NONE; } else { lnet_res_unlock(mtable->mt_cpt); rc = lnet_ptl_match_delay(ptl, info, msg); } - if (msg->msg_rx_delayed) { + /* LNET_MATCHMD_NONE means msg was added to the delay queue */ + if (rc & LNET_MATCHMD_NONE) { CDEBUG(D_NET, "Delaying %s from %s ptl %d MB %#llx off %d len %d\n", info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET", @@ -630,7 +662,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, int exhausted = 0; int cpt; - LASSERT(md->md_refcount == 0); /* a brand new MD */ + LASSERT(!md->md_refcount); /* a brand new MD */ me->me_md = md; md->md_me = me; @@ -664,15 +696,15 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, rc = lnet_try_match_md(md, &info, msg); - exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0; - if ((rc & LNET_MATCHMD_NONE) != 0) { + exhausted = (rc & LNET_MATCHMD_EXHAUSTED); + if (rc & LNET_MATCHMD_NONE) { if (exhausted) break; continue; } /* Hurrah! 
This _is_ a match */ - LASSERT((rc & LNET_MATCHMD_FINISH) != 0); + LASSERT(rc & LNET_MATCHMD_FINISH); list_del_init(&msg->msg_list); if (head == &ptl->ptl_msg_stealing) { @@ -682,7 +714,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, continue; } - if ((rc & LNET_MATCHMD_OK) != 0) { + if (rc & LNET_MATCHMD_OK) { list_add_tail(&msg->msg_list, matches); CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n", @@ -717,7 +749,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) struct lnet_match_table *mtable; int i; - if (ptl->ptl_mtables == NULL) /* uninitialized portal */ + if (!ptl->ptl_mtables) /* uninitialized portal */ return; LASSERT(list_empty(&ptl->ptl_msg_delayed)); @@ -727,7 +759,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) lnet_me_t *me; int j; - if (mtable->mt_mhash == NULL) /* uninitialized match-table */ + if (!mtable->mt_mhash) /* uninitialized match-table */ continue; mhash = mtable->mt_mhash; @@ -735,7 +767,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) { while (!list_empty(&mhash[j])) { me = list_entry(mhash[j].next, - lnet_me_t, me_list); + lnet_me_t, me_list); CERROR("Active ME %p on exit\n", me); list_del(&me->me_list); lnet_me_free(me); @@ -759,7 +791,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index) ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(), sizeof(struct lnet_match_table)); - if (ptl->ptl_mtables == NULL) { + if (!ptl->ptl_mtables) { CERROR("Failed to create match table for portal %d\n", index); return -ENOMEM; } @@ -772,7 +804,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index) /* the extra entry is for MEs with ignore bits */ LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1)); - if (mhash == NULL) { + if (!mhash) { CERROR("Failed to create match hash for portal %d\n", index); goto failed; @@ -800,7 +832,7 @@ lnet_portals_destroy(void) { int i; - if (the_lnet.ln_portals == NULL) + if (!the_lnet.ln_portals) return; for (i = 0; i < the_lnet.ln_nportals; i++) @@ -820,7 +852,7 @@ lnet_portals_create(void) the_lnet.ln_nportals = MAX_PORTALS; the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size); - if (the_lnet.ln_portals == NULL) { + if (!the_lnet.ln_portals) { CERROR("Failed to allocate portals table\n"); return -ENOMEM; } @@ -886,17 +918,8 @@ LNetSetLazyPortal(int portal) } EXPORT_SYMBOL(LNetSetLazyPortal); -/** - * Turn off the lazy portal attribute. Delayed requests on the portal, - * if any, will be all dropped when this function returns. - * - * \param portal Index of the portal to disable the lazy attribute on. - * - * \retval 0 On success. - * \retval -EINVAL If \a portal is not a valid index. 
- */ int -LNetClearLazyPortal(int portal) +lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason) { struct lnet_portal *ptl; LIST_HEAD(zombies); @@ -915,21 +938,48 @@ LNetClearLazyPortal(int portal) return 0; } - if (the_lnet.ln_shutdown) - CWARN("Active lazy portal %d on exit\n", portal); - else - CDEBUG(D_NET, "clearing portal %d lazy\n", portal); + if (ni) { + struct lnet_msg *msg, *tmp; + + /* grab all messages which are on the NI passed in */ + list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed, + msg_list) { + if (msg->msg_rxpeer->lp_ni == ni) + list_move(&msg->msg_list, &zombies); + } + } else { + if (the_lnet.ln_shutdown) + CWARN("Active lazy portal %d on exit\n", portal); + else + CDEBUG(D_NET, "clearing portal %d lazy\n", portal); - /* grab all the blocked messages atomically */ - list_splice_init(&ptl->ptl_msg_delayed, &zombies); + /* grab all the blocked messages atomically */ + list_splice_init(&ptl->ptl_msg_delayed, &zombies); - lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY); + lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY); + } lnet_ptl_unlock(ptl); lnet_res_unlock(LNET_LOCK_EX); - lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr"); + lnet_drop_delayed_msg_list(&zombies, reason); return 0; } + +/** + * Turn off the lazy portal attribute. Delayed requests on the portal, + * if any, will be all dropped when this function returns. + * + * \param portal Index of the portal to disable the lazy attribute on. + * + * \retval 0 On success. + * \retval -EINVAL If \a portal is not a valid index. + */ +int +LNetClearLazyPortal(int portal) +{ + return lnet_clear_lazy_portal(NULL, portal, + "Clearing lazy portal attr"); +} EXPORT_SYMBOL(LNetClearLazyPortal); diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c index 589ecc84d..891fd5940 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-socket.c +++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c @@ -64,7 +64,7 @@ lnet_sock_ioctl(int cmd, unsigned long arg) int rc; rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); - if (rc != 0) { + if (rc) { CERROR("Can't create socket: %d\n", rc); return rc; } @@ -99,14 +99,17 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ); - strcpy(ifr.ifr_name, name); + if (strlen(name) > sizeof(ifr.ifr_name) - 1) + return -E2BIG; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr); - if (rc != 0) { + if (rc) { CERROR("Can't get flags for interface %s\n", name); return rc; } - if ((ifr.ifr_flags & IFF_UP) == 0) { + if (!(ifr.ifr_flags & IFF_UP)) { CDEBUG(D_NET, "Interface %s down\n", name); *up = 0; *ip = *mask = 0; @@ -114,10 +117,13 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) } *up = 1; - strcpy(ifr.ifr_name, name); + if (strlen(name) > sizeof(ifr.ifr_name) - 1) + return -E2BIG; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + ifr.ifr_addr.sa_family = AF_INET; rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr); - if (rc != 0) { + if (rc) { CERROR("Can't get IP address for interface %s\n", name); return rc; } @@ -125,10 +131,13 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr; *ip = ntohl(val); - strcpy(ifr.ifr_name, name); + if (strlen(name) > sizeof(ifr.ifr_name) - 1) + return -E2BIG; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + ifr.ifr_addr.sa_family = AF_INET; rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned 
long)&ifr); - if (rc != 0) { + if (rc) { CERROR("Can't get netmask for interface %s\n", name); return rc; } @@ -157,15 +166,15 @@ lnet_ipif_enumerate(char ***namesp) nalloc = 16; /* first guess at max interfaces */ toobig = 0; for (;;) { - if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { + if (nalloc * sizeof(*ifr) > PAGE_SIZE) { toobig = 1; - nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); + nalloc = PAGE_SIZE / sizeof(*ifr); CWARN("Too many interfaces: only enumerating first %d\n", nalloc); } LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr)); - if (ifr == NULL) { + if (!ifr) { CERROR("ENOMEM enumerating up to %d interfaces\n", nalloc); rc = -ENOMEM; @@ -181,9 +190,9 @@ lnet_ipif_enumerate(char ***namesp) goto out1; } - LASSERT(rc == 0); + LASSERT(!rc); - nfound = ifc.ifc_len/sizeof(*ifr); + nfound = ifc.ifc_len / sizeof(*ifr); LASSERT(nfound <= nalloc); if (nfound < nalloc || toobig) @@ -193,11 +202,11 @@ lnet_ipif_enumerate(char ***namesp) nalloc *= 2; } - if (nfound == 0) + if (!nfound) goto out1; LIBCFS_ALLOC(names, nfound * sizeof(*names)); - if (names == NULL) { + if (!names) { rc = -ENOMEM; goto out1; } @@ -213,7 +222,7 @@ lnet_ipif_enumerate(char ***namesp) } LIBCFS_ALLOC(names[i], IFNAMSIZ); - if (names[i] == NULL) { + if (!names[i]) { rc = -ENOMEM; goto out2; } @@ -242,7 +251,7 @@ lnet_ipif_free_enumeration(char **names, int n) LASSERT(n > 0); - for (i = 0; i < n && names[i] != NULL; i++) + for (i = 0; i < n && names[i]; i++) LIBCFS_FREE(names[i], IFNAMSIZ); LIBCFS_FREE(names, n * sizeof(*names)); @@ -253,32 +262,30 @@ int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) { int rc; - long ticks = timeout * HZ; + long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); unsigned long then; struct timeval tv; LASSERT(nob > 0); - /* Caller may pass a zero timeout if she thinks the socket buffer is - * empty enough to take the whole message immediately */ - + /* + * Caller may pass a zero timeout if she thinks the socket buffer is + * empty enough to take the whole message immediately + */ for (;;) { struct kvec iov = { .iov_base = buffer, .iov_len = nob }; struct msghdr msg = { - .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0 + .msg_flags = !timeout ? 
MSG_DONTWAIT : 0 }; - if (timeout != 0) { + if (timeout) { /* Set send timeout to remaining time */ - tv = (struct timeval) { - .tv_sec = ticks / HZ, - .tv_usec = ((ticks % HZ) * 1000000) / HZ - }; + jiffies_to_timeval(jiffies_left, &tv); rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv, sizeof(tv)); - if (rc != 0) { + if (rc) { CERROR("Can't set socket send timeout %ld.%06d: %d\n", (long)tv.tv_sec, (int)tv.tv_usec, rc); return rc; @@ -287,7 +294,7 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) then = jiffies; rc = kernel_sendmsg(sock, &msg, &iov, 1, nob); - ticks -= jiffies - then; + jiffies_left -= jiffies - then; if (rc == nob) return 0; @@ -295,12 +302,12 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) if (rc < 0) return rc; - if (rc == 0) { + if (!rc) { CERROR("Unexpected zero rc\n"); return -ECONNABORTED; } - if (ticks <= 0) + if (jiffies_left <= 0) return -EAGAIN; buffer = ((char *)buffer) + rc; @@ -314,12 +321,12 @@ int lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) { int rc; - long ticks = timeout * HZ; + long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC); unsigned long then; struct timeval tv; LASSERT(nob > 0); - LASSERT(ticks > 0); + LASSERT(jiffies_left > 0); for (;;) { struct kvec iov = { @@ -331,13 +338,10 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) }; /* Set receive timeout to remaining time */ - tv = (struct timeval) { - .tv_sec = ticks / HZ, - .tv_usec = ((ticks % HZ) * 1000000) / HZ - }; + jiffies_to_timeval(jiffies_left, &tv); rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (char *)&tv, sizeof(tv)); - if (rc != 0) { + if (rc) { CERROR("Can't set socket recv timeout %ld.%06d: %d\n", (long)tv.tv_sec, (int)tv.tv_usec, rc); return rc; @@ -345,21 +349,21 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) then = jiffies; rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0); - ticks -= jiffies - then; + jiffies_left -= jiffies - then; if (rc < 0) return rc; - if (rc == 0) + if (!rc) return -ECONNRESET; buffer = ((char *)buffer) + rc; nob -= rc; - if (nob == 0) + if (!nob) return 0; - if (ticks <= 0) + if (jiffies_left <= 0) return -ETIMEDOUT; } } @@ -379,7 +383,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); *sockp = sock; - if (rc != 0) { + if (rc) { CERROR("Can't create socket: %d\n", rc); return rc; } @@ -387,16 +391,16 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, option = 1; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&option, sizeof(option)); - if (rc != 0) { + if (rc) { CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc); goto failed; } - if (local_ip != 0 || local_port != 0) { + if (local_ip || local_port) { memset(&locaddr, 0, sizeof(locaddr)); locaddr.sin_family = AF_INET; locaddr.sin_port = htons(local_port); - locaddr.sin_addr.s_addr = (local_ip == 0) ? + locaddr.sin_addr.s_addr = !local_ip ? 
INADDR_ANY : htonl(local_ip); rc = kernel_bind(sock, (struct sockaddr *)&locaddr, @@ -406,7 +410,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, *fatal = 0; goto failed; } - if (rc != 0) { + if (rc) { CERROR("Error trying to bind to port %d: %d\n", local_port, rc); goto failed; @@ -425,22 +429,22 @@ lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize) int option; int rc; - if (txbufsize != 0) { + if (txbufsize) { option = txbufsize; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, (char *)&option, sizeof(option)); - if (rc != 0) { + if (rc) { CERROR("Can't set send buffer %d: %d\n", option, rc); return rc; } } - if (rxbufsize != 0) { + if (rxbufsize) { option = rxbufsize; rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, - (char *)&option, sizeof(option)); - if (rc != 0) { + (char *)&option, sizeof(option)); + if (rc) { CERROR("Can't set receive buffer %d: %d\n", option, rc); return rc; @@ -461,16 +465,16 @@ lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port) rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len); else rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len); - if (rc != 0) { + if (rc) { CERROR("Error %d getting sock %s IP/port\n", rc, remote ? "peer" : "local"); return rc; } - if (ip != NULL) + if (ip) *ip = ntohl(sin.sin_addr.s_addr); - if (port != NULL) + if (port) *port = ntohs(sin.sin_port); return 0; @@ -480,10 +484,10 @@ EXPORT_SYMBOL(lnet_sock_getaddr); int lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize) { - if (txbufsize != NULL) + if (txbufsize) *txbufsize = sock->sk->sk_sndbuf; - if (rxbufsize != NULL) + if (rxbufsize) *rxbufsize = sock->sk->sk_rcvbuf; return 0; @@ -498,7 +502,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, int rc; rc = lnet_sock_create(sockp, &fatal, local_ip, local_port); - if (rc != 0) { + if (rc) { if (!fatal) CERROR("Can't create socket: port %d already in use\n", local_port); @@ -506,14 +510,13 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, } rc = kernel_listen(*sockp, backlog); - if (rc == 0) + if (!rc) return 0; CERROR("Can't set listen backlog %d: %d\n", backlog, rc); sock_release(*sockp); return rc; } -EXPORT_SYMBOL(lnet_sock_listen); int lnet_sock_accept(struct socket **newsockp, struct socket *sock) @@ -522,10 +525,10 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock) struct socket *newsock; int rc; - init_waitqueue_entry(&wait, current); - - /* XXX this should add a ref to sock->ops->owner, if - * TCP could be a module */ + /* + * XXX this should add a ref to sock->ops->owner, if + * TCP could be a module + */ rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock); if (rc) { CERROR("Can't allocate socket\n"); @@ -537,15 +540,15 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock) rc = sock->ops->accept(sock, newsock, O_NONBLOCK); if (rc == -EAGAIN) { /* Nothing ready, so wait for activity */ - set_current_state(TASK_INTERRUPTIBLE); + init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sock->sk), &wait); + set_current_state(TASK_INTERRUPTIBLE); schedule(); remove_wait_queue(sk_sleep(sock->sk), &wait); - set_current_state(TASK_RUNNING); rc = sock->ops->accept(sock, newsock, O_NONBLOCK); } - if (rc != 0) + if (rc) goto failed; *newsockp = newsock; @@ -555,7 +558,6 @@ failed: sock_release(newsock); return rc; } -EXPORT_SYMBOL(lnet_sock_accept); int lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, @@ -565,7 
+567,7 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, int rc; rc = lnet_sock_create(sockp, fatal, local_ip, local_port); - if (rc != 0) + if (rc) return rc; memset(&srvaddr, 0, sizeof(srvaddr)); @@ -575,13 +577,15 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr, sizeof(srvaddr), 0); - if (rc == 0) + if (!rc) return 0; - /* EADDRNOTAVAIL probably means we're already connected to the same + /* + * EADDRNOTAVAIL probably means we're already connected to the same * peer/port on the same local port on a differently typed * connection. Let our caller retry with a different local - * port... */ + * port... + */ *fatal = !(rc == -EADDRNOTAVAIL); CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET, @@ -591,4 +595,3 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, sock_release(*sockp); return rc; } -EXPORT_SYMBOL(lnet_sock_connect); diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c index 2a137f468..468eda611 100644 --- a/drivers/staging/lustre/lnet/lnet/lo.c +++ b/drivers/staging/lustre/lnet/lnet/lo.c @@ -46,15 +46,15 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) static int lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, - int delayed, unsigned int niov, - struct kvec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) + int delayed, unsigned int niov, + struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { lnet_msg_t *sendmsg = private; - if (lntmsg != NULL) { /* not discarding */ - if (sendmsg->msg_iov != NULL) { - if (iov != NULL) + if (lntmsg) { /* not discarding */ + if (sendmsg->msg_iov) { + if (iov) lnet_copy_iov2iov(niov, iov, offset, sendmsg->msg_niov, sendmsg->msg_iov, @@ -65,7 +65,7 @@ lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, sendmsg->msg_iov, sendmsg->msg_offset, mlen); } else { - if (iov != NULL) + if (iov) lnet_copy_kiov2iov(niov, iov, offset, sendmsg->msg_niov, sendmsg->msg_kiov, diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c index c93c00752..93037c116 100644 --- a/drivers/staging/lustre/lnet/lnet/module.c +++ b/drivers/staging/lustre/lnet/lnet/module.c @@ -36,6 +36,7 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" static int config_on_load; module_param(config_on_load, int, 0444); @@ -52,13 +53,21 @@ lnet_configure(void *arg) mutex_lock(&lnet_config_mutex); if (!the_lnet.ln_niinit_self) { - rc = LNetNIInit(LUSTRE_SRV_LNET_PID); + rc = try_module_get(THIS_MODULE); + + if (rc != 1) + goto out; + + rc = LNetNIInit(LNET_PID_LUSTRE); if (rc >= 0) { the_lnet.ln_niinit_self = 1; rc = 0; + } else { + module_put(THIS_MODULE); } } +out: mutex_unlock(&lnet_config_mutex); return rc; } @@ -73,6 +82,7 @@ lnet_unconfigure(void) if (the_lnet.ln_niinit_self) { the_lnet.ln_niinit_self = 0; LNetNIFini(); + module_put(THIS_MODULE); } mutex_lock(&the_lnet.ln_api_mutex); @@ -80,28 +90,93 @@ lnet_unconfigure(void) mutex_unlock(&the_lnet.ln_api_mutex); mutex_unlock(&lnet_config_mutex); - return (refcount == 0) ? 0 : -EBUSY; + return !refcount ? 
0 : -EBUSY; } static int -lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data) +lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr) +{ + struct lnet_ioctl_config_data *conf = + (struct lnet_ioctl_config_data *)hdr; + int rc; + + if (conf->cfg_hdr.ioc_len < sizeof(*conf)) + return -EINVAL; + + mutex_lock(&lnet_config_mutex); + if (!the_lnet.ln_niinit_self) { + rc = -EINVAL; + goto out_unlock; + } + rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, + conf->cfg_config_u.cfg_net.net_intf, + conf->cfg_config_u.cfg_net.net_peer_timeout, + conf->cfg_config_u.cfg_net.net_peer_tx_credits, + conf->cfg_config_u.cfg_net.net_peer_rtr_credits, + conf->cfg_config_u.cfg_net.net_max_tx_credits); +out_unlock: + mutex_unlock(&lnet_config_mutex); + + return rc; +} + +static int +lnet_dyn_unconfigure(struct libcfs_ioctl_hdr *hdr) +{ + struct lnet_ioctl_config_data *conf = + (struct lnet_ioctl_config_data *)hdr; + int rc; + + if (conf->cfg_hdr.ioc_len < sizeof(*conf)) + return -EINVAL; + + mutex_lock(&lnet_config_mutex); + if (!the_lnet.ln_niinit_self) { + rc = -EINVAL; + goto out_unlock; + } + rc = lnet_dyn_del_ni(conf->cfg_net); +out_unlock: + mutex_unlock(&lnet_config_mutex); + + return rc; +} + +static int +lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr) { int rc; switch (cmd) { - case IOC_LIBCFS_CONFIGURE: + case IOC_LIBCFS_CONFIGURE: { + struct libcfs_ioctl_data *data = + (struct libcfs_ioctl_data *)hdr; + + if (data->ioc_hdr.ioc_len < sizeof(*data)) + return -EINVAL; + + the_lnet.ln_nis_from_mod_params = data->ioc_flags; return lnet_configure(NULL); + } case IOC_LIBCFS_UNCONFIGURE: return lnet_unconfigure(); + case IOC_LIBCFS_ADD_NET: + return lnet_dyn_configure(hdr); + + case IOC_LIBCFS_DEL_NET: + return lnet_dyn_unconfigure(hdr); + default: - /* Passing LNET_PID_ANY only gives me a ref if the net is up + /* + * Passing LNET_PID_ANY only gives me a ref if the net is up * already; I'll need it to ensure the net can't go down while - * I'm called into it */ + * I'm called into it + */ rc = LNetNIInit(LNET_PID_ANY); if (rc >= 0) { - rc = LNetCtl(cmd, data); + rc = LNetCtl(cmd, hdr); LNetNIFini(); } return rc; @@ -110,46 +185,46 @@ lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data) static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl); -static int __init -init_lnet(void) +static int __init lnet_init(void) { int rc; mutex_init(&lnet_config_mutex); - rc = lnet_init(); - if (rc != 0) { - CERROR("lnet_init: error %d\n", rc); + rc = lnet_lib_init(); + if (rc) { + CERROR("lnet_lib_init: error %d\n", rc); return rc; } rc = libcfs_register_ioctl(&lnet_ioctl_handler); - LASSERT(rc == 0); + LASSERT(!rc); if (config_on_load) { - /* Have to schedule a separate thread to avoid deadlocking - * in modload */ + /* + * Have to schedule a separate thread to avoid deadlocking + * in modload + */ (void) kthread_run(lnet_configure, NULL, "lnet_initd"); } return 0; } -static void __exit -fini_lnet(void) +static void __exit lnet_exit(void) { int rc; rc = libcfs_deregister_ioctl(&lnet_ioctl_handler); - LASSERT(rc == 0); + LASSERT(!rc); - lnet_fini(); + lnet_lib_exit(); } MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("LNet v3.1"); +MODULE_DESCRIPTION("Lustre Networking layer"); +MODULE_VERSION(LNET_VERSION); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); -module_init(init_lnet); -module_exit(fini_lnet); +module_init(lnet_init); +module_exit(lnet_exit); diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c new file mode 100644 index 000000000..7d76f28d3 --- /dev/null +++ b/drivers/staging/lustre/lnet/lnet/net_fault.c @@ -0,0 +1,1025 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2014, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Seagate, Inc. + * + * lnet/lnet/net_fault.c + * + * Lustre network fault simulation + * + * Author: liang.zhen@intel.com + */ + +#define DEBUG_SUBSYSTEM S_LNET + +#include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lnetctl.h" + +#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \ + LNET_GET_BIT | LNET_REPLY_BIT) + +struct lnet_drop_rule { + /** link chain on the_lnet.ln_drop_rules */ + struct list_head dr_link; + /** attributes of this rule */ + struct lnet_fault_attr dr_attr; + /** lock to protect \a dr_drop_at and \a dr_stat */ + spinlock_t dr_lock; + /** + * the message sequence to drop, which means message is dropped when + * dr_stat.drs_count == dr_drop_at + */ + unsigned long dr_drop_at; + /** + * seconds to drop the next message, it's exclusive with dr_drop_at + */ + unsigned long dr_drop_time; + /** baseline to caculate dr_drop_time */ + unsigned long dr_time_base; + /** statistic of dropped messages */ + struct lnet_fault_stat dr_stat; +}; + +static bool +lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid) +{ + if (nid == msg_nid || nid == LNET_NID_ANY) + return true; + + if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid)) + return false; + + /* 255.255.255.255@net is wildcard for all addresses in a network */ + return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY); +} + +static bool +lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src, + lnet_nid_t dst, unsigned int type, unsigned int portal) +{ + if (!lnet_fault_nid_match(attr->fa_src, src) || + !lnet_fault_nid_match(attr->fa_dst, dst)) + return false; + + if (!(attr->fa_msg_mask & (1 << type))) + return false; + + /** + * NB: ACK and REPLY have no portal, but they should have been + * rejected by message mask + */ + if (attr->fa_ptl_mask && /* has portal filter */ + !(attr->fa_ptl_mask & (1ULL << portal))) + return false; + + return true; +} + +static int +lnet_fault_attr_validate(struct lnet_fault_attr *attr) +{ + if (!attr->fa_msg_mask) + attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */ + + if (!attr->fa_ptl_mask) /* no 
+
+/**
+ * LNet message drop simulation
+ */
+
+/**
+ * Add a new drop rule to LNet
+ * There is no check for duplicate drop rules; every rule is checked
+ * against each incoming message.
+ */
+static int
+lnet_drop_rule_add(struct lnet_fault_attr *attr)
+{
+	struct lnet_drop_rule *rule;
+
+	if (attr->u.drop.da_rate && attr->u.drop.da_interval) {
+		CDEBUG(D_NET, "please provide either drop rate or drop interval, but not both at the same time %d/%d\n",
+		       attr->u.drop.da_rate, attr->u.drop.da_interval);
+		return -EINVAL;
+	}
+
+	if (lnet_fault_attr_validate(attr))
+		return -EINVAL;
+
+	CFS_ALLOC_PTR(rule);
+	if (!rule)
+		return -ENOMEM;
+
+	spin_lock_init(&rule->dr_lock);
+
+	rule->dr_attr = *attr;
+	if (attr->u.drop.da_interval) {
+		rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
+		rule->dr_drop_time = cfs_time_shift(cfs_rand() %
+						    attr->u.drop.da_interval);
+	} else {
+		rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+	}
+
+	lnet_net_lock(LNET_LOCK_EX);
+	list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
+	       libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
+	       attr->u.drop.da_rate, attr->u.drop.da_interval);
+	return 0;
+}
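/*
 * Illustrative use of lnet_drop_rule_add() above -- an assumed example,
 * not part of the patch.  The NIDs and the portal number are hypothetical;
 * the struct fields are the ones consumed by the matching code in this
 * file.  Note that da_rate and da_interval are mutually exclusive.
 */
static int example_add_drop_rule(void)
{
	struct lnet_fault_attr attr = { 0 };

	attr.fa_src = libcfs_str2nid("10.0.0.1@tcp");	/* hypothetical NID */
	attr.fa_dst = libcfs_str2nid("10.0.0.2@tcp");	/* hypothetical NID */
	attr.fa_msg_mask = LNET_PUT_BIT;	/* filter PUTs only */
	attr.fa_ptl_mask = 1ULL << 3;		/* only portal 3 */
	attr.u.drop.da_rate = 10;		/* drop 1 of every 10 matches */
	attr.u.drop.da_interval = 0;		/* exclusive with da_rate */

	return lnet_drop_rule_add(&attr);
}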
+
+/**
+ * Remove matched drop rules from LNet; all rules that can match \a src and
+ * \a dst will be removed.
+ * If \a src is zero, then all rules that have \a dst as destination will be
+ * removed.
+ * If \a dst is zero, then all rules that have \a src as source will be
+ * removed.
+ * If both of them are zero, all rules will be removed.
+ */
+static int
+lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
+{
+	struct lnet_drop_rule *rule;
+	struct lnet_drop_rule *tmp;
+	struct list_head zombies;
+	int n = 0;
+
+	INIT_LIST_HEAD(&zombies);
+
+	lnet_net_lock(LNET_LOCK_EX);
+	list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
+		if (rule->dr_attr.fa_src != src && src)
+			continue;
+
+		if (rule->dr_attr.fa_dst != dst && dst)
+			continue;
+
+		list_move(&rule->dr_link, &zombies);
+	}
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
+		CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
+		       libcfs_nid2str(rule->dr_attr.fa_src),
+		       libcfs_nid2str(rule->dr_attr.fa_dst),
+		       rule->dr_attr.u.drop.da_rate,
+		       rule->dr_attr.u.drop.da_interval);
+
+		list_del(&rule->dr_link);
+		CFS_FREE_PTR(rule);
+		n++;
+	}
+
+	return n;
+}
+
+/**
+ * List the drop rule at position \a pos
+ */
+static int
+lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
+		    struct lnet_fault_stat *stat)
+{
+	struct lnet_drop_rule *rule;
+	int cpt;
+	int i = 0;
+	int rc = -ENOENT;
+
+	cpt = lnet_net_lock_current();
+	list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
+		if (i++ < pos)
+			continue;
+
+		spin_lock(&rule->dr_lock);
+		*attr = rule->dr_attr;
+		*stat = rule->dr_stat;
+		spin_unlock(&rule->dr_lock);
+		rc = 0;
+		break;
+	}
+
+	lnet_net_unlock(cpt);
+	return rc;
+}
+
+/**
+ * reset counters for all drop rules
+ */
+static void
+lnet_drop_rule_reset(void)
+{
+	struct lnet_drop_rule *rule;
+	int cpt;
+
+	cpt = lnet_net_lock_current();
+
+	list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
+		struct lnet_fault_attr *attr = &rule->dr_attr;
+
+		spin_lock(&rule->dr_lock);
+
+		memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
+		if (attr->u.drop.da_rate) {
+			rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+		} else {
+			rule->dr_drop_time = cfs_time_shift(cfs_rand() %
+					     attr->u.drop.da_interval);
+			rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
+		}
+		spin_unlock(&rule->dr_lock);
+	}
+
+	lnet_net_unlock(cpt);
+}
+
+/**
+ * check source/destination NID, portal, message type and drop rate,
+ * decide whether this message should be dropped or not
+ */
+static bool
+drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
+		lnet_nid_t dst, unsigned int type, unsigned int portal)
+{
+	struct lnet_fault_attr *attr = &rule->dr_attr;
+	bool drop;
+
+	if (!lnet_fault_attr_match(attr, src, dst, type, portal))
+		return false;
+
+	/* match this rule, check drop rate now */
+	spin_lock(&rule->dr_lock);
+	if (rule->dr_drop_time) { /* time based drop */
+		unsigned long now = cfs_time_current();
+
+		rule->dr_stat.fs_count++;
+		drop = cfs_time_aftereq(now, rule->dr_drop_time);
+		if (drop) {
+			if (cfs_time_after(now, rule->dr_time_base))
+				rule->dr_time_base = now;
+
+			rule->dr_drop_time = rule->dr_time_base +
+					     cfs_time_seconds(cfs_rand() %
+					     attr->u.drop.da_interval);
+			rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval);
+
+			CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
+			       libcfs_nid2str(attr->fa_src),
+			       libcfs_nid2str(attr->fa_dst),
+			       rule->dr_drop_time);
+		}
+
+	} else { /* rate based drop */
+		__u64 count;
+
+		drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
+		count = rule->dr_stat.fs_count;
+		if (!do_div(count, attr->u.drop.da_rate)) {
+			rule->dr_drop_at = rule->dr_stat.fs_count +
+					   cfs_rand() % attr->u.drop.da_rate;
+			CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
+			       libcfs_nid2str(attr->fa_src),
+			       libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
+		}
+	}
+
+	if (drop) { /* drop this message, update counters */
+		lnet_fault_stat_inc(&rule->dr_stat, type);
+		rule->dr_stat.u.drop.ds_dropped++;
+	}
+
+	spin_unlock(&rule->dr_lock);
+	return drop;
+}
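/*
 * Illustrative sketch of the rate-based arithmetic in drop_rule_match()
 * above -- a self-contained userspace stand-in (rand() replaces cfs_rand(),
 * a plain modulo replaces do_div()), not part of the patch.  The scheme
 * drops exactly one message per window of `rate' messages, at a uniformly
 * random offset within each window.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct rate_drop {
	uint64_t count;		/* messages seen so far (fs_count) */
	uint64_t drop_at;	/* sequence number to drop (dr_drop_at) */
	unsigned int rate;	/* window size (da_rate) */
};

static void rate_drop_init(struct rate_drop *rd, unsigned int rate)
{
	rd->count = 0;
	rd->rate = rate;
	rd->drop_at = rand() % rate;	/* as lnet_drop_rule_add() seeds it */
}

static bool rate_drop_check(struct rate_drop *rd)
{
	bool drop = rd->count++ == rd->drop_at;

	/* window boundary: pick a random drop point inside the next window */
	if (!(rd->count % rd->rate))
		rd->drop_at = rd->count + rand() % rd->rate;

	return drop;
}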
+
+/**
+ * Check if a message from \a src to \a dst can match any existing drop rule
+ */
+bool
+lnet_drop_rule_match(lnet_hdr_t *hdr)
+{
+	struct lnet_drop_rule *rule;
+	lnet_nid_t src = le64_to_cpu(hdr->src_nid);
+	lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
+	unsigned int typ = le32_to_cpu(hdr->type);
+	unsigned int ptl = -1;
+	bool drop = false;
+	int cpt;
+
+	/**
+	 * NB: if a portal is specified, then only PUT and GET will be
+	 * filtered by drop rules
+	 */
+	if (typ == LNET_MSG_PUT)
+		ptl = le32_to_cpu(hdr->msg.put.ptl_index);
+	else if (typ == LNET_MSG_GET)
+		ptl = le32_to_cpu(hdr->msg.get.ptl_index);
+
+	cpt = lnet_net_lock_current();
+	list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
+		drop = drop_rule_match(rule, src, dst, typ, ptl);
+		if (drop)
+			break;
+	}
+
+	lnet_net_unlock(cpt);
+	return drop;
+}
+
+/**
+ * LNet Delay Simulation
+ */
+/** timestamp (second) to send delayed message */
+#define msg_delay_send		msg_ev.hdr_data
+
+struct lnet_delay_rule {
+	/** link chain on the_lnet.ln_delay_rules */
+	struct list_head	dl_link;
+	/** link chain on delay_dd.dd_sched_rules */
+	struct list_head	dl_sched_link;
+	/** attributes of this rule */
+	struct lnet_fault_attr	dl_attr;
+	/** lock to protect the members below */
+	spinlock_t		dl_lock;
+	/** refcount of delay rule */
+	atomic_t		dl_refcount;
+	/**
+	 * the message sequence to delay, which means the message is delayed
+	 * when dl_stat.fs_count == dl_delay_at
+	 */
+	unsigned long		dl_delay_at;
+	/**
+	 * seconds to delay the next message; exclusive with dl_delay_at
+	 */
+	unsigned long		dl_delay_time;
+	/** baseline to calculate dl_delay_time */
+	unsigned long		dl_time_base;
+	/** jiffies to send the next delayed message */
+	unsigned long		dl_msg_send;
+	/** delayed message list */
+	struct list_head	dl_msg_list;
+	/** statistic of delayed messages */
+	struct lnet_fault_stat	dl_stat;
+	/** timer to wake up delay_daemon */
+	struct timer_list	dl_timer;
+};
+
+struct delay_daemon_data {
+	/** serialise rule add/remove */
+	struct mutex		dd_mutex;
+	/** protect rules on \a dd_sched_rules */
+	spinlock_t		dd_lock;
+	/** scheduled delay rules (by timer) */
+	struct list_head	dd_sched_rules;
+	/** daemon thread sleeps here */
+	wait_queue_head_t	dd_waitq;
+	/** controller (lctl command) waits here */
+	wait_queue_head_t	dd_ctl_waitq;
+	/** daemon is running */
+	unsigned int		dd_running;
+	/** daemon stopped */
+	unsigned int		dd_stopped;
+};
+
+static struct delay_daemon_data	delay_dd;
+
+static unsigned long
+round_timeout(unsigned long timeout)
+{
+	return cfs_time_seconds((unsigned int)
+			cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
+}
+
+static void
+delay_rule_decref(struct lnet_delay_rule *rule)
+{
+	if (atomic_dec_and_test(&rule->dl_refcount)) {
+		LASSERT(list_empty(&rule->dl_sched_link));
+		LASSERT(list_empty(&rule->dl_msg_list));
+		LASSERT(list_empty(&rule->dl_link));
+
+		CFS_FREE_PTR(rule);
+	}
+}
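/*
 * Illustrative sketch of the "last reference frees" pattern used by
 * delay_rule_decref() above -- an assumed userspace equivalent with C11
 * atomics, not part of the patch.  atomic_fetch_sub() returns the value
 * held *before* the decrement, so seeing 1 means this caller dropped the
 * final reference and owns the free.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct refobj {
	atomic_int refcount;	/* one ref per holder: rule list, timer, ... */
};

static void refobj_decref(struct refobj *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);	/* no list may still link the object here */
}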
unsigned int type, unsigned int portal, + struct lnet_msg *msg) +{ + struct lnet_fault_attr *attr = &rule->dl_attr; + bool delay; + + if (!lnet_fault_attr_match(attr, src, dst, type, portal)) + return false; + + /* match this rule, check delay rate now */ + spin_lock(&rule->dl_lock); + if (rule->dl_delay_time) { /* time based delay */ + unsigned long now = cfs_time_current(); + + rule->dl_stat.fs_count++; + delay = cfs_time_aftereq(now, rule->dl_delay_time); + if (delay) { + if (cfs_time_after(now, rule->dl_time_base)) + rule->dl_time_base = now; + + rule->dl_delay_time = rule->dl_time_base + + cfs_time_seconds(cfs_rand() % + attr->u.delay.la_interval); + rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval); + + CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n", + libcfs_nid2str(attr->fa_src), + libcfs_nid2str(attr->fa_dst), + rule->dl_delay_time); + } + + } else { /* rate based delay */ + delay = rule->dl_stat.fs_count++ == rule->dl_delay_at; + /* generate the next random rate sequence */ + if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) { + rule->dl_delay_at = rule->dl_stat.fs_count + + cfs_rand() % attr->u.delay.la_rate; + CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n", + libcfs_nid2str(attr->fa_src), + libcfs_nid2str(attr->fa_dst), rule->dl_delay_at); + } + } + + if (!delay) { + spin_unlock(&rule->dl_lock); + return false; + } + + /* delay this message, update counters */ + lnet_fault_stat_inc(&rule->dl_stat, type); + rule->dl_stat.u.delay.ls_delayed++; + + list_add_tail(&msg->msg_list, &rule->dl_msg_list); + msg->msg_delay_send = round_timeout( + cfs_time_shift(attr->u.delay.la_latency)); + if (rule->dl_msg_send == -1) { + rule->dl_msg_send = msg->msg_delay_send; + mod_timer(&rule->dl_timer, rule->dl_msg_send); + } + + spin_unlock(&rule->dl_lock); + return true; +} + +/** + * check if \a msg can match any Delay Rule, receiving of this message + * will be delayed if there is a match. 
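+ * E.g., with la_rate = 10 and la_latency = 5, roughly one in every ten
+ * matching messages is parked on the rule's dl_msg_list for about five
+ * seconds before delivery resumes.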
+ */ +bool +lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg) +{ + struct lnet_delay_rule *rule; + lnet_nid_t src = le64_to_cpu(hdr->src_nid); + lnet_nid_t dst = le64_to_cpu(hdr->dest_nid); + unsigned int typ = le32_to_cpu(hdr->type); + unsigned int ptl = -1; + + /* NB: called with hold of lnet_net_lock */ + + /** + * NB: if Portal is specified, then only PUT and GET will be + * filtered by delay rule + */ + if (typ == LNET_MSG_PUT) + ptl = le32_to_cpu(hdr->msg.put.ptl_index); + else if (typ == LNET_MSG_GET) + ptl = le32_to_cpu(hdr->msg.get.ptl_index); + + list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { + if (delay_rule_match(rule, src, dst, typ, ptl, msg)) + return true; + } + + return false; +} + +/** check out delayed messages for send */ +static void +delayed_msg_check(struct lnet_delay_rule *rule, bool all, + struct list_head *msg_list) +{ + struct lnet_msg *msg; + struct lnet_msg *tmp; + unsigned long now = cfs_time_current(); + + if (!all && rule->dl_msg_send > now) + return; + + spin_lock(&rule->dl_lock); + list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) { + if (!all && msg->msg_delay_send > now) + break; + + msg->msg_delay_send = 0; + list_move_tail(&msg->msg_list, msg_list); + } + + if (list_empty(&rule->dl_msg_list)) { + del_timer(&rule->dl_timer); + rule->dl_msg_send = -1; + + } else if (!list_empty(msg_list)) { + /* + * dequeued some timedout messages, update timer for the + * next delayed message on rule + */ + msg = list_entry(rule->dl_msg_list.next, + struct lnet_msg, msg_list); + rule->dl_msg_send = msg->msg_delay_send; + mod_timer(&rule->dl_timer, rule->dl_msg_send); + } + spin_unlock(&rule->dl_lock); +} + +static void +delayed_msg_process(struct list_head *msg_list, bool drop) +{ + struct lnet_msg *msg; + + while (!list_empty(msg_list)) { + struct lnet_ni *ni; + int cpt; + int rc; + + msg = list_entry(msg_list->next, struct lnet_msg, msg_list); + LASSERT(msg->msg_rxpeer); + + ni = msg->msg_rxpeer->lp_ni; + cpt = msg->msg_rx_cpt; + + list_del_init(&msg->msg_list); + if (drop) { + rc = -ECANCELED; + + } else if (!msg->msg_routing) { + rc = lnet_parse_local(ni, msg); + if (!rc) + continue; + + } else { + lnet_net_lock(cpt); + rc = lnet_parse_forward_locked(ni, msg); + lnet_net_unlock(cpt); + + switch (rc) { + case LNET_CREDIT_OK: + lnet_ni_recv(ni, msg->msg_private, msg, 0, + 0, msg->msg_len, msg->msg_len); + case LNET_CREDIT_WAIT: + continue; + default: /* failures */ + break; + } + } + + lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len); + lnet_finalize(ni, msg, rc); + } +} + +/** + * Process delayed messages for scheduled rules + * This function can either be called by delay_rule_daemon, or by lnet_finalise + */ +void +lnet_delay_rule_check(void) +{ + struct lnet_delay_rule *rule; + struct list_head msgs; + + INIT_LIST_HEAD(&msgs); + while (1) { + if (list_empty(&delay_dd.dd_sched_rules)) + break; + + spin_lock_bh(&delay_dd.dd_lock); + if (list_empty(&delay_dd.dd_sched_rules)) { + spin_unlock_bh(&delay_dd.dd_lock); + break; + } + + rule = list_entry(delay_dd.dd_sched_rules.next, + struct lnet_delay_rule, dl_sched_link); + list_del_init(&rule->dl_sched_link); + spin_unlock_bh(&delay_dd.dd_lock); + + delayed_msg_check(rule, false, &msgs); + delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */ + } + + if (!list_empty(&msgs)) + delayed_msg_process(&msgs, false); +} + +/** daemon thread to handle delayed messages */ +static int +lnet_delay_rule_daemon(void *arg) +{ + delay_dd.dd_running = 1; + 
wake_up(&delay_dd.dd_ctl_waitq);
+
+	while (delay_dd.dd_running) {
+		wait_event_interruptible(delay_dd.dd_waitq,
+					 !delay_dd.dd_running ||
+					 !list_empty(&delay_dd.dd_sched_rules));
+		lnet_delay_rule_check();
+	}
+
+	/* in case more rules have been enqueued after my last check */
+	lnet_delay_rule_check();
+	delay_dd.dd_stopped = 1;
+	wake_up(&delay_dd.dd_ctl_waitq);
+
+	return 0;
+}
+
+static void
+delay_timer_cb(unsigned long arg)
+{
+	struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+
+	spin_lock_bh(&delay_dd.dd_lock);
+	if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
+		atomic_inc(&rule->dl_refcount);
+		list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
+		wake_up(&delay_dd.dd_waitq);
+	}
+	spin_unlock_bh(&delay_dd.dd_lock);
+}
+
+/**
+ * Add a new delay rule to LNet
+ * There is no check for duplicate delay rules; all rules will be checked
+ * against every incoming message.
+ */
+int
+lnet_delay_rule_add(struct lnet_fault_attr *attr)
+{
+	struct lnet_delay_rule *rule;
+	int rc = 0;
+
+	if (attr->u.delay.la_rate && attr->u.delay.la_interval) {
+		CDEBUG(D_NET, "please provide either delay rate or delay interval, but not both at the same time %d/%d\n",
+		       attr->u.delay.la_rate, attr->u.delay.la_interval);
+		return -EINVAL;
+	}
+
+	if (!attr->u.delay.la_latency) {
+		CDEBUG(D_NET, "delay latency cannot be zero\n");
+		return -EINVAL;
+	}
+
+	if (lnet_fault_attr_validate(attr))
+		return -EINVAL;
+
+	CFS_ALLOC_PTR(rule);
+	if (!rule)
+		return -ENOMEM;
+
+	mutex_lock(&delay_dd.dd_mutex);
+	if (!delay_dd.dd_running) {
+		struct task_struct *task;
+
+		/**
+		 * NB: although LND threads will process delayed messages
+		 * in lnet_finalize, there is no guarantee that LND threads
+		 * will be woken up if no other message needs to be handled.
+		 * There is only one daemon thread; performance is not a
+		 * concern for this simulation module.
+		 */
+		task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
+		if (IS_ERR(task)) {
+			rc = PTR_ERR(task);
+			goto failed;
+		}
+		wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
+	}
+
+	init_timer(&rule->dl_timer);
+	rule->dl_timer.function = delay_timer_cb;
+	rule->dl_timer.data = (unsigned long)rule;
+
+	spin_lock_init(&rule->dl_lock);
+	INIT_LIST_HEAD(&rule->dl_msg_list);
+	INIT_LIST_HEAD(&rule->dl_sched_link);
+
+	rule->dl_attr = *attr;
+	if (attr->u.delay.la_interval) {
+		rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
+		rule->dl_delay_time = cfs_time_shift(cfs_rand() %
+						     attr->u.delay.la_interval);
+	} else {
+		rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+	}
+
+	rule->dl_msg_send = -1;
+
+	lnet_net_lock(LNET_LOCK_EX);
+	atomic_set(&rule->dl_refcount, 1);
+	list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
+	lnet_net_unlock(LNET_LOCK_EX);
+
+	CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
+	       libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
+	       attr->u.delay.la_rate);
+
+	mutex_unlock(&delay_dd.dd_mutex);
+	return 0;
+failed:
+	mutex_unlock(&delay_dd.dd_mutex);
+	CFS_FREE_PTR(rule);
+	return rc;
+}
+
+/**
+ * Remove matched delay rules from LNet: if \a shutdown is true, or both
+ * \a src and \a dst are zero, all rules will be removed; otherwise only
+ * matching rules will be removed.
+ * If \a src is zero, then all rules that have \a dst as their destination
+ * will be removed.
+ * If \a dst is zero, then all rules that have \a src as their source will
+ * be removed.
+ *
+ * When a delay rule is removed, all delayed messages of this rule will be
+ * processed immediately.
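+ *
+ * E.g., lnet_delay_rule_del(0, 0, false) removes every delay rule and
+ * flushes their queued messages for normal delivery, while
+ * lnet_fault_fini() passes \a shutdown = true so that the flushed
+ * messages are dropped instead.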
+ */ +int +lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown) +{ + struct lnet_delay_rule *rule; + struct lnet_delay_rule *tmp; + struct list_head rule_list; + struct list_head msg_list; + int n = 0; + bool cleanup; + + INIT_LIST_HEAD(&rule_list); + INIT_LIST_HEAD(&msg_list); + + if (shutdown) { + src = 0; + dst = 0; + } + + mutex_lock(&delay_dd.dd_mutex); + lnet_net_lock(LNET_LOCK_EX); + + list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) { + if (rule->dl_attr.fa_src != src && src) + continue; + + if (rule->dl_attr.fa_dst != dst && dst) + continue; + + CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n", + libcfs_nid2str(rule->dl_attr.fa_src), + libcfs_nid2str(rule->dl_attr.fa_dst), + rule->dl_attr.u.delay.la_rate, + rule->dl_attr.u.delay.la_interval); + /* refcount is taken over by rule_list */ + list_move(&rule->dl_link, &rule_list); + } + + /* check if we need to shutdown delay_daemon */ + cleanup = list_empty(&the_lnet.ln_delay_rules) && + !list_empty(&rule_list); + lnet_net_unlock(LNET_LOCK_EX); + + list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) { + list_del_init(&rule->dl_link); + + del_timer_sync(&rule->dl_timer); + delayed_msg_check(rule, true, &msg_list); + delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */ + n++; + } + + if (cleanup) { /* no more delay rule, shutdown delay_daemon */ + LASSERT(delay_dd.dd_running); + delay_dd.dd_running = 0; + wake_up(&delay_dd.dd_waitq); + + while (!delay_dd.dd_stopped) + wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped); + } + mutex_unlock(&delay_dd.dd_mutex); + + if (!list_empty(&msg_list)) + delayed_msg_process(&msg_list, shutdown); + + return n; +} + +/** + * List Delay Rule at position of \a pos + */ +int +lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr, + struct lnet_fault_stat *stat) +{ + struct lnet_delay_rule *rule; + int cpt; + int i = 0; + int rc = -ENOENT; + + cpt = lnet_net_lock_current(); + list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { + if (i++ < pos) + continue; + + spin_lock(&rule->dl_lock); + *attr = rule->dl_attr; + *stat = rule->dl_stat; + spin_unlock(&rule->dl_lock); + rc = 0; + break; + } + + lnet_net_unlock(cpt); + return rc; +} + +/** + * reset counters for all Delay Rules + */ +void +lnet_delay_rule_reset(void) +{ + struct lnet_delay_rule *rule; + int cpt; + + cpt = lnet_net_lock_current(); + + list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) { + struct lnet_fault_attr *attr = &rule->dl_attr; + + spin_lock(&rule->dl_lock); + + memset(&rule->dl_stat, 0, sizeof(rule->dl_stat)); + if (attr->u.delay.la_rate) { + rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate; + } else { + rule->dl_delay_time = cfs_time_shift(cfs_rand() % + attr->u.delay.la_interval); + rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval); + } + spin_unlock(&rule->dl_lock); + } + + lnet_net_unlock(cpt); +} + +int +lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data) +{ + struct lnet_fault_attr *attr; + struct lnet_fault_stat *stat; + + attr = (struct lnet_fault_attr *)data->ioc_inlbuf1; + + switch (opc) { + default: + return -EINVAL; + + case LNET_CTL_DROP_ADD: + if (!attr) + return -EINVAL; + + return lnet_drop_rule_add(attr); + + case LNET_CTL_DROP_DEL: + if (!attr) + return -EINVAL; + + data->ioc_count = lnet_drop_rule_del(attr->fa_src, + attr->fa_dst); + return 0; + + case LNET_CTL_DROP_RESET: + lnet_drop_rule_reset(); + return 0; + + case LNET_CTL_DROP_LIST: + stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; + if 
(!attr || !stat) + return -EINVAL; + + return lnet_drop_rule_list(data->ioc_count, attr, stat); + + case LNET_CTL_DELAY_ADD: + if (!attr) + return -EINVAL; + + return lnet_delay_rule_add(attr); + + case LNET_CTL_DELAY_DEL: + if (!attr) + return -EINVAL; + + data->ioc_count = lnet_delay_rule_del(attr->fa_src, + attr->fa_dst, false); + return 0; + + case LNET_CTL_DELAY_RESET: + lnet_delay_rule_reset(); + return 0; + + case LNET_CTL_DELAY_LIST: + stat = (struct lnet_fault_stat *)data->ioc_inlbuf2; + if (!attr || !stat) + return -EINVAL; + + return lnet_delay_rule_list(data->ioc_count, attr, stat); + } +} + +int +lnet_fault_init(void) +{ + CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT); + CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK); + CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET); + CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY); + + mutex_init(&delay_dd.dd_mutex); + spin_lock_init(&delay_dd.dd_lock); + init_waitqueue_head(&delay_dd.dd_waitq); + init_waitqueue_head(&delay_dd.dd_ctl_waitq); + INIT_LIST_HEAD(&delay_dd.dd_sched_rules); + + return 0; +} + +void +lnet_fault_fini(void) +{ + lnet_drop_rule_del(0, 0); + lnet_delay_rule_del(0, 0, true); + + LASSERT(list_empty(&the_lnet.ln_drop_rules)); + LASSERT(list_empty(&the_lnet.ln_delay_rules)); + LASSERT(list_empty(&delay_dd.dd_sched_rules)); +} diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c index 80f585afa..ebf468fbc 100644 --- a/drivers/staging/lustre/lnet/lnet/nidstrings.c +++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c @@ -170,7 +170,7 @@ parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange) } LIBCFS_ALLOC(addrrange, sizeof(struct addrrange)); - if (addrrange == NULL) + if (!addrrange) return -ENOMEM; list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges); INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges); @@ -203,16 +203,18 @@ add_nidrange(const struct cfs_lstr *src, return NULL; nf = libcfs_namenum2netstrfns(src->ls_str); - if (nf == NULL) + if (!nf) return NULL; endlen = src->ls_len - strlen(nf->nf_name); - if (endlen == 0) + if (!endlen) /* network name only, e.g. "elan" or "tcp" */ netnum = 0; else { - /* e.g. "elan25" or "tcp23", refuse to parse if + /* + * e.g. 
"elan25" or "tcp23", refuse to parse if * network name is not appended with decimal or - * hexadecimal number */ + * hexadecimal number + */ if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name), endlen, &netnum, 0, MAX_NUMERIC_VALUE)) return NULL; @@ -227,7 +229,7 @@ add_nidrange(const struct cfs_lstr *src, } LIBCFS_ALLOC(nr, sizeof(struct nidrange)); - if (nr == NULL) + if (!nr) return NULL; list_add_tail(&nr->nr_link, nidlist); INIT_LIST_HEAD(&nr->nr_addrranges); @@ -253,22 +255,21 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist) struct nidrange *nr; tmp = *src; - if (cfs_gettok(src, '@', &addrrange) == 0) + if (!cfs_gettok(src, '@', &addrrange)) goto failed; - if (cfs_gettok(src, '@', &net) == 0 || src->ls_str != NULL) + if (!cfs_gettok(src, '@', &net) || src->ls_str) goto failed; nr = add_nidrange(&net, nidlist); - if (nr == NULL) + if (!nr) goto failed; - if (parse_addrange(&addrrange, nr) != 0) + if (parse_addrange(&addrrange, nr)) goto failed; return 1; failed: - CWARN("can't parse nidrange: \"%.*s\"\n", tmp.ls_len, tmp.ls_str); return 0; } @@ -342,12 +343,12 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist) INIT_LIST_HEAD(nidlist); while (src.ls_str) { rc = cfs_gettok(&src, ' ', &res); - if (rc == 0) { + if (!rc) { cfs_free_nidlist(nidlist); return 0; } rc = parse_nidrange(&res, nidlist); - if (rc == 0) { + if (!rc) { cfs_free_nidlist(nidlist); return 0; } @@ -378,7 +379,7 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist) return 1; list_for_each_entry(ar, &nr->nr_addrranges, ar_link) if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid), - &ar->ar_numaddr_ranges)) + &ar->ar_numaddr_ranges)) return 1; } return 0; @@ -395,7 +396,7 @@ cfs_print_network(char *buffer, int count, struct nidrange *nr) { struct netstrfns *nf = nr->nr_netstrfns; - if (nr->nr_netnum == 0) + if (!nr->nr_netnum) return scnprintf(buffer, count, "@%s", nf->nf_name); else return scnprintf(buffer, count, "@%s%u", @@ -417,7 +418,7 @@ cfs_print_addrranges(char *buffer, int count, struct list_head *addrranges, struct netstrfns *nf = nr->nr_netstrfns; list_for_each_entry(ar, addrranges, ar_link) { - if (i != 0) + if (i) i += scnprintf(buffer + i, count - i, " "); i += nf->nf_print_addrlist(buffer + i, count - i, &ar->ar_numaddr_ranges); @@ -442,10 +443,10 @@ int cfs_print_nidlist(char *buffer, int count, struct list_head *nidlist) return 0; list_for_each_entry(nr, nidlist, nr_link) { - if (i != 0) + if (i) i += scnprintf(buffer + i, count - i, " "); - if (nr->nr_all != 0) { + if (nr->nr_all) { LASSERT(list_empty(&nr->nr_addrranges)); i += scnprintf(buffer + i, count - i, "*"); i += cfs_print_network(buffer + i, count - i, nr); @@ -487,13 +488,13 @@ static void cfs_ip_ar_min_max(struct addrrange *ar, __u32 *min_nid, tmp_ip_addr = ((min_ip[0] << 24) | (min_ip[1] << 16) | (min_ip[2] << 8) | min_ip[3]); - if (min_nid != NULL) + if (min_nid) *min_nid = tmp_ip_addr; tmp_ip_addr = ((max_ip[0] << 24) | (max_ip[1] << 16) | (max_ip[2] << 8) | max_ip[3]); - if (max_nid != NULL) + if (max_nid) *max_nid = tmp_ip_addr; } @@ -515,16 +516,16 @@ static void cfs_num_ar_min_max(struct addrrange *ar, __u32 *min_nid, list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) { list_for_each_entry(re, &el->el_exprs, re_link) { - if (re->re_lo < min_addr || min_addr == 0) + if (re->re_lo < min_addr || !min_addr) min_addr = re->re_lo; if (re->re_hi > max_addr) max_addr = re->re_hi; } } - if (min_nid != NULL) + if (min_nid) *min_nid = min_addr; - if (max_nid != NULL) + if (max_nid) *max_nid 
= max_addr; } @@ -546,17 +547,17 @@ bool cfs_nidrange_is_contiguous(struct list_head *nidlist) list_for_each_entry(nr, nidlist, nr_link) { nf = nr->nr_netstrfns; - if (lndname == NULL) + if (!lndname) lndname = nf->nf_name; if (netnum == -1) netnum = nr->nr_netnum; - if (strcmp(lndname, nf->nf_name) != 0 || + if (strcmp(lndname, nf->nf_name) || netnum != nr->nr_netnum) return false; } - if (nf == NULL) + if (!nf) return false; if (!nf->nf_is_contiguous(nidlist)) @@ -590,7 +591,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist) list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { cfs_num_ar_min_max(ar, ¤t_start_nid, ¤t_end_nid); - if (last_end_nid != 0 && + if (last_end_nid && (current_start_nid - last_end_nid != 1)) return false; last_end_nid = current_end_nid; @@ -600,7 +601,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist) re_link) { if (re->re_stride > 1) return false; - else if (last_hi != 0 && + else if (last_hi && re->re_hi - last_hi != 1) return false; last_hi = re->re_hi; @@ -640,7 +641,7 @@ static bool cfs_ip_is_contiguous(struct list_head *nidlist) last_diff = 0; cfs_ip_ar_min_max(ar, ¤t_start_nid, ¤t_end_nid); - if (last_end_nid != 0 && + if (last_end_nid && (current_start_nid - last_end_nid != 1)) return false; last_end_nid = current_end_nid; @@ -724,7 +725,7 @@ static void cfs_num_min_max(struct list_head *nidlist, __u32 *min_nid, list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { cfs_num_ar_min_max(ar, &tmp_min_addr, &tmp_max_addr); - if (tmp_min_addr < min_addr || min_addr == 0) + if (tmp_min_addr < min_addr || !min_addr) min_addr = tmp_min_addr; if (tmp_max_addr > max_addr) max_addr = tmp_min_addr; @@ -756,16 +757,16 @@ static void cfs_ip_min_max(struct list_head *nidlist, __u32 *min_nid, list_for_each_entry(ar, &nr->nr_addrranges, ar_link) { cfs_ip_ar_min_max(ar, &tmp_min_ip_addr, &tmp_max_ip_addr); - if (tmp_min_ip_addr < min_ip_addr || min_ip_addr == 0) + if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr) min_ip_addr = tmp_min_ip_addr; if (tmp_max_ip_addr > max_ip_addr) max_ip_addr = tmp_max_ip_addr; } } - if (min_nid != NULL) + if (min_nid) *min_nid = min_ip_addr; - if (max_nid != NULL) + if (max_nid) *max_nid = max_ip_addr; } @@ -784,12 +785,14 @@ libcfs_ip_addr2str(__u32 addr, char *str, size_t size) (addr >> 8) & 0xff, addr & 0xff); } -/* CAVEAT EMPTOR XscanfX +/* + * CAVEAT EMPTOR XscanfX * I use "%n" at the end of a sscanf format to detect trailing junk. However * sscanf may return immediately if it sees the terminating '0' in a string, so * I initialise the %n variable to the expected length. If sscanf sets it; * fine, if it doesn't, then the scan ended at the end of the string, which is - * fine too :) */ + * fine too :) + */ static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr) { @@ -802,9 +805,9 @@ libcfs_ip_str2addr(const char *str, int nob, __u32 *addr) /* numeric IP? 
*/ if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 && n == nob && - (a & ~0xff) == 0 && (b & ~0xff) == 0 && - (c & ~0xff) == 0 && (d & ~0xff) == 0) { - *addr = ((a<<24)|(b<<16)|(c<<8)|d); + !(a & ~0xff) && !(b & ~0xff) && + !(c & ~0xff) && !(d & ~0xff)) { + *addr = ((a << 24) | (b << 16) | (c << 8) | d); return 1; } @@ -824,7 +827,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list) src.ls_len = len; i = 0; - while (src.ls_str != NULL) { + while (src.ls_str) { struct cfs_lstr res; if (!cfs_gettok(&src, '.', &res)) { @@ -833,7 +836,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list) } rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el); - if (rc != 0) + if (rc) goto out; list_add_tail(&el->el_link, list); @@ -858,7 +861,7 @@ libcfs_ip_addr_range_print(char *buffer, int count, struct list_head *list) list_for_each_entry(el, list, el_link) { LASSERT(j++ < 4); - if (i != 0) + if (i) i += scnprintf(buffer + i, count - i, "."); i += cfs_expr_list_print(buffer + i, count - i, el); } @@ -928,7 +931,7 @@ libcfs_num_parse(char *str, int len, struct list_head *list) int rc; rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el); - if (rc == 0) + if (!rc) list_add_tail(&el->el_link, list); return rc; @@ -1060,7 +1063,7 @@ libcfs_name2netstrfns(const char *name) int libcfs_isknown_lnd(__u32 lnd) { - return libcfs_lnd2netstrfns(lnd) != NULL; + return !!libcfs_lnd2netstrfns(lnd); } EXPORT_SYMBOL(libcfs_isknown_lnd); @@ -1069,7 +1072,7 @@ libcfs_lnd2modname(__u32 lnd) { struct netstrfns *nf = libcfs_lnd2netstrfns(lnd); - return (nf == NULL) ? NULL : nf->nf_modname; + return nf ? nf->nf_modname : NULL; } EXPORT_SYMBOL(libcfs_lnd2modname); @@ -1078,10 +1081,10 @@ libcfs_str2lnd(const char *str) { struct netstrfns *nf = libcfs_name2netstrfns(str); - if (nf != NULL) + if (nf) return nf->nf_type; - return -1; + return -ENXIO; } EXPORT_SYMBOL(libcfs_str2lnd); @@ -1091,7 +1094,7 @@ libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size) struct netstrfns *nf; nf = libcfs_lnd2netstrfns(lnd); - if (nf == NULL) + if (!nf) snprintf(buf, buf_size, "?%u?", lnd); else snprintf(buf, buf_size, "%s", nf->nf_name); @@ -1108,9 +1111,9 @@ libcfs_net2str_r(__u32 net, char *buf, size_t buf_size) struct netstrfns *nf; nf = libcfs_lnd2netstrfns(lnd); - if (nf == NULL) + if (!nf) snprintf(buf, buf_size, "<%u:%u>", lnd, nnum); - else if (nnum == 0) + else if (!nnum) snprintf(buf, buf_size, "%s", nf->nf_name); else snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum); @@ -1135,14 +1138,14 @@ libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size) } nf = libcfs_lnd2netstrfns(lnd); - if (nf == NULL) + if (!nf) { snprintf(buf, buf_size, "%x@<%u:%u>", addr, lnd, nnum); - else { + } else { size_t addr_len; nf->nf_addr2str(addr, buf, buf_size); addr_len = strlen(buf); - if (nnum == 0) + if (!nnum) snprintf(buf + addr_len, buf_size - addr_len, "@%s", nf->nf_name); else @@ -1195,7 +1198,7 @@ libcfs_str2net(const char *str) { __u32 net; - if (libcfs_str2net_internal(str, &net) != NULL) + if (libcfs_str2net_internal(str, &net)) return net; return LNET_NIDNET(LNET_NID_ANY); @@ -1210,15 +1213,15 @@ libcfs_str2nid(const char *str) __u32 net; __u32 addr; - if (sep != NULL) { + if (sep) { nf = libcfs_str2net_internal(sep + 1, &net); - if (nf == NULL) + if (!nf) return LNET_NID_ANY; } else { sep = str + strlen(str); net = LNET_MKNET(SOCKLND, 0); nf = libcfs_lnd2netstrfns(SOCKLND); - LASSERT(nf != NULL); + LASSERT(nf); } if (!nf->nf_str2addr(str, (int)(sep - str), &addr)) @@ -1240,8 +1243,8 
@@ libcfs_id2str(lnet_process_id_t id) } snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s", - ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "", - (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid)); + id.pid & LNET_PID_USERFLAG ? "U" : "", + id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid)); return str; } EXPORT_SYMBOL(libcfs_id2str); diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c index 1fceed3c8..b026feebc 100644 --- a/drivers/staging/lustre/lnet/lnet/peer.c +++ b/drivers/staging/lustre/lnet/lnet/peer.c @@ -39,6 +39,7 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" +#include "../../include/linux/lnet/lib-dlc.h" int lnet_peer_tables_create(void) @@ -50,7 +51,7 @@ lnet_peer_tables_create(void) the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ptable)); - if (the_lnet.ln_peer_tables == NULL) { + if (!the_lnet.ln_peer_tables) { CERROR("Failed to allocate cpu-partition peer tables\n"); return -ENOMEM; } @@ -60,7 +61,7 @@ lnet_peer_tables_create(void) LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i, LNET_PEER_HASH_SIZE * sizeof(*hash)); - if (hash == NULL) { + if (!hash) { CERROR("Failed to create peer hash table\n"); lnet_peer_tables_destroy(); return -ENOMEM; @@ -82,12 +83,12 @@ lnet_peer_tables_destroy(void) int i; int j; - if (the_lnet.ln_peer_tables == NULL) + if (!the_lnet.ln_peer_tables) return; cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { hash = ptable->pt_hash; - if (hash == NULL) /* not initialized */ + if (!hash) /* not initialized */ break; LASSERT(list_empty(&ptable->pt_deathrow)); @@ -103,62 +104,116 @@ lnet_peer_tables_destroy(void) the_lnet.ln_peer_tables = NULL; } +static void +lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable) +{ + int i; + lnet_peer_t *lp; + lnet_peer_t *tmp; + + for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { + list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], + lp_hashlist) { + if (ni && ni != lp->lp_ni) + continue; + list_del_init(&lp->lp_hashlist); + /* Lose hash table's ref */ + ptable->pt_zombies++; + lnet_peer_decref_locked(lp); + } + } +} + +static void +lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable, + int cpt_locked) +{ + int i; + + for (i = 3; ptable->pt_zombies; i++) { + lnet_net_unlock(cpt_locked); + + if (is_power_of_2(i)) { + CDEBUG(D_WARNING, + "Waiting for %d zombies on peer table\n", + ptable->pt_zombies); + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1) >> 1); + lnet_net_lock(cpt_locked); + } +} + +static void +lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable, + int cpt_locked) +{ + lnet_peer_t *lp; + lnet_peer_t *tmp; + lnet_nid_t lp_nid; + int i; + + for (i = 0; i < LNET_PEER_HASH_SIZE; i++) { + list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i], + lp_hashlist) { + if (ni != lp->lp_ni) + continue; + + if (!lp->lp_rtr_refcount) + continue; + + lp_nid = lp->lp_nid; + + lnet_net_unlock(cpt_locked); + lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid); + lnet_net_lock(cpt_locked); + } + } +} + void -lnet_peer_tables_cleanup(void) +lnet_peer_tables_cleanup(lnet_ni_t *ni) { struct lnet_peer_table *ptable; + struct list_head deathrow; + lnet_peer_t *lp; + lnet_peer_t *temp; int i; - int j; - LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */ + INIT_LIST_HEAD(&deathrow); + LASSERT(the_lnet.ln_shutdown || ni); + /* + * If just deleting the peers for a NI, get rid of any routes these + * peers are gateways for. 
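+ * E.g., lnet_peer_table_del_rtrs_locked() below calls lnet_del_route()
+ * for every gateway peer that is bound to \a ni before the peers
+ * themselves are cleaned up.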
+ */ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { lnet_net_lock(i); - - for (j = 0; j < LNET_PEER_HASH_SIZE; j++) { - struct list_head *peers = &ptable->pt_hash[j]; - - while (!list_empty(peers)) { - lnet_peer_t *lp = list_entry(peers->next, - lnet_peer_t, - lp_hashlist); - list_del_init(&lp->lp_hashlist); - /* lose hash table's ref */ - lnet_peer_decref_locked(lp); - } - } - + lnet_peer_table_del_rtrs_locked(ni, ptable, i); lnet_net_unlock(i); } + /* + * Start the process of moving the applicable peers to + * deathrow. + */ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { - LIST_HEAD(deathrow); - lnet_peer_t *lp; - lnet_net_lock(i); + lnet_peer_table_cleanup_locked(ni, ptable); + lnet_net_unlock(i); + } - for (j = 3; ptable->pt_number != 0; j++) { - lnet_net_unlock(i); - - if ((j & (j - 1)) == 0) { - CDEBUG(D_WARNING, - "Waiting for %d peers on peer table\n", - ptable->pt_number); - } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 2); - lnet_net_lock(i); - } + /* Cleanup all entries on deathrow. */ + cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { + lnet_net_lock(i); + lnet_peer_table_deathrow_wait_locked(ptable, i); list_splice_init(&ptable->pt_deathrow, &deathrow); - lnet_net_unlock(i); + } - while (!list_empty(&deathrow)) { - lp = list_entry(deathrow.next, - lnet_peer_t, lp_hashlist); - list_del(&lp->lp_hashlist); - LIBCFS_FREE(lp, sizeof(*lp)); - } + list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) { + list_del(&lp->lp_hashlist); + LIBCFS_FREE(lp, sizeof(*lp)); } } @@ -167,11 +222,11 @@ lnet_destroy_peer_locked(lnet_peer_t *lp) { struct lnet_peer_table *ptable; - LASSERT(lp->lp_refcount == 0); - LASSERT(lp->lp_rtr_refcount == 0); + LASSERT(!lp->lp_refcount); + LASSERT(!lp->lp_rtr_refcount); LASSERT(list_empty(&lp->lp_txq)); LASSERT(list_empty(&lp->lp_hashlist)); - LASSERT(lp->lp_txqnob == 0); + LASSERT(!lp->lp_txqnob); ptable = the_lnet.ln_peer_tables[lp->lp_cpt]; LASSERT(ptable->pt_number > 0); @@ -181,6 +236,8 @@ lnet_destroy_peer_locked(lnet_peer_t *lp) lp->lp_ni = NULL; list_add(&lp->lp_hashlist, &ptable->pt_deathrow); + LASSERT(ptable->pt_zombies > 0); + ptable->pt_zombies--; } lnet_peer_t * @@ -220,14 +277,14 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) ptable = the_lnet.ln_peer_tables[cpt2]; lp = lnet_find_peer_locked(ptable, nid); - if (lp != NULL) { + if (lp) { *lpp = lp; return 0; } if (!list_empty(&ptable->pt_deathrow)) { lp = list_entry(ptable->pt_deathrow.next, - lnet_peer_t, lp_hashlist); + lnet_peer_t, lp_hashlist); list_del(&lp->lp_hashlist); } @@ -238,12 +295,12 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) ptable->pt_number++; lnet_net_unlock(cpt); - if (lp != NULL) + if (lp) memset(lp, 0, sizeof(*lp)); else LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp)); - if (lp == NULL) { + if (!lp) { rc = -ENOMEM; lnet_net_lock(cpt); goto out; @@ -276,30 +333,30 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) } lp2 = lnet_find_peer_locked(ptable, nid); - if (lp2 != NULL) { + if (lp2) { *lpp = lp2; goto out; } lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2); - if (lp->lp_ni == NULL) { + if (!lp->lp_ni) { rc = -EHOSTUNREACH; goto out; } - lp->lp_txcredits = - lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; - lp->lp_rtrcredits = + lp->lp_txcredits = lp->lp_ni->ni_peertxcredits; + lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; + lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni); lp->lp_minrtrcredits = 
lnet_peer_buffer_credits(lp->lp_ni); list_add_tail(&lp->lp_hashlist, - &ptable->pt_hash[lnet_nid2peerhash(nid)]); + &ptable->pt_hash[lnet_nid2peerhash(nid)]); ptable->pt_version++; *lpp = lp; return 0; out: - if (lp != NULL) + if (lp) list_add(&lp->lp_hashlist, &ptable->pt_deathrow); ptable->pt_number--; return rc; @@ -317,7 +374,7 @@ lnet_debug_peer(lnet_nid_t nid) lnet_net_lock(cpt); rc = lnet_nid2peer_locked(&lp, nid, cpt); - if (rc != 0) { + if (rc) { lnet_net_unlock(cpt); CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid)); return; @@ -336,3 +393,65 @@ lnet_debug_peer(lnet_nid_t nid) lnet_net_unlock(cpt); } + +int +lnet_get_peer_info(__u32 peer_index, __u64 *nid, + char aliveness[LNET_MAX_STR_LEN], + __u32 *cpt_iter, __u32 *refcount, + __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits, + __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits, + __u32 *peer_tx_qnob) +{ + struct lnet_peer_table *peer_table; + lnet_peer_t *lp; + bool found = false; + int lncpt, j; + + /* get the number of CPTs */ + lncpt = cfs_percpt_number(the_lnet.ln_peer_tables); + + /* + * if the cpt number to be examined is >= the number of cpts in + * the system then indicate that there are no more cpts to examin + */ + if (*cpt_iter >= lncpt) + return -ENOENT; + + /* get the current table */ + peer_table = the_lnet.ln_peer_tables[*cpt_iter]; + /* if the ptable is NULL then there are no more cpts to examine */ + if (!peer_table) + return -ENOENT; + + lnet_net_lock(*cpt_iter); + + for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) { + struct list_head *peers = &peer_table->pt_hash[j]; + + list_for_each_entry(lp, peers, lp_hashlist) { + if (peer_index-- > 0) + continue; + + snprintf(aliveness, LNET_MAX_STR_LEN, "NA"); + if (lnet_isrouter(lp) || + lnet_peer_aliveness_enabled(lp)) + snprintf(aliveness, LNET_MAX_STR_LEN, + lp->lp_alive ? "up" : "down"); + + *nid = lp->lp_nid; + *refcount = lp->lp_refcount; + *ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits; + *peer_tx_credits = lp->lp_txcredits; + *peer_rtr_credits = lp->lp_rtrcredits; + *peer_min_rtr_credits = lp->lp_mintxcredits; + *peer_tx_qnob = lp->lp_txqnob; + + found = true; + } + } + lnet_net_unlock(*cpt_iter); + + *cpt_iter = lncpt; + + return found ? 0 : -ENOENT; +} diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index f5faa414d..b01dc424c 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * */ #define DEBUG_SUBSYSTEM S_LNET @@ -28,8 +24,11 @@ #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4) #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */ #define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4) +#define LNET_NRB_SMALL_PAGES 1 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) +#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \ + PAGE_SHIFT) static char *forwarding = ""; module_param(forwarding, charp, 0444); @@ -61,8 +60,10 @@ lnet_peer_buffer_credits(lnet_ni_t *ni) if (peer_buffer_credits > 0) return peer_buffer_credits; - /* As an approximation, allow this peer the same number of router - * buffers as it is allowed outstanding sends */ + /* + * As an approximation, allow this peer the same number of router + * buffers as it is allowed outstanding sends + */ return ni->ni_peertxcredits; } @@ -107,7 +108,7 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, lp->lp_timestamp = when; /* update timestamp */ lp->lp_ping_deadline = 0; /* disable ping timeout */ - if (lp->lp_alive_count != 0 && /* got old news */ + if (lp->lp_alive_count && /* got old news */ (!lp->lp_alive) == (!alive)) { /* new date for old news */ CDEBUG(D_NET, "Old news\n"); return; @@ -131,11 +132,12 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp) int alive; int notifylnd; - /* Notify only in 1 thread at any time to ensure ordered notification. + /* + * Notify only in 1 thread at any time to ensure ordered notification. * NB individual events can be missed; the only guarantee is that you - * always get the most recent news */ - - if (lp->lp_notifying || ni == NULL) + * always get the most recent news + */ + if (lp->lp_notifying || !ni) return; lp->lp_notifying = 1; @@ -147,13 +149,14 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp) lp->lp_notifylnd = 0; lp->lp_notify = 0; - if (notifylnd && ni->ni_lnd->lnd_notify != NULL) { + if (notifylnd && ni->ni_lnd->lnd_notify) { lnet_net_unlock(lp->lp_cpt); - /* A new notification could happen now; I'll handle it - * when control returns to me */ - - (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive); + /* + * A new notification could happen now; I'll handle it + * when control returns to me + */ + ni->ni_lnd->lnd_notify(ni, lp->lp_nid, alive); lnet_net_lock(lp->lp_cpt); } @@ -176,7 +179,7 @@ lnet_rtr_addref_locked(lnet_peer_t *lp) /* a simple insertion sort */ list_for_each_prev(pos, &the_lnet.ln_routers) { lnet_peer_t *rtr = list_entry(pos, lnet_peer_t, - lp_rtr_list); + lp_rtr_list); if (rtr->lp_nid < lp->lp_nid) break; @@ -197,12 +200,12 @@ lnet_rtr_decref_locked(lnet_peer_t *lp) /* lnet_net_lock must be exclusively locked */ lp->lp_rtr_refcount--; - if (lp->lp_rtr_refcount == 0) { + if (!lp->lp_rtr_refcount) { LASSERT(list_empty(&lp->lp_routes)); - if (lp->lp_rcd != NULL) { + if (lp->lp_rcd) { list_add(&lp->lp_rcd->rcd_list, - &the_lnet.ln_rcd_deathrow); + &the_lnet.ln_rcd_deathrow); lp->lp_rcd = NULL; } @@ -245,8 +248,10 @@ static void lnet_shuffle_seed(void) cfs_get_random_bytes(seed, sizeof(seed)); - /* Nodes with small feet have little entropy - * the NID for this node gives the most entropy in the low bits */ + /* + * Nodes with small feet have little entropy + * the NID for this node gives the most entropy in the low bits + */ list_for_each(tmp, &the_lnet.ln_nis) { ni = list_entry(tmp, lnet_ni_t, ni_list); lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid)); @@ -277,7 +282,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route) /* len+1 positions to 
add a new entry, also prevents division by 0 */ offset = cfs_rand() % (len + 1); list_for_each(e, &rnet->lrn_routes) { - if (offset == 0) + if (!offset) break; offset--; } @@ -289,7 +294,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route) } int -lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, +lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway, unsigned int priority) { struct list_head *e; @@ -300,7 +305,7 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, int add_route; int rc; - CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n", + CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n", libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway)); if (gateway == LNET_NID_ANY || @@ -308,21 +313,21 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, net == LNET_NIDNET(LNET_NID_ANY) || LNET_NETTYP(net) == LOLND || LNET_NIDNET(gateway) == net || - hops < 1 || hops > 255) + (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255))) return -EINVAL; if (lnet_islocalnet(net)) /* it's a local network */ - return 0; /* ignore the route entry */ + return -EEXIST; /* Assume net, route, all new */ LIBCFS_ALLOC(route, sizeof(*route)); LIBCFS_ALLOC(rnet, sizeof(*rnet)); - if (route == NULL || rnet == NULL) { + if (!route || !rnet) { CERROR("Out of memory creating route %s %d %s\n", libcfs_net2str(net), hops, libcfs_nid2str(gateway)); - if (route != NULL) + if (route) LIBCFS_FREE(route, sizeof(*route)); - if (rnet != NULL) + if (rnet) LIBCFS_FREE(rnet, sizeof(*rnet)); return -ENOMEM; } @@ -336,25 +341,24 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, lnet_net_lock(LNET_LOCK_EX); rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX); - if (rc != 0) { + if (rc) { lnet_net_unlock(LNET_LOCK_EX); LIBCFS_FREE(route, sizeof(*route)); LIBCFS_FREE(rnet, sizeof(*rnet)); if (rc == -EHOSTUNREACH) /* gateway is not on a local net */ - return 0; /* ignore the route entry */ + return rc; /* ignore the route entry */ CERROR("Error %d creating route %s %d %s\n", rc, libcfs_net2str(net), hops, libcfs_nid2str(gateway)); - return rc; } LASSERT(!the_lnet.ln_shutdown); rnet2 = lnet_find_net_locked(net); - if (rnet2 == NULL) { + if (!rnet2) { /* new network */ list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net)); rnet2 = rnet; @@ -382,8 +386,8 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, lnet_net_unlock(LNET_LOCK_EX); /* XXX Assume alive */ - if (ni->ni_lnd->lnd_notify != NULL) - (ni->ni_lnd->lnd_notify)(ni, gateway, 1); + if (ni->ni_lnd->lnd_notify) + ni->ni_lnd->lnd_notify(ni, gateway, 1); lnet_net_lock(LNET_LOCK_EX); } @@ -391,14 +395,20 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, /* -1 for notify or !add_route */ lnet_peer_decref_locked(route->lr_gateway); lnet_net_unlock(LNET_LOCK_EX); + rc = 0; - if (!add_route) + if (!add_route) { + rc = -EEXIST; LIBCFS_FREE(route, sizeof(*route)); + } if (rnet != rnet2) LIBCFS_FREE(rnet, sizeof(*rnet)); - return 0; + /* indicate to startup the router checker if configured */ + wake_up(&the_lnet.ln_rc_waitq); + + return rc; } int @@ -426,10 +436,9 @@ lnet_check_routes(void) lnet_nid_t nid2; int net; - route = list_entry(e2, lnet_route_t, - lr_list); + route = list_entry(e2, lnet_route_t, lr_list); - if (route2 == NULL) { + if (!route2) { route2 = route; continue; } @@ -472,9 +481,10 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid) CDEBUG(D_NET, "Del route: net %s : gw %s\n", libcfs_net2str(net), 
libcfs_nid2str(gw_nid)); - /* NB Caller may specify either all routes via the given gateway - * or a specific route entry actual NIDs) */ - + /* + * NB Caller may specify either all routes via the given gateway + * or a specific route entry actual NIDs) + */ lnet_net_lock(LNET_LOCK_EX); if (net == LNET_NIDNET(LNET_NID_ANY)) rn_list = &the_lnet.ln_remote_nets_hash[0]; @@ -486,7 +496,7 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid) rnet = list_entry(e1, lnet_remotenet_t, lrn_list); if (!(net == LNET_NIDNET(LNET_NID_ANY) || - net == rnet->lrn_net)) + net == rnet->lrn_net)) continue; list_for_each(e2, &rnet->lrn_routes) { @@ -513,7 +523,7 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid) LIBCFS_FREE(route, sizeof(*route)); - if (rnet != NULL) + if (rnet) LIBCFS_FREE(rnet, sizeof(*rnet)); rc = 0; @@ -538,6 +548,38 @@ lnet_destroy_routes(void) lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY); } +int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg) +{ + int i, rc = -ENOENT, j; + + if (!the_lnet.ln_rtrpools) + return rc; + + for (i = 0; i < LNET_NRBPOOLS; i++) { + lnet_rtrbufpool_t *rbp; + + lnet_net_lock(LNET_LOCK_EX); + cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) { + if (i++ != idx) + continue; + + pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages; + pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers; + pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits; + pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits; + rc = 0; + break; + } + lnet_net_unlock(LNET_LOCK_EX); + } + + lnet_net_lock(LNET_LOCK_EX); + pool_cfg->pl_routing = the_lnet.ln_routing; + lnet_net_unlock(LNET_LOCK_EX); + + return rc; +} + int lnet_get_route(int idx, __u32 *net, __u32 *hops, lnet_nid_t *gateway, __u32 *alive, __u32 *priority) @@ -558,15 +600,14 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops, rnet = list_entry(e1, lnet_remotenet_t, lrn_list); list_for_each(e2, &rnet->lrn_routes) { - route = list_entry(e2, lnet_route_t, - lr_list); + route = list_entry(e2, lnet_route_t, lr_list); - if (idx-- == 0) { + if (!idx--) { *net = rnet->lrn_net; *hops = route->lr_hops; *priority = route->lr_priority; *gateway = route->lr_gateway->lp_nid; - *alive = route->lr_gateway->lp_alive; + *alive = lnet_is_route_alive(route); lnet_net_unlock(cpt); return 0; } @@ -604,7 +645,7 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) { lnet_ping_info_t *info = rcd->rcd_pinginfo; struct lnet_peer *gw = rcd->rcd_gateway; - lnet_route_t *rtr; + lnet_route_t *rte; if (!gw->lp_alive) return; @@ -621,21 +662,25 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) } gw->lp_ping_feats = info->pi_features; - if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) { + if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) { CDEBUG(D_NET, "%s: Unexpected features 0x%x\n", libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats); return; /* nothing I can understand */ } - if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0) + if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) return; /* can't carry NI status info */ - list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) { - int ptl_status = LNET_NI_STATUS_INVALID; + list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) { int down = 0; int up = 0; int i; + if (gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) { + rte->lr_downis = 1; + continue; + } + for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) { lnet_ni_status_t *stat = &info->pi_ni[i]; lnet_nid_t nid = stat->ns_nid; @@ -651,22 +696,15 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) continue; if (stat->ns_status == LNET_NI_STATUS_DOWN) 
{ - if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND) - down++; - else if (ptl_status != LNET_NI_STATUS_UP) - ptl_status = LNET_NI_STATUS_DOWN; + down++; continue; } if (stat->ns_status == LNET_NI_STATUS_UP) { - if (LNET_NIDNET(nid) == rtr->lr_net) { + if (LNET_NIDNET(nid) == rte->lr_net) { up = 1; break; } - /* ptl NIs are considered down only when - * they're all down */ - if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND) - ptl_status = LNET_NI_STATUS_UP; continue; } @@ -677,10 +715,17 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) } if (up) { /* ignore downed NIs if NI for dest network is up */ - rtr->lr_downis = 0; + rte->lr_downis = 0; continue; } - rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN); + /** + * if @down is zero and this route is single-hop, it means + * we can't find NI for target network + */ + if (!down && rte->lr_hops == 1) + down = 1; + + rte->lr_downis = down; } } @@ -690,7 +735,7 @@ lnet_router_checker_event(lnet_event_t *event) lnet_rc_data_t *rcd = event->md.user_ptr; struct lnet_peer *lp; - LASSERT(rcd != NULL); + LASSERT(rcd); if (event->unlinked) { LNetInvalidateHandle(&rcd->rcd_mdh); @@ -701,11 +746,13 @@ lnet_router_checker_event(lnet_event_t *event) event->type == LNET_EVENT_REPLY); lp = rcd->rcd_gateway; - LASSERT(lp != NULL); + LASSERT(lp); - /* NB: it's called with holding lnet_res_lock, we have a few - * places need to hold both locks at the same time, please take - * care of lock ordering */ + /* + * NB: it's called with holding lnet_res_lock, we have a few + * places need to hold both locks at the same time, please take + * care of lock ordering + */ lnet_net_lock(lp->lp_cpt); if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) { /* ignore if no longer a router or rcd is replaced */ @@ -714,23 +761,26 @@ lnet_router_checker_event(lnet_event_t *event) if (event->type == LNET_EVENT_SEND) { lp->lp_ping_notsent = 0; - if (event->status == 0) + if (!event->status) goto out; } /* LNET_EVENT_REPLY */ - /* A successful REPLY means the router is up. If _any_ comms + /* + * A successful REPLY means the router is up. If _any_ comms * to the router fail I assume it's down (this will happen if * we ping alive routers to try to detect router death before - * apps get burned). */ + * apps get burned). + */ + lnet_notify_locked(lp, 1, !event->status, cfs_time_current()); - lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current()); - /* The router checker will wake up very shortly and do the + /* + * The router checker will wake up very shortly and do the * actual notification. * XXX If 'lp' stops being a router before then, it will still - * have the notification pending!!! */ - - if (avoid_asym_router_failure && event->status == 0) + * have the notification pending!!! 
+ */ + if (avoid_asym_router_failure && !event->status) lnet_parse_rc_info(rcd); out: @@ -753,7 +803,7 @@ lnet_wait_known_routerstate(void) list_for_each(entry, &the_lnet.ln_routers) { rtr = list_entry(entry, lnet_peer_t, lp_rtr_list); - if (rtr->lp_alive_count == 0) { + if (!rtr->lp_alive_count) { all_known = 0; break; } @@ -774,7 +824,7 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net) { lnet_route_t *rte; - if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) { + if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) { list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) { if (rte->lr_net == net) { rte->lr_downis = 0; @@ -811,13 +861,15 @@ lnet_update_ni_status_locked(void) continue; } - LASSERT(ni->ni_status != NULL); + LASSERT(ni->ni_status); if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) { CDEBUG(D_NET, "NI(%s:%d) status changed to down\n", libcfs_nid2str(ni->ni_nid), timeout); - /* NB: so far, this is the only place to set - * NI status to "down" */ + /* + * NB: so far, this is the only place to set + * NI status to "down" + */ ni->ni_status->ns_status = LNET_NI_STATUS_DOWN; } lnet_ni_unlock(ni); @@ -831,7 +883,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd) /* detached from network */ LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh)); - if (rcd->rcd_gateway != NULL) { + if (rcd->rcd_gateway) { int cpt = rcd->rcd_gateway->lp_cpt; lnet_net_lock(cpt); @@ -839,7 +891,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd) lnet_net_unlock(cpt); } - if (rcd->rcd_pinginfo != NULL) + if (rcd->rcd_pinginfo) LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE); LIBCFS_FREE(rcd, sizeof(*rcd)); @@ -856,14 +908,14 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) lnet_net_unlock(gateway->lp_cpt); LIBCFS_ALLOC(rcd, sizeof(*rcd)); - if (rcd == NULL) + if (!rcd) goto out; LNetInvalidateHandle(&rcd->rcd_mdh); INIT_LIST_HEAD(&rcd->rcd_list); LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE); - if (pi == NULL) + if (!pi) goto out; for (i = 0; i < LNET_MAX_RTR_NIS; i++) { @@ -885,11 +937,11 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) CERROR("Can't bind MD: %d\n", rc); goto out; } - LASSERT(rc == 0); + LASSERT(!rc); lnet_net_lock(gateway->lp_cpt); /* router table changed or someone has created rcd for this gateway */ - if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) { + if (!lnet_isrouter(gateway) || gateway->lp_rcd) { lnet_net_unlock(gateway->lp_cpt); goto out; } @@ -902,10 +954,10 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway) return rcd; out: - if (rcd != NULL) { + if (rcd) { if (!LNetHandleIsInvalid(rcd->rcd_mdh)) { rc = LNetMDUnlink(rcd->rcd_mdh); - LASSERT(rc == 0); + LASSERT(!rc); } lnet_destroy_rc_data(rcd); } @@ -936,7 +988,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) lnet_peer_addref_locked(rtr); - if (rtr->lp_ping_deadline != 0 && /* ping timed out? */ + if (rtr->lp_ping_deadline && /* ping timed out? */ cfs_time_after(now, rtr->lp_ping_deadline)) lnet_notify_locked(rtr, 1, 0, now); @@ -950,10 +1002,10 @@ lnet_ping_router_locked(lnet_peer_t *rtr) return; } - rcd = rtr->lp_rcd != NULL ? + rcd = rtr->lp_rcd ? 
rtr->lp_rcd : lnet_create_rc_data_locked(rtr); - if (rcd == NULL) + if (!rcd) return; secs = lnet_router_check_interval(rtr); @@ -964,7 +1016,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) rtr->lp_ping_deadline, rtr->lp_ping_notsent, rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp); - if (secs != 0 && !rtr->lp_ping_notsent && + if (secs && !rtr->lp_ping_notsent && cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp, cfs_time_seconds(secs)))) { int rc; @@ -972,7 +1024,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) lnet_handle_md_t mdh; id.nid = rtr->lp_nid; - id.pid = LUSTRE_SRV_LNET_PID; + id.pid = LNET_PID_LUSTRE; CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id)); rtr->lp_ping_notsent = 1; @@ -980,7 +1032,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) mdh = rcd->rcd_mdh; - if (rtr->lp_ping_deadline == 0) { + if (!rtr->lp_ping_deadline) { rtr->lp_ping_deadline = cfs_time_shift(router_ping_timeout); } @@ -991,7 +1043,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr) LNET_PROTO_PING_MATCHBITS, 0); lnet_net_lock(rtr->lp_cpt); - if (rc != 0) + if (rc) rtr->lp_ping_notsent = 0; /* no event pending */ } @@ -1001,8 +1053,9 @@ lnet_ping_router_locked(lnet_peer_t *rtr) int lnet_router_checker_start(void) { + struct task_struct *task; int rc; - int eqsz; + int eqsz = 0; LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); @@ -1012,39 +1065,33 @@ lnet_router_checker_start(void) return -EINVAL; } - if (!the_lnet.ln_routing && - live_router_check_interval <= 0 && - dead_router_check_interval <= 0) - return 0; - sema_init(&the_lnet.ln_rc_signal, 0); - /* EQ size doesn't matter; the callback is guaranteed to get every - * event */ - eqsz = 0; - rc = LNetEQAlloc(eqsz, lnet_router_checker_event, - &the_lnet.ln_rc_eqh); - if (rc != 0) { + + rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh); + if (rc) { CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc); return -ENOMEM; } the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING; - rc = PTR_ERR(kthread_run(lnet_router_checker, - NULL, "router_checker")); - if (IS_ERR_VALUE(rc)) { + task = kthread_run(lnet_router_checker, NULL, "router_checker"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("Can't start router checker thread: %d\n", rc); /* block until event callback signals exit */ down(&the_lnet.ln_rc_signal); rc = LNetEQFree(the_lnet.ln_rc_eqh); - LASSERT(rc == 0); + LASSERT(!rc); the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; return -ENOMEM; } if (check_routers_before_use) { - /* Note that a helpful side-effect of pinging all known routers + /* + * Note that a helpful side-effect of pinging all known routers * at startup is that it makes them drop stale connections they - * may have to a previous instance of me. */ + * may have to a previous instance of me. 
+ */ lnet_wait_known_routerstate(); } @@ -1061,13 +1108,15 @@ lnet_router_checker_stop(void) LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING; + /* wakeup the RC thread if it's sleeping */ + wake_up(&the_lnet.ln_rc_waitq); /* block until event callback signals exit */ down(&the_lnet.ln_rc_signal); LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); rc = LNetEQFree(the_lnet.ln_rc_eqh); - LASSERT(rc == 0); + LASSERT(!rc); } static void @@ -1091,13 +1140,13 @@ lnet_prune_rc_data(int wait_unlink) if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) { /* router checker is stopping, prune all */ list_for_each_entry(lp, &the_lnet.ln_routers, - lp_rtr_list) { - if (lp->lp_rcd == NULL) + lp_rtr_list) { + if (!lp->lp_rcd) continue; LASSERT(list_empty(&lp->lp_rcd->rcd_list)); list_add(&lp->lp_rcd->rcd_list, - &the_lnet.ln_rcd_deathrow); + &the_lnet.ln_rcd_deathrow); lp->lp_rcd = NULL; } } @@ -1119,7 +1168,7 @@ lnet_prune_rc_data(int wait_unlink) /* release all zombie RCDs */ while (!list_empty(&the_lnet.ln_rcd_zombie)) { list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie, - rcd_list) { + rcd_list) { if (LNetHandleIsInvalid(rcd->rcd_mdh)) list_move(&rcd->rcd_list, &head); } @@ -1131,7 +1180,7 @@ lnet_prune_rc_data(int wait_unlink) while (!list_empty(&head)) { rcd = list_entry(head.next, - lnet_rc_data_t, rcd_list); + lnet_rc_data_t, rcd_list); list_del_init(&rcd->rcd_list); lnet_destroy_rc_data(rcd); } @@ -1151,6 +1200,33 @@ lnet_prune_rc_data(int wait_unlink) lnet_net_unlock(LNET_LOCK_EX); } +/* + * This function is called to check if the RC should block indefinitely. + * It's called from lnet_router_checker() as well as being passed to + * wait_event_interruptible() to avoid the lost wake_up problem. + * + * When it's called from wait_event_interruptible() it is necessary to + * also not sleep if the rc state is not running to avoid a deadlock + * when the system is shutting down + */ +static inline bool +lnet_router_checker_active(void) +{ + if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) + return true; + + /* + * Router Checker thread needs to run when routing is enabled in + * order to call lnet_update_ni_status_locked() + */ + if (the_lnet.ln_routing) + return true; + + return !list_empty(&the_lnet.ln_routers) && + (live_router_check_interval > 0 || + dead_router_check_interval > 0); +} + static int lnet_router_checker(void *arg) { @@ -1159,8 +1235,6 @@ lnet_router_checker(void *arg) cfs_block_allsigs(); - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); - while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) { __u64 version; int cpt; @@ -1199,15 +1273,25 @@ rescan: lnet_prune_rc_data(0); /* don't wait for UNLINK */ - /* Call schedule_timeout() here always adds 1 to load average + /* + * Call schedule_timeout() here always adds 1 to load average * because kernel counts # active tasks as nr_running - * + nr_uninterruptible. */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); + * + nr_uninterruptible. + */ + /* + * if there are any routes then wakeup every second. 
If + * there are no routes then sleep indefinitely until woken + * up by a user adding a route + */ + if (!lnet_router_checker_active()) + wait_event_interruptible(the_lnet.ln_rc_waitq, + lnet_router_checker_active()); + else + wait_event_interruptible_timeout(the_lnet.ln_rc_waitq, + false, + cfs_time_seconds(1)); } - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING); - lnet_prune_rc_data(1); /* wait for UNLINK */ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; @@ -1216,7 +1300,7 @@ rescan: return 0; } -static void +void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) { int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); @@ -1237,7 +1321,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) int i; LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz); - if (rb == NULL) + if (!rb) return NULL; rb->rb_pool = rbp; @@ -1246,7 +1330,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) page = alloc_pages_node( cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_KERNEL | __GFP_ZERO, 0); - if (page == NULL) { + if (!page) { while (--i >= 0) __free_page(rb->rb_kiov[i].kiov_page); @@ -1254,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) return NULL; } - rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; + rb->rb_kiov[i].kiov_len = PAGE_SIZE; rb->rb_kiov[i].kiov_offset = 0; rb->rb_kiov[i].kiov_page = page; } @@ -1263,66 +1347,119 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) } static void -lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp) +lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt) { int npages = rbp->rbp_npages; - int nbuffers = 0; + struct list_head tmp; lnet_rtrbuf_t *rb; + lnet_rtrbuf_t *temp; - if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */ + if (!rbp->rbp_nbuffers) /* not initialized or already freed */ return; - LASSERT(list_empty(&rbp->rbp_msgs)); - LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers); + INIT_LIST_HEAD(&tmp); - while (!list_empty(&rbp->rbp_bufs)) { - LASSERT(rbp->rbp_credits > 0); + lnet_net_lock(cpt); + lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt); + list_splice_init(&rbp->rbp_bufs, &tmp); + rbp->rbp_req_nbuffers = 0; + rbp->rbp_nbuffers = 0; + rbp->rbp_credits = 0; + rbp->rbp_mincredits = 0; + lnet_net_unlock(cpt); - rb = list_entry(rbp->rbp_bufs.next, - lnet_rtrbuf_t, rb_list); + /* Free buffers on the free list. */ + list_for_each_entry_safe(rb, temp, &tmp, rb_list) { list_del(&rb->rb_list); lnet_destroy_rtrbuf(rb, npages); - nbuffers++; } - - LASSERT(rbp->rbp_nbuffers == nbuffers); - LASSERT(rbp->rbp_credits == nbuffers); - - rbp->rbp_nbuffers = rbp->rbp_credits = 0; } static int -lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt) +lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt) { + struct list_head rb_list; lnet_rtrbuf_t *rb; - int i; + int num_rb; + int num_buffers = 0; + int old_req_nbufs; + int npages = rbp->rbp_npages; - if (rbp->rbp_nbuffers != 0) { - LASSERT(rbp->rbp_nbuffers == nbufs); + lnet_net_lock(cpt); + /* + * If we are called for less buffers than already in the pool, we + * just lower the req_nbuffers number and excess buffers will be + * thrown away as they are returned to the free list. Credits + * then get adjusted as well. + * If we already have enough buffers allocated to serve the + * increase requested, then we can treat that the same way as we + * do the decrease. 
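lnet_rtrpool_free_bufs() above was restructured around a common teardown pattern: detach the whole buffer list and zero the counters inside the locked section, then destroy the buffers from a private list with the lock dropped, so the freeing work never extends the critical section. Reduced to its skeleton, with struct buf, pool_bufs and pool_lock as placeholders:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct buf {
		struct list_head link;
	};

	static LIST_HEAD(pool_bufs);
	static DEFINE_SPINLOCK(pool_lock);

	static void pool_free_all(void)
	{
		struct buf *b, *tmp;
		LIST_HEAD(local);

		/* detach everything atomically ... */
		spin_lock(&pool_lock);
		list_splice_init(&pool_bufs, &local);
		spin_unlock(&pool_lock);

		/* ... then free it with the lock dropped */
		list_for_each_entry_safe(b, tmp, &local, link) {
			list_del(&b->link);
			kfree(b);
		}
	}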
+ */ + num_rb = nbufs - rbp->rbp_nbuffers; + if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) { + rbp->rbp_req_nbuffers = nbufs; + lnet_net_unlock(cpt); return 0; } + /* + * store the older value of rbp_req_nbuffers and then set it to + * the new request to prevent lnet_return_rx_credits_locked() from + * freeing buffers that we need to keep around + */ + old_req_nbufs = rbp->rbp_req_nbuffers; + rbp->rbp_req_nbuffers = nbufs; + lnet_net_unlock(cpt); - for (i = 0; i < nbufs; i++) { + INIT_LIST_HEAD(&rb_list); + + /* + * allocate the buffers on a local list first. If all buffers are + * allocated successfully then join this list to the rbp buffer + * list. If not then free all allocated buffers. + */ + while (num_rb-- > 0) { rb = lnet_new_rtrbuf(rbp, cpt); + if (!rb) { + CERROR("Failed to allocate %d route bufs of %d pages\n", + nbufs, npages); - if (rb == NULL) { - CERROR("Failed to allocate %d router bufs of %d pages\n", - nbufs, rbp->rbp_npages); - return -ENOMEM; - } + lnet_net_lock(cpt); + rbp->rbp_req_nbuffers = old_req_nbufs; + lnet_net_unlock(cpt); - rbp->rbp_nbuffers++; - rbp->rbp_credits++; - rbp->rbp_mincredits++; - list_add(&rb->rb_list, &rbp->rbp_bufs); + goto failed; + } - /* No allocation "under fire" */ - /* Otherwise we'd need code to schedule blocked msgs etc */ - LASSERT(!the_lnet.ln_routing); + list_add(&rb->rb_list, &rb_list); + num_buffers++; } - LASSERT(rbp->rbp_credits == nbufs); + lnet_net_lock(cpt); + + list_splice_tail(&rb_list, &rbp->rbp_bufs); + rbp->rbp_nbuffers += num_buffers; + rbp->rbp_credits += num_buffers; + rbp->rbp_mincredits = rbp->rbp_credits; + /* + * We need to schedule blocked msg using the newly + * added buffers. + */ + while (!list_empty(&rbp->rbp_bufs) && + !list_empty(&rbp->rbp_msgs)) + lnet_schedule_blocked_locked(rbp); + + lnet_net_unlock(cpt); + return 0; + +failed: + while (!list_empty(&rb_list)) { + rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list); + list_del(&rb->rb_list); + lnet_destroy_rtrbuf(rb, npages); + } + + return -ENOMEM; } static void @@ -1337,26 +1474,28 @@ lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages) } void -lnet_rtrpools_free(void) +lnet_rtrpools_free(int keep_pools) { lnet_rtrbufpool_t *rtrp; int i; - if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */ + if (!the_lnet.ln_rtrpools) /* uninitialized or freed */ return; cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - lnet_rtrpool_free_bufs(&rtrp[0]); - lnet_rtrpool_free_bufs(&rtrp[1]); - lnet_rtrpool_free_bufs(&rtrp[2]); + lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i); + lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i); + lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i); } - cfs_percpt_free(the_lnet.ln_rtrpools); - the_lnet.ln_rtrpools = NULL; + if (!keep_pools) { + cfs_percpt_free(the_lnet.ln_rtrpools); + the_lnet.ln_rtrpools = NULL; + } } static int -lnet_nrb_tiny_calculate(int npages) +lnet_nrb_tiny_calculate(void) { int nrbs = LNET_NRB_TINY; @@ -1364,7 +1503,7 @@ lnet_nrb_tiny_calculate(int npages) LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when routing enabled\n", tiny_router_buffers); - return -1; + return -EINVAL; } if (tiny_router_buffers > 0) @@ -1375,7 +1514,7 @@ lnet_nrb_tiny_calculate(int npages) } static int -lnet_nrb_small_calculate(int npages) +lnet_nrb_small_calculate(void) { int nrbs = LNET_NRB_SMALL; @@ -1383,7 +1522,7 @@ lnet_nrb_small_calculate(int npages) LCONSOLE_ERROR_MSG(0x10c, "small_router_buffers=%d invalid when routing enabled\n", small_router_buffers); - return -1; + return -EINVAL; } if 
(small_router_buffers > 0) @@ -1394,7 +1533,7 @@ lnet_nrb_small_calculate(int npages) } static int -lnet_nrb_large_calculate(int npages) +lnet_nrb_large_calculate(void) { int nrbs = LNET_NRB_LARGE; @@ -1402,7 +1541,7 @@ lnet_nrb_large_calculate(int npages) LCONSOLE_ERROR_MSG(0x10c, "large_router_buffers=%d invalid when routing enabled\n", large_router_buffers); - return -1; + return -EINVAL; } if (large_router_buffers > 0) @@ -1416,16 +1555,12 @@ int lnet_rtrpools_alloc(int im_a_router) { lnet_rtrbufpool_t *rtrp; - int large_pages; - int small_pages = 1; int nrb_tiny; int nrb_small; int nrb_large; int rc; int i; - large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (!strcmp(forwarding, "")) { /* not set either way */ if (!im_a_router) @@ -1440,41 +1575,46 @@ lnet_rtrpools_alloc(int im_a_router) return -EINVAL; } - nrb_tiny = lnet_nrb_tiny_calculate(0); + nrb_tiny = lnet_nrb_tiny_calculate(); if (nrb_tiny < 0) return -EINVAL; - nrb_small = lnet_nrb_small_calculate(small_pages); + nrb_small = lnet_nrb_small_calculate(); if (nrb_small < 0) return -EINVAL; - nrb_large = lnet_nrb_large_calculate(large_pages); + nrb_large = lnet_nrb_large_calculate(); if (nrb_large < 0) return -EINVAL; the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(), LNET_NRBPOOLS * sizeof(lnet_rtrbufpool_t)); - if (the_lnet.ln_rtrpools == NULL) { + if (!the_lnet.ln_rtrpools) { LCONSOLE_ERROR_MSG(0x10c, "Failed to initialize router buffer pool\n"); return -ENOMEM; } cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { - lnet_rtrpool_init(&rtrp[0], 0); - rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i); - if (rc != 0) + lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0); + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX], + nrb_tiny, i); + if (rc) goto failed; - lnet_rtrpool_init(&rtrp[1], small_pages); - rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i); - if (rc != 0) + lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX], + LNET_NRB_SMALL_PAGES); + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX], + nrb_small, i); + if (rc) goto failed; - lnet_rtrpool_init(&rtrp[2], large_pages); - rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i); - if (rc != 0) + lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX], + LNET_NRB_LARGE_PAGES); + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX], + nrb_large, i); + if (rc) goto failed; } @@ -1485,10 +1625,118 @@ lnet_rtrpools_alloc(int im_a_router) return 0; failed: - lnet_rtrpools_free(); + lnet_rtrpools_free(0); return rc; } +static int +lnet_rtrpools_adjust_helper(int tiny, int small, int large) +{ + int nrb = 0; + int rc = 0; + int i; + lnet_rtrbufpool_t *rtrp; + + /* + * If the provided values for each buffer pool are different than the + * configured values, we need to take action.
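The grow path of lnet_rtrpool_adjust_bufs() earlier in this hunk is the mirror image of that teardown: stage every new buffer on a local list first and splice the batch into the shared pool only after all allocations have succeeded, so a mid-way -ENOMEM never leaves the pool half-grown. A sketch reusing the placeholder declarations from the previous example:

	static int pool_grow(int count)
	{
		struct buf *b, *tmp;
		LIST_HEAD(local);
		int i;

		/* stage new buffers privately; no lock needed yet */
		for (i = 0; i < count; i++) {
			b = kzalloc(sizeof(*b), GFP_KERNEL);
			if (!b)
				goto failed;
			list_add(&b->link, &local);
		}

		/* commit the whole batch in one critical section */
		spin_lock(&pool_lock);
		list_splice_tail(&local, &pool_bufs);
		spin_unlock(&pool_lock);
		return 0;

	failed:
		/* undo: nothing was published, so plain frees suffice */
		list_for_each_entry_safe(b, tmp, &local, link) {
			list_del(&b->link);
			kfree(b);
		}
		return -ENOMEM;
	}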
+ */ + if (tiny >= 0) { + tiny_router_buffers = tiny; + nrb = lnet_nrb_tiny_calculate(); + cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX], + nrb, i); + if (rc) + return rc; + } + } + if (small >= 0) { + small_router_buffers = small; + nrb = lnet_nrb_small_calculate(); + cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX], + nrb, i); + if (rc) + return rc; + } + } + if (large >= 0) { + large_router_buffers = large; + nrb = lnet_nrb_large_calculate(); + cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) { + rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX], + nrb, i); + if (rc) + return rc; + } + } + + return 0; +} + +int +lnet_rtrpools_adjust(int tiny, int small, int large) +{ + /* + * this function doesn't revert the changes if adding new buffers + * failed. It's up to the user space caller to revert the + * changes. + */ + if (!the_lnet.ln_routing) + return 0; + + return lnet_rtrpools_adjust_helper(tiny, small, large); +} + +int +lnet_rtrpools_enable(void) +{ + int rc; + + if (the_lnet.ln_routing) + return 0; + + if (!the_lnet.ln_rtrpools) + /* + * If routing is turned off, and we have never + * initialized the pools before, just call the + * standard buffer pool allocation routine as + * if we are just configuring this for the first + * time. + */ + return lnet_rtrpools_alloc(1); + + rc = lnet_rtrpools_adjust_helper(0, 0, 0); + if (rc) + return rc; + + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_routing = 1; + + the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED; + lnet_net_unlock(LNET_LOCK_EX); + + return 0; +} + +void +lnet_rtrpools_disable(void) +{ + if (!the_lnet.ln_routing) + return; + + lnet_net_lock(LNET_LOCK_EX); + the_lnet.ln_routing = 0; + the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED; + + tiny_router_buffers = 0; + small_router_buffers = 0; + large_router_buffers = 0; + lnet_net_unlock(LNET_LOCK_EX); + lnet_rtrpools_free(1); +} + int lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) { @@ -1499,28 +1747,28 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) LASSERT(!in_interrupt()); CDEBUG(D_NET, "%s notifying %s: %s\n", - (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid), - libcfs_nid2str(nid), - alive ? "up" : "down"); + !ni ? "userspace" : libcfs_nid2str(ni->ni_nid), + libcfs_nid2str(nid), + alive ? "up" : "down"); - if (ni != NULL && + if (ni && LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) { CWARN("Ignoring notification of %s %s by %s (different net)\n", - libcfs_nid2str(nid), alive ? "birth" : "death", - libcfs_nid2str(ni->ni_nid)); + libcfs_nid2str(nid), alive ? "birth" : "death", + libcfs_nid2str(ni->ni_nid)); return -EINVAL; } /* can't do predictions... */ if (cfs_time_after(when, now)) { CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n", - (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid), + !ni ? "userspace" : libcfs_nid2str(ni->ni_nid), libcfs_nid2str(nid), alive ? 
"up" : "down", cfs_duration_sec(cfs_time_sub(when, now))); return -EINVAL; } - if (ni != NULL && !alive && /* LND telling me she's down */ + if (ni && !alive && /* LND telling me she's down */ !auto_down) { /* auto-down disabled */ CDEBUG(D_NET, "Auto-down disabled\n"); return 0; @@ -1534,23 +1782,26 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) } lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid); - if (lp == NULL) { + if (!lp) { /* nid not found */ lnet_net_unlock(cpt); CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid)); return 0; } - /* We can't fully trust LND on reporting exact peer last_alive + /* + * We can't fully trust LND on reporting exact peer last_alive * if he notifies us about dead peer. For example ksocklnd can * call us with when == _time_when_the_node_was_booted_ if - * no connections were successfully established */ - if (ni != NULL && !alive && when < lp->lp_last_alive) + * no connections were successfully established + */ + if (ni && !alive && when < lp->lp_last_alive) when = lp->lp_last_alive; - lnet_notify_locked(lp, ni == NULL, alive, when); + lnet_notify_locked(lp, !ni, alive, when); - lnet_ni_notify_locked(ni, lp); + if (ni) + lnet_ni_notify_locked(ni, lp); lnet_peer_decref_locked(lp); diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c index 396c7c4e5..65f65a3fc 100644 --- a/drivers/staging/lustre/lnet/lnet/router_proc.c +++ b/drivers/staging/lustre/lnet/lnet/router_proc.c @@ -15,18 +15,16 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Portals; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/libcfs/libcfs.h" #include "../../include/linux/lnet/lib-lnet.h" -/* This is really lnet_proc.c. You might need to update sanity test 215 - * if any file format is changed. */ +/* + * This is really lnet_proc.c. You might need to update sanity test 215 + * if any file format is changed. 
+ */ #define LNET_LOFFT_BITS (sizeof(loff_t) * 8) /* @@ -75,25 +73,6 @@ #define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK)) -static int proc_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, - loff_t pos, void __user *buffer, int len)) -{ - int rc = handler(data, write, *ppos, buffer, *lenp); - - if (rc < 0) - return rc; - - if (write) { - *ppos += *lenp; - } else { - *lenp = rc; - *ppos += rc; - } - return 0; -} - static int __proc_lnet_stats(void *data, int write, loff_t pos, void __user *buffer, int nob) { @@ -111,11 +90,11 @@ static int __proc_lnet_stats(void *data, int write, /* read */ LIBCFS_ALLOC(ctrs, sizeof(*ctrs)); - if (ctrs == NULL) + if (!ctrs) return -ENOMEM; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) { + if (!tmpstr) { LIBCFS_FREE(ctrs, sizeof(*ctrs)); return -ENOMEM; } @@ -145,8 +124,8 @@ static int __proc_lnet_stats(void *data, int write, static int proc_lnet_stats(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_stats); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_lnet_stats); } static int proc_lnet_routes(struct ctl_table *table, int write, @@ -167,16 +146,16 @@ static int proc_lnet_routes(struct ctl_table *table, int write, LASSERT(!write); - if (*lenp == 0) + if (!*lenp) return 0; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n", the_lnet.ln_routing ? "enabled" : "disabled"); LASSERT(tmpstr + tmpsiz - s > 0); @@ -206,23 +185,22 @@ static int proc_lnet_routes(struct ctl_table *table, int write, return -ESTALE; } - for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL; - i++) { + for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) { rn_list = &the_lnet.ln_remote_nets_hash[i]; n = rn_list->next; - while (n != rn_list && route == NULL) { + while (n != rn_list && !route) { rnet = list_entry(n, lnet_remotenet_t, - lrn_list); + lrn_list); r = rnet->lrn_routes.next; while (r != &rnet->lrn_routes) { lnet_route_t *re = list_entry(r, lnet_route_t, - lr_list); - if (skip == 0) { + lr_list); + if (!skip) { route = re; break; } @@ -235,12 +213,12 @@ static int proc_lnet_routes(struct ctl_table *table, int write, } } - if (route != NULL) { + if (route) { __u32 net = rnet->lrn_net; - unsigned int hops = route->lr_hops; + __u32 hops = route->lr_hops; unsigned int priority = route->lr_priority; lnet_nid_t nid = route->lr_gateway->lp_nid; - int alive = route->lr_gateway->lp_alive; + int alive = lnet_is_route_alive(route); s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4u %8u %7s %s\n", @@ -259,9 +237,9 @@ static int proc_lnet_routes(struct ctl_table *table, int write, if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) { rc = -EFAULT; - else { + } else { off += 1; *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); } @@ -269,7 +247,7 @@ static int proc_lnet_routes(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -291,16 +269,16 @@ static int proc_lnet_routers(struct ctl_table *table, int write, LASSERT(!write); - if (*lenp == 0) + if 
(!*lenp) return 0; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n", "ref", "rtr_ref", "alive_cnt", "state", @@ -330,9 +308,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write, while (r != &the_lnet.ln_routers) { lnet_peer_t *lp = list_entry(r, lnet_peer_t, - lp_rtr_list); + lp_rtr_list); - if (skip == 0) { + if (!skip) { peer = lp; break; } @@ -341,7 +319,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write, r = r->next; } - if (peer != NULL) { + if (peer) { lnet_nid_t nid = peer->lp_nid; unsigned long now = cfs_time_current(); unsigned long deadline = peer->lp_ping_deadline; @@ -356,19 +334,21 @@ static int proc_lnet_routers(struct ctl_table *table, int write, lnet_route_t *rtr; if ((peer->lp_ping_feats & - LNET_PING_FEAT_NI_STATUS) != 0) { + LNET_PING_FEAT_NI_STATUS)) { list_for_each_entry(rtr, &peer->lp_routes, - lr_gwlist) { - /* downis on any route should be the - * number of downis on the gateway */ - if (rtr->lr_downis != 0) { + lr_gwlist) { + /* + * downis on any route should be the + * number of downis on the gateway + */ + if (rtr->lr_downis) { down_ni = rtr->lr_downis; break; } } } - if (deadline == 0) + if (!deadline) s += snprintf(s, tmpstr + tmpsiz - s, "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n", nrefs, nrtrrefs, alive_cnt, @@ -394,9 +374,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write, if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) { rc = -EFAULT; - else { + } else { off += 1; *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); } @@ -404,7 +384,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -427,7 +407,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS); LASSERT(!write); - if (*lenp == 0) + if (!*lenp) return 0; if (cpt >= LNET_CPT_NUMBER) { @@ -436,12 +416,12 @@ static int proc_lnet_peers(struct ctl_table *table, int write, } LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n", "nid", "refs", "state", "last", "max", @@ -470,18 +450,20 @@ static int proc_lnet_peers(struct ctl_table *table, int write, } while (hash < LNET_PEER_HASH_SIZE) { - if (p == NULL) + if (!p) p = ptable->pt_hash[hash].next; while (p != &ptable->pt_hash[hash]) { lnet_peer_t *lp = list_entry(p, lnet_peer_t, - lp_hashlist); - if (skip == 0) { + lp_hashlist); + if (!skip) { peer = lp; - /* minor optimization: start from idx+1 + /* + * minor optimization: start from idx+1 * on next iteration if we've just - * drained lp_hashlist */ + * drained lp_hashlist + */ if (lp->lp_hashlist.next == &ptable->pt_hash[hash]) { hoff = 1; @@ -497,7 +479,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, p = lp->lp_hashlist.next; } - if (peer != NULL) + if (peer) break; p = NULL; @@ -505,7 +487,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, hash++; } - if (peer != NULL) { + if (peer) { lnet_nid_t nid = peer->lp_nid; 
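The proc_call_handler() deleted above was a thin adapter between the ctl_table proc_handler contract and per-file handlers written in (pos, len) terms; the shared lprocfs_call_handler() that replaces it keeps the same behaviour. Its whole job was the *ppos/*lenp bookkeeping, restated here with comments:

	static int proc_call_handler(void *data, int write, loff_t *ppos,
				     void __user *buffer, size_t *lenp,
				     int (*handler)(void *data, int write,
						    loff_t pos,
						    void __user *buffer,
						    int len))
	{
		int rc = handler(data, write, *ppos, buffer, *lenp);

		if (rc < 0)
			return rc;

		if (write) {
			*ppos += *lenp;		/* the whole write was consumed */
		} else {
			*lenp = rc;		/* bytes actually produced */
			*ppos += rc;		/* resume point for next read() */
		}
		return 0;
	}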
int nrefs = peer->lp_refcount; int lastalive = -1; @@ -553,7 +535,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, cpt++; hash = 0; hoff = 1; - if (peer == NULL && cpt < LNET_CPT_NUMBER) + if (!peer && cpt < LNET_CPT_NUMBER) goto again; } } @@ -571,7 +553,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -593,7 +575,7 @@ static int __proc_lnet_buffers(void *data, int write, /* (4 %d) * 4 * LNET_CPT_NUMBER */ tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ @@ -603,7 +585,7 @@ static int __proc_lnet_buffers(void *data, int write, "pages", "count", "credits", "min"); LASSERT(tmpstr + tmpsiz - s > 0); - if (the_lnet.ln_rtrpools == NULL) + if (!the_lnet.ln_rtrpools) goto out; /* I'm not a router */ for (idx = 0; idx < LNET_NRBPOOLS; idx++) { @@ -638,8 +620,8 @@ static int __proc_lnet_buffers(void *data, int write, static int proc_lnet_buffers(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_buffers); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_lnet_buffers); } static int proc_lnet_nis(struct ctl_table *table, int write, @@ -653,16 +635,16 @@ static int proc_lnet_nis(struct ctl_table *table, int write, LASSERT(!write); - if (*lenp == 0) + if (!*lenp) return 0; LIBCFS_ALLOC(tmpstr, tmpsiz); - if (tmpstr == NULL) + if (!tmpstr) return -ENOMEM; s = tmpstr; /* points to current position in tmpstr[] */ - if (*ppos == 0) { + if (!*ppos) { s += snprintf(s, tmpstr + tmpsiz - s, "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n", "nid", "status", "alive", "refs", "peer", @@ -680,7 +662,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write, while (n != &the_lnet.ln_nis) { lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list); - if (skip == 0) { + if (!skip) { ni = a_ni; break; } @@ -689,7 +671,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write, n = n->next; } - if (ni != NULL) { + if (ni) { struct lnet_tx_queue *tq; char *stat; time64_t now = ktime_get_real_seconds(); @@ -705,15 +687,17 @@ static int proc_lnet_nis(struct ctl_table *table, int write, last_alive = 0; lnet_ni_lock(ni); - LASSERT(ni->ni_status != NULL); + LASSERT(ni->ni_status); stat = (ni->ni_status->ns_status == LNET_NI_STATUS_UP) ? 
"up" : "down"; lnet_ni_unlock(ni); - /* we actually output credits information for - * TX queue of each partition */ + /* + * we actually output credits information for + * TX queue of each partition + */ cfs_percpt_for_each(tq, i, ni->ni_tx_queues) { - for (j = 0; ni->ni_cpts != NULL && + for (j = 0; ni->ni_cpts && j < ni->ni_ncpts; j++) { if (i == ni->ni_cpts[j]) break; @@ -722,18 +706,19 @@ static int proc_lnet_nis(struct ctl_table *table, int write, if (j == ni->ni_ncpts) continue; - if (i != 0) + if (i) lnet_net_lock(i); s += snprintf(s, tmpstr + tmpsiz - s, - "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n", - libcfs_nid2str(ni->ni_nid), stat, - last_alive, *ni->ni_refs[i], - ni->ni_peertxcredits, - ni->ni_peerrtrcredits, - tq->tq_credits_max, - tq->tq_credits, tq->tq_credits_min); - if (i != 0) + "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n", + libcfs_nid2str(ni->ni_nid), stat, + last_alive, *ni->ni_refs[i], + ni->ni_peertxcredits, + ni->ni_peerrtrcredits, + tq->tq_credits_max, + tq->tq_credits, + tq->tq_credits_min); + if (i) lnet_net_unlock(i); } LASSERT(tmpstr + tmpsiz - s > 0); @@ -755,7 +740,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write, LIBCFS_FREE(tmpstr, tmpsiz); - if (rc == 0) + if (!rc) *lenp = len; return rc; @@ -795,8 +780,6 @@ static struct lnet_portal_rotors portal_rotors[] = { }, }; -extern int portal_rotor; - static int __proc_lnet_portal_rotor(void *data, int write, loff_t pos, void __user *buffer, int nob) { @@ -807,7 +790,7 @@ static int __proc_lnet_portal_rotor(void *data, int write, int i; LIBCFS_ALLOC(buf, buf_len); - if (buf == NULL) + if (!buf) return -ENOMEM; if (!write) { @@ -831,7 +814,7 @@ static int __proc_lnet_portal_rotor(void *data, int write, rc = 0; } else { rc = cfs_trace_copyout_string(buffer, nob, - buf + pos, "\n"); + buf + pos, "\n"); } goto out; } @@ -844,9 +827,9 @@ static int __proc_lnet_portal_rotor(void *data, int write, rc = -EINVAL; lnet_res_lock(0); - for (i = 0; portal_rotors[i].pr_name != NULL; i++) { - if (strncasecmp(portal_rotors[i].pr_name, tmp, - strlen(portal_rotors[i].pr_name)) == 0) { + for (i = 0; portal_rotors[i].pr_name; i++) { + if (!strncasecmp(portal_rotors[i].pr_name, tmp, + strlen(portal_rotors[i].pr_name))) { portal_rotor = portal_rotors[i].pr_value; rc = 0; break; @@ -862,8 +845,8 @@ static int proc_lnet_portal_rotor(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_lnet_portal_rotor); + return lprocfs_call_handler(table->data, write, ppos, buffer, lenp, + __proc_lnet_portal_rotor); } static struct ctl_table lnet_table[] = { diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index 1f04cc1fc..dcb6e506f 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -51,14 +51,14 @@ MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by d static void brw_client_fini(sfw_test_instance_t *tsi) { - srpc_bulk_t *bulk; - sfw_test_unit_t *tsu; + srpc_bulk_t *bulk; + sfw_test_unit_t *tsu; LASSERT(tsi->tsi_is_client); list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { bulk = tsu->tsu_private; - if (bulk == NULL) + if (!bulk) continue; srpc_free_bulk(bulk); @@ -69,38 +69,42 @@ brw_client_fini(sfw_test_instance_t *tsi) static int brw_client_init(sfw_test_instance_t *tsi) { - sfw_session_t *sn = tsi->tsi_batch->bat_session; - int flags; - int npg; - int len; - int opc; - 
srpc_bulk_t *bulk; - sfw_test_unit_t *tsu; - - LASSERT(sn != NULL); + sfw_session_t *sn = tsi->tsi_batch->bat_session; + int flags; + int npg; + int len; + int opc; + srpc_bulk_t *bulk; + sfw_test_unit_t *tsu; + + LASSERT(sn); LASSERT(tsi->tsi_is_client); - if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { - test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; + if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { + test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - npg = breq->blk_npg; - /* NB: this is not going to work for variable page size, - * but we have to keep it for compatibility */ - len = npg * PAGE_CACHE_SIZE; + npg = breq->blk_npg; + /* + * NB: this is not going to work for variable page size, + * but we have to keep it for compatibility + */ + len = npg * PAGE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; - /* I should never get this step if it's unknown feature - * because make_session will reject unknown feature */ - LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); + /* + * I should never get this step if it's unknown feature + * because make_session will reject unknown feature + */ + LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + len = breq->blk_len; + npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; } if (npg > LNET_MAX_IOV || npg <= 0) @@ -116,7 +120,7 @@ brw_client_init(sfw_test_instance_t *tsi) list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) { bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid), npg, len, opc == LST_BRW_READ); - if (bulk == NULL) { + if (!bulk) { brw_client_fini(tsi); return -ENOMEM; } @@ -127,9 +131,9 @@ brw_client_init(sfw_test_instance_t *tsi) return 0; } -#define BRW_POISON 0xbeefbeefbeefbeefULL -#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL -#define BRW_MSIZE sizeof(__u64) +#define BRW_POISON 0xbeefbeefbeefbeefULL +#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL +#define BRW_MSIZE sizeof(__u64) static int brw_inject_one_error(void) @@ -141,7 +145,7 @@ brw_inject_one_error(void) ktime_get_ts64(&ts); - if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0) + if (!((ts.tv_nsec / NSEC_PER_USEC) & 1)) return 0; return brw_inject_errors--; @@ -151,9 +155,9 @@ static void brw_fill_page(struct page *pg, int pattern, __u64 magic) { char *addr = page_address(pg); - int i; + int i; - LASSERT(addr != NULL); + LASSERT(addr); if (pattern == LST_BRW_CHECK_NONE) return; @@ -163,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) if (pattern == LST_BRW_CHECK_SIMPLE) { memcpy(addr, &magic, BRW_MSIZE); - addr += PAGE_CACHE_SIZE - BRW_MSIZE; + addr += PAGE_SIZE - BRW_MSIZE; memcpy(addr, &magic, BRW_MSIZE); return; } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) + for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); return; } @@ -180,22 +184,22 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) static int brw_check_page(struct page *pg, int pattern, __u64 magic) { - char *addr = page_address(pg); - __u64 data = 0; /* make compiler happy */ - int i; + char *addr = page_address(pg); + __u64 data = 0; /* make compiler happy */ + int i; - LASSERT(addr != NULL); + LASSERT(addr); if (pattern == LST_BRW_CHECK_NONE) return 0; if (pattern == LST_BRW_CHECK_SIMPLE) { - data = *((__u64 *) addr); + data = *((__u64 *)addr); if (data != magic) goto bad_data; - addr += 
PAGE_CACHE_SIZE - BRW_MSIZE; - data = *((__u64 *) addr); + addr += PAGE_SIZE - BRW_MSIZE; + data = *((__u64 *)addr); if (data != magic) goto bad_data; @@ -203,8 +207,8 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { - data = *(((__u64 *) addr) + i); + for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) { + data = *(((__u64 *)addr) + i); if (data != magic) goto bad_data; } @@ -216,7 +220,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) bad_data: CERROR("Bad data in page %p: %#llx, %#llx expected\n", - pg, data, magic); + pg, data, magic); return 1; } @@ -240,9 +244,9 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) for (i = 0; i < bk->bk_niov; i++) { pg = bk->bk_iovs[i].kiov_page; - if (brw_check_page(pg, pattern, magic) != 0) { + if (brw_check_page(pg, pattern, magic)) { CERROR("Bulk page %p (%d/%d) is corrupted!\n", - pg, i, bk->bk_niov); + pg, i, bk->bk_niov); return 1; } } @@ -252,7 +256,7 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) static int brw_client_prep_rpc(sfw_test_unit_t *tsu, - lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) + lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) { srpc_bulk_t *bulk = tsu->tsu_private; sfw_test_instance_t *tsi = tsu->tsu_instance; @@ -265,32 +269,34 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, int opc; int rc; - LASSERT(sn != NULL); - LASSERT(bulk != NULL); + LASSERT(sn); + LASSERT(bulk); - if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { + if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0; - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - npg = breq->blk_npg; - len = npg * PAGE_CACHE_SIZE; + npg = breq->blk_npg; + len = npg * PAGE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; - /* I should never get this step if it's unknown feature - * because make_session will reject unknown feature */ - LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); + /* + * I should never get this step if it's unknown feature + * because make_session will reject unknown feature + */ + LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); - opc = breq->blk_opc; + opc = breq->blk_opc; flags = breq->blk_flags; - len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + len = breq->blk_len; + npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; } rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); - if (rc != 0) + if (rc) return rc; memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg])); @@ -301,8 +307,8 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, req = &rpc->crpc_reqstmsg.msg_body.brw_reqst; req->brw_flags = flags; - req->brw_rw = opc; - req->brw_len = len; + req->brw_rw = opc; + req->brw_len = len; *rpcpp = rpc; return 0; @@ -318,14 +324,14 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; - LASSERT(sn != NULL); + LASSERT(sn); - if (rpc->crpc_status != 0) { + if (rpc->crpc_status) { CERROR("BRW RPC to %s failed with %d\n", - libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); + libcfs_id2str(rpc->crpc_dest), rpc->crpc_status); if (!tsi->tsi_stopping) /* rpc could have been aborted */ atomic_inc(&sn->sn_brw_errors); - goto out; + return; } if (msg->msg_magic != SRPC_MSG_MAGIC) { @@ -334,27 +340,24 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) } 
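brw_fill_page() and brw_check_page() above implement the selftest's bulk verification scheme: the sender stamps a 64-bit magic into each page (every word for LST_BRW_CHECK_FULL, only the first and last word for LST_BRW_CHECK_SIMPLE) and the receiver regenerates the pattern and compares. The FULL variant boils down to the following, where the helper name is illustrative and size would be PAGE_SIZE in the selftest:

	#include <linux/types.h>

	#define BRW_MAGIC	0xeeb0eeb1eeb2eeb3ULL
	#define BRW_MSIZE	sizeof(__u64)

	static int brw_full_roundtrip(void *addr, size_t size, __u64 magic)
	{
		__u64 *p = addr;
		size_t i, n = size / BRW_MSIZE;

		for (i = 0; i < n; i++)		/* fill side (sender) */
			p[i] = magic;

		for (i = 0; i < n; i++)		/* check side (receiver) */
			if (p[i] != magic)
				return 1;	/* corrupted in transit */
		return 0;
	}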
CDEBUG(reply->brw_status ? D_WARNING : D_NET, - "BRW RPC to %s finished with brw_status: %d\n", - libcfs_id2str(rpc->crpc_dest), reply->brw_status); + "BRW RPC to %s finished with brw_status: %d\n", + libcfs_id2str(rpc->crpc_dest), reply->brw_status); - if (reply->brw_status != 0) { + if (reply->brw_status) { atomic_inc(&sn->sn_brw_errors); rpc->crpc_status = -(int)reply->brw_status; - goto out; + return; } if (reqst->brw_rw == LST_BRW_WRITE) - goto out; + return; - if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) { + if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) { CERROR("Bulk data from %s is corrupted!\n", - libcfs_id2str(rpc->crpc_dest)); + libcfs_id2str(rpc->crpc_dest)); atomic_inc(&sn->sn_brw_errors); rpc->crpc_status = -EBADMSG; } - -out: - return; } static void @@ -362,17 +365,17 @@ brw_server_rpc_done(struct srpc_server_rpc *rpc) { srpc_bulk_t *blk = rpc->srpc_bulk; - if (blk == NULL) + if (!blk) return; - if (rpc->srpc_status != 0) + if (rpc->srpc_status) CERROR("Bulk transfer %s %s has failed: %d\n", - blk->bk_sink ? "from" : "to", - libcfs_id2str(rpc->srpc_peer), rpc->srpc_status); + blk->bk_sink ? "from" : "to", + libcfs_id2str(rpc->srpc_peer), rpc->srpc_status); else CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n", - blk->bk_niov, blk->bk_sink ? "from" : "to", - libcfs_id2str(rpc->srpc_peer)); + blk->bk_niov, blk->bk_sink ? "from" : "to", + libcfs_id2str(rpc->srpc_peer)); sfw_free_pages(rpc); } @@ -385,16 +388,16 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status) srpc_brw_reqst_t *reqst; srpc_msg_t *reqstmsg; - LASSERT(rpc->srpc_bulk != NULL); - LASSERT(rpc->srpc_reqstbuf != NULL); + LASSERT(rpc->srpc_bulk); + LASSERT(rpc->srpc_reqstbuf); reqstmsg = &rpc->srpc_reqstbuf->buf_msg; reqst = &reqstmsg->msg_body.brw_reqst; - if (status != 0) { + if (status) { CERROR("BRW bulk %s failed for RPC from %s: %d\n", - reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE", - libcfs_id2str(rpc->srpc_peer), status); + reqst->brw_rw == LST_BRW_READ ? 
"READ" : "WRITE", + libcfs_id2str(rpc->srpc_peer), status); return -EIO; } @@ -404,9 +407,9 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status) if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) __swab64s(&magic); - if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) { + if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) { CERROR("Bulk data from %s is corrupted!\n", - libcfs_id2str(rpc->srpc_peer)); + libcfs_id2str(rpc->srpc_peer)); reply->brw_status = EBADMSG; } @@ -448,27 +451,27 @@ brw_server_handle(struct srpc_server_rpc *rpc) return 0; } - if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) { replymsg->msg_ses_feats = LST_FEATS_MASK; reply->brw_status = EPROTO; return 0; } - if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) { + if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) { /* compat with old version */ - if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) { + if (reqst->brw_len & ~CFS_PAGE_MASK) { reply->brw_status = EINVAL; return 0; } - npg = reqst->brw_len >> PAGE_CACHE_SHIFT; + npg = reqst->brw_len >> PAGE_SHIFT; } else { - npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT; } replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; - if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) { + if (!reqst->brw_len || npg > LNET_MAX_IOV) { reply->brw_status = EINVAL; return 0; } @@ -476,7 +479,7 @@ brw_server_handle(struct srpc_server_rpc *rpc) rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg, reqst->brw_len, reqst->brw_rw == LST_BRW_WRITE); - if (rc != 0) + if (rc) return rc; if (reqst->brw_rw == LST_BRW_READ) @@ -490,8 +493,8 @@ brw_server_handle(struct srpc_server_rpc *rpc) sfw_test_client_ops_t brw_test_client; void brw_init_test_client(void) { - brw_test_client.tso_init = brw_client_init; - brw_test_client.tso_fini = brw_client_fini; + brw_test_client.tso_init = brw_client_init; + brw_test_client.tso_fini = brw_client_fini; brw_test_client.tso_prep_rpc = brw_client_prep_rpc; brw_test_client.tso_done_rpc = brw_client_done_rpc; }; @@ -499,10 +502,9 @@ void brw_init_test_client(void) srpc_service_t brw_test_service; void brw_init_test_service(void) { - - brw_test_service.sv_id = SRPC_SERVICE_BRW; - brw_test_service.sv_name = "brw_test"; - brw_test_service.sv_handler = brw_server_handle; + brw_test_service.sv_id = SRPC_SERVICE_BRW; + brw_test_service.sv_name = "brw_test"; + brw_test_service.sv_handler = brw_server_handle; brw_test_service.sv_bulk_ready = brw_bulk_ready; - brw_test_service.sv_wi_total = brw_srv_workitems; + brw_test_service.sv_wi_total = brw_srv_workitems; } diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index a53466540..79ee6c0bf 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -51,20 +51,19 @@ lst_session_new_ioctl(lstio_session_new_args_t *args) char *name; int rc; - if (args->lstio_ses_idp == NULL || /* address for output sid */ - args->lstio_ses_key == 0 || /* no key is specified */ - args->lstio_ses_namep == NULL || /* session name */ + if (!args->lstio_ses_idp || /* address for output sid */ + !args->lstio_ses_key || /* no key is specified */ + !args->lstio_ses_namep || /* session name */ args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - 
args->lstio_ses_namep, - args->lstio_ses_nmlen)) { + if (copy_from_user(name, args->lstio_ses_namep, + args->lstio_ses_nmlen)) { LIBCFS_FREE(name, args->lstio_ses_nmlen + 1); return -EFAULT; } @@ -96,12 +95,12 @@ lst_session_info_ioctl(lstio_session_info_args_t *args) { /* no checking of key */ - if (args->lstio_ses_idp == NULL || /* address for output sid */ - args->lstio_ses_keyp == NULL || /* address for output key */ - args->lstio_ses_featp == NULL || /* address for output features */ - args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */ - args->lstio_ses_namep == NULL || /* address for output name */ - args->lstio_ses_nmlen <= 0 || + if (!args->lstio_ses_idp || /* address for output sid */ + !args->lstio_ses_keyp || /* address for output key */ + !args->lstio_ses_featp || /* address for output features */ + !args->lstio_ses_ndinfo || /* address for output ndinfo */ + !args->lstio_ses_namep || /* address for output name */ + args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -116,28 +115,28 @@ lst_session_info_ioctl(lstio_session_info_args_t *args) static int lst_debug_ioctl(lstio_debug_args_t *args) { - char *name = NULL; - int client = 1; - int rc; + char *name = NULL; + int client = 1; + int rc; if (args->lstio_dbg_key != console_session.ses_key) return -EACCES; - if (args->lstio_dbg_resultp == NULL) + if (!args->lstio_dbg_resultp) return -EINVAL; - if (args->lstio_dbg_namep != NULL && /* name of batch/group */ + if (args->lstio_dbg_namep && /* name of batch/group */ (args->lstio_dbg_nmlen <= 0 || args->lstio_dbg_nmlen > LST_NAME_SIZE)) return -EINVAL; - if (args->lstio_dbg_namep != NULL) { + if (args->lstio_dbg_namep) { LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; if (copy_from_user(name, args->lstio_dbg_namep, - args->lstio_dbg_nmlen)) { + args->lstio_dbg_nmlen)) { LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1); return -EFAULT; @@ -157,7 +156,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) case LST_OPC_BATCHSRV: client = 0; case LST_OPC_BATCHCLI: - if (name == NULL) + if (!name) goto out; rc = lstcon_batch_debug(args->lstio_dbg_timeout, @@ -165,7 +164,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) break; case LST_OPC_GROUP: - if (name == NULL) + if (!name) goto out; rc = lstcon_group_debug(args->lstio_dbg_timeout, @@ -174,7 +173,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) case LST_OPC_NODES: if (args->lstio_dbg_count <= 0 || - args->lstio_dbg_idsp == NULL) + !args->lstio_dbg_idsp) goto out; rc = lstcon_nodes_debug(args->lstio_dbg_timeout, @@ -188,7 +187,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) } out: - if (name != NULL) + if (name) LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1); return rc; @@ -203,18 +202,17 @@ lst_group_add_ioctl(lstio_group_add_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_namep == NULL || + if (!args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen); return -EFAULT; } @@ -231,24 +229,23 @@ lst_group_add_ioctl(lstio_group_add_args_t *args) static int lst_group_del_ioctl(lstio_group_del_args_t *args) { - int rc; - char *name; + int rc; + char *name; if 
(args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_namep == NULL || + if (!args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -265,24 +262,23 @@ lst_group_del_ioctl(lstio_group_del_args_t *args) static int lst_group_update_ioctl(lstio_group_update_args_t *args) { - int rc; - char *name; + int rc; + char *name; if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_resultp == NULL || - args->lstio_grp_namep == NULL || + if (!args->lstio_grp_resultp || + !args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, + if (copy_from_user(name, args->lstio_grp_namep, args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; @@ -300,8 +296,8 @@ lst_group_update_ioctl(lstio_group_update_args_t *args) break; case LST_GROUP_RMND: - if (args->lstio_grp_count <= 0 || - args->lstio_grp_idsp == NULL) { + if (args->lstio_grp_count <= 0 || + !args->lstio_grp_idsp) { rc = -EINVAL; break; } @@ -330,21 +326,21 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_idsp == NULL || /* array of ids */ + if (!args->lstio_grp_idsp || /* array of ids */ args->lstio_grp_count <= 0 || - args->lstio_grp_resultp == NULL || - args->lstio_grp_featp == NULL || - args->lstio_grp_namep == NULL || + !args->lstio_grp_resultp || + !args->lstio_grp_featp || + !args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; if (copy_from_user(name, args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; @@ -357,7 +353,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) args->lstio_grp_resultp); LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); - if (rc == 0 && + if (!rc && copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) { return -EINVAL; } @@ -371,15 +367,15 @@ lst_group_list_ioctl(lstio_group_list_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_idx < 0 || - args->lstio_grp_namep == NULL || + if (args->lstio_grp_idx < 0 || + !args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; return lstcon_group_list(args->lstio_grp_idx, - args->lstio_grp_nmlen, - args->lstio_grp_namep); + args->lstio_grp_nmlen, + args->lstio_grp_namep); } static int @@ -393,24 +389,24 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) if (args->lstio_grp_key != console_session.ses_key) return -EACCES; - if (args->lstio_grp_namep == NULL || + if (!args->lstio_grp_namep || args->lstio_grp_nmlen <= 0 || args->lstio_grp_nmlen > LST_NAME_SIZE) return -EINVAL; - if (args->lstio_grp_entp == NULL && /* output: group entry */ - args->lstio_grp_dentsp == NULL) /* 
output: node entry */ + if (!args->lstio_grp_entp && /* output: group entry */ + !args->lstio_grp_dentsp) /* output: node entry */ return -EINVAL; - if (args->lstio_grp_dentsp != NULL) { /* have node entry */ - if (args->lstio_grp_idxp == NULL || /* node index */ - args->lstio_grp_ndentp == NULL) /* # of node entry */ + if (args->lstio_grp_dentsp) { /* have node entry */ + if (!args->lstio_grp_idxp || /* node index */ + !args->lstio_grp_ndentp) /* # of node entry */ return -EINVAL; if (copy_from_user(&ndent, args->lstio_grp_ndentp, - sizeof(ndent)) || + sizeof(ndent)) || copy_from_user(&index, args->lstio_grp_idxp, - sizeof(index))) + sizeof(index))) return -EFAULT; if (ndent <= 0 || index < 0) @@ -418,12 +414,11 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) } LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -435,10 +430,10 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); - if (rc != 0) + if (rc) return rc; - if (args->lstio_grp_dentsp != NULL && + if (args->lstio_grp_dentsp && (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) || copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent)))) return -EFAULT; @@ -455,18 +450,17 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_namep == NULL || + if (!args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -489,18 +483,17 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_namep == NULL || + if (!args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -524,19 +517,18 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args) if (args->lstio_bat_key != console_session.ses_key) return -EACCES; - if (args->lstio_bat_resultp == NULL || - args->lstio_bat_namep == NULL || + if (!args->lstio_bat_resultp || + !args->lstio_bat_namep || args->lstio_bat_nmlen <= 0 || args->lstio_bat_nmlen > LST_NAME_SIZE) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1); - if (name == NULL) + if (!name) return -ENOMEM; - if (copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -554,14 +546,14 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args) static int lst_batch_query_ioctl(lstio_batch_query_args_t *args) { - char *name; - int rc; + char 
*name;
+	int rc;

 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;

-	if (args->lstio_bat_resultp == NULL ||
-	    args->lstio_bat_namep == NULL ||
+	if (!args->lstio_bat_resultp ||
+	    !args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
@@ -570,12 +562,11 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args)
 		return -EINVAL;

 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;

-	if (copy_from_user(name,
-			   args->lstio_bat_namep,
-			   args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}
@@ -599,8 +590,8 @@ lst_batch_list_ioctl(lstio_batch_list_args_t *args)
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;

-	if (args->lstio_bat_idx < 0 ||
-	    args->lstio_bat_namep == NULL ||
+	if (args->lstio_bat_idx < 0 ||
+	    !args->lstio_bat_namep ||
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;
@@ -621,24 +612,24 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
 	if (args->lstio_bat_key != console_session.ses_key)
 		return -EACCES;

-	if (args->lstio_bat_namep == NULL ||	/* batch name */
+	if (!args->lstio_bat_namep ||		/* batch name */
 	    args->lstio_bat_nmlen <= 0 ||
 	    args->lstio_bat_nmlen > LST_NAME_SIZE)
 		return -EINVAL;

-	if (args->lstio_bat_entp == NULL &&	/* output: batch entry */
-	    args->lstio_bat_dentsp == NULL)	/* output: node entry */
+	if (!args->lstio_bat_entp &&		/* output: batch entry */
+	    !args->lstio_bat_dentsp)		/* output: node entry */
 		return -EINVAL;

-	if (args->lstio_bat_dentsp != NULL) {		/* have node entry */
-		if (args->lstio_bat_idxp == NULL ||	/* node index */
-		    args->lstio_bat_ndentp == NULL)	/* # of node entry */
+	if (args->lstio_bat_dentsp) {		/* have node entry */
+		if (!args->lstio_bat_idxp ||	/* node index */
+		    !args->lstio_bat_ndentp)	/* # of node entry */
 			return -EINVAL;

 		if (copy_from_user(&index, args->lstio_bat_idxp,
-				   sizeof(index)) ||
+				   sizeof(index)) ||
 		    copy_from_user(&ndent, args->lstio_bat_ndentp,
-				   sizeof(ndent)))
+				   sizeof(ndent)))
 			return -EFAULT;

 		if (ndent <= 0 || index < 0)
@@ -646,28 +637,27 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
 	}

 	LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
-	if (name == NULL)
+	if (!name)
 		return -ENOMEM;

-	if (copy_from_user(name,
-			   args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+	if (copy_from_user(name, args->lstio_bat_namep,
+			   args->lstio_bat_nmlen)) {
 		LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 		return -EFAULT;
 	}

 	name[args->lstio_bat_nmlen] = 0;

-	rc = lstcon_batch_info(name,
-			       args->lstio_bat_entp, args->lstio_bat_server,
-			       args->lstio_bat_testidx, &index, &ndent,
-			       args->lstio_bat_dentsp);
+	rc = lstcon_batch_info(name, args->lstio_bat_entp,
+			       args->lstio_bat_server, args->lstio_bat_testidx,
+			       &index, &ndent, args->lstio_bat_dentsp);

 	LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);

-	if (rc != 0)
+	if (rc)
 		return rc;

-	if (args->lstio_bat_dentsp != NULL &&
+	if (args->lstio_bat_dentsp &&
 	    (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
 	     copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
 		rc = -EFAULT;
@@ -679,98 +669,104 @@ static int lst_stat_query_ioctl(lstio_stat_args_t *args)
 {
 	int rc;
-	char *name;
+	char *name = NULL;

 	/* TODO: not finished */
 	if (args->lstio_sta_key != console_session.ses_key)
 		return -EACCES;

-	if (args->lstio_sta_resultp == NULL ||
-	    (args->lstio_sta_namep == NULL &&
-	     args->lstio_sta_idsp == NULL) ||
-	    args->lstio_sta_nmlen <= 0 ||
-	    args->lstio_sta_nmlen > LST_NAME_SIZE)
+	if (!args->lstio_sta_resultp)
 		return -EINVAL;

-	if (args->lstio_sta_idsp != NULL &&
-	    args->lstio_sta_count <= 0)
-		return -EINVAL;
-
-	LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
-	if (name == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(name, args->lstio_sta_namep,
-			   args->lstio_sta_nmlen)) {
-		LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
-		return -EFAULT;
-	}
+	if (args->lstio_sta_idsp) {
+		if (args->lstio_sta_count <= 0)
+			return -EINVAL;

-	if (args->lstio_sta_idsp == NULL) {
-		rc = lstcon_group_stat(name, args->lstio_sta_timeout,
-				       args->lstio_sta_resultp);
-	} else {
 		rc = lstcon_nodes_stat(args->lstio_sta_count,
 				       args->lstio_sta_idsp,
 				       args->lstio_sta_timeout,
 				       args->lstio_sta_resultp);
-	}
+	} else if (args->lstio_sta_namep) {
+		if (args->lstio_sta_nmlen <= 0 ||
+		    args->lstio_sta_nmlen > LST_NAME_SIZE)
+			return -EINVAL;
+
+		LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
+		if (!name)
+			return -ENOMEM;

-	LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
+		rc = copy_from_user(name, args->lstio_sta_namep,
+				    args->lstio_sta_nmlen);
+		if (!rc)
+			rc = lstcon_group_stat(name, args->lstio_sta_timeout,
+					       args->lstio_sta_resultp);
+		else
+			rc = -EFAULT;
+	} else {
+		rc = -EINVAL;
+	}
+
+	if (name)
+		LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
 	return rc;
 }

 static int lst_test_add_ioctl(lstio_test_args_t *args)
 {
-	char *batch_name;
-	char *src_name = NULL;
-	char *dst_name = NULL;
-	void *param = NULL;
-	int ret = 0;
-	int rc = -ENOMEM;
-
-	if (args->lstio_tes_resultp == NULL ||
-	    args->lstio_tes_retp == NULL ||
-	    args->lstio_tes_bat_name == NULL ||	/* no specified batch */
+	char *batch_name;
+	char *src_name = NULL;
+	char *dst_name = NULL;
+	void *param = NULL;
+	int ret = 0;
+	int rc = -ENOMEM;
+
+	if (!args->lstio_tes_resultp ||
+	    !args->lstio_tes_retp ||
+	    !args->lstio_tes_bat_name ||	/* no specified batch */
 	    args->lstio_tes_bat_nmlen <= 0 ||
 	    args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
-	    args->lstio_tes_sgrp_name == NULL ||	/* no source group */
+	    !args->lstio_tes_sgrp_name ||	/* no source group */
 	    args->lstio_tes_sgrp_nmlen <= 0 ||
 	    args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
-	    args->lstio_tes_dgrp_name == NULL ||	/* no target group */
+	    !args->lstio_tes_dgrp_name ||	/* no target group */
 	    args->lstio_tes_dgrp_nmlen <= 0 ||
 	    args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
 		return -EINVAL;

-	if (args->lstio_tes_loop == 0 ||	/* negative is infinite */
+	if (!args->lstio_tes_loop ||		/* negative is infinite */
 	    args->lstio_tes_concur <= 0 ||
 	    args->lstio_tes_dist <= 0 ||
 	    args->lstio_tes_span <= 0)
 		return -EINVAL;

 	/* have parameter, check if parameter length is valid */
-	if (args->lstio_tes_param != NULL &&
+	if (args->lstio_tes_param &&
 	    (args->lstio_tes_param_len <= 0 ||
-	     args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+	     args->lstio_tes_param_len >
+	     PAGE_SIZE - sizeof(lstcon_test_t)))
 		return -EINVAL;

 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
-	if (batch_name == NULL)
+	if (!batch_name)
 		return rc;

 	LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
-	if (src_name == NULL)
+	if (!src_name)
 		goto out;

 	LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
-	if (dst_name == NULL)
+	if (!dst_name)
 		goto out;

-	if (args->lstio_tes_param != NULL) {
+	if (args->lstio_tes_param) {
 		LIBCFS_ALLOC(param, args->lstio_tes_param_len);
-		if (param == NULL)
+		if (!param)
 			goto out;
+		if (copy_from_user(param, args->lstio_tes_param,
+				   args->lstio_tes_param_len)) {
+			rc = -EFAULT;
+			goto out;
+		}
 	}

 	rc = -EFAULT;
@@ -779,54 +775,55 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
 	    copy_from_user(src_name, args->lstio_tes_sgrp_name,
 			   args->lstio_tes_sgrp_nmlen) ||
 	    copy_from_user(dst_name, args->lstio_tes_dgrp_name,
-			   args->lstio_tes_dgrp_nmlen) ||
-	    copy_from_user(param, args->lstio_tes_param,
-			   args->lstio_tes_param_len))
+			   args->lstio_tes_dgrp_nmlen))
 		goto out;

-	rc = lstcon_test_add(batch_name,
-			     args->lstio_tes_type,
-			     args->lstio_tes_loop,
-			     args->lstio_tes_concur,
-			     args->lstio_tes_dist, args->lstio_tes_span,
-			     src_name, dst_name, param,
-			     args->lstio_tes_param_len,
-			     &ret, args->lstio_tes_resultp);
+	rc = lstcon_test_add(batch_name, args->lstio_tes_type,
+			     args->lstio_tes_loop, args->lstio_tes_concur,
+			     args->lstio_tes_dist, args->lstio_tes_span,
+			     src_name, dst_name, param,
+			     args->lstio_tes_param_len,
+			     &ret, args->lstio_tes_resultp);

-	if (ret != 0)
+	if (ret)
 		rc = (copy_to_user(args->lstio_tes_retp, &ret,
-				   sizeof(ret))) ? -EFAULT : 0;
+				   sizeof(ret))) ? -EFAULT : 0;
 out:
-	if (batch_name != NULL)
+	if (batch_name)
 		LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);

-	if (src_name != NULL)
+	if (src_name)
 		LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);

-	if (dst_name != NULL)
+	if (dst_name)
 		LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);

-	if (param != NULL)
+	if (param)
 		LIBCFS_FREE(param, args->lstio_tes_param_len);

 	return rc;
 }

 int
-lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
+lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
 {
-	char *buf;
-	int opc = data->ioc_u32[0];
-	int rc;
+	char *buf;
+	struct libcfs_ioctl_data *data;
+	int opc;
+	int rc;

 	if (cmd != IOC_LIBCFS_LNETST)
 		return -EINVAL;

-	if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+	data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
+
+	opc = data->ioc_u32[0];
+
+	if (data->ioc_plen1 > PAGE_SIZE)
 		return -EINVAL;

 	LIBCFS_ALLOC(buf, data->ioc_plen1);
-	if (buf == NULL)
+	if (!buf)
 		return -ENOMEM;

 	/* copy in parameter */
@@ -916,7 +913,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
 	}

 	if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
-			 sizeof(lstcon_trans_stat_t)))
+			 sizeof(lstcon_trans_stat_t)))
 		rc = -EFAULT;
 out:
 	mutex_unlock(&console_session.ses_mutex);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 1066c7043..35a227d0c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -54,14 +54,16 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
 {
 	lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;

-	LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
+	LASSERT(crpc && rpc == crpc->crp_rpc);
 	LASSERT(crpc->crp_posted && !crpc->crp_finished);

 	spin_lock(&rpc->crpc_lock);

-	if (crpc->crp_trans == NULL) {
-		/* Orphan RPC is not in any transaction,
-		 * I'm just a poor body and nobody loves me */
+	if (!crpc->crp_trans) {
+		/*
+		 * Orphan RPC is not in any transaction,
+		 * I'm just a poor body and nobody loves me
+		 */
 		spin_unlock(&rpc->crpc_lock);

 		/* release it */
@@ -72,11 +74,11 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
 	/* not an orphan RPC */
 	crpc->crp_finished = 1;

-	if (crpc->crp_stamp == 0) {
+	if (!crpc->crp_stamp) {
 		/* not aborted */
-		LASSERT(crpc->crp_status == 0);
+		LASSERT(!crpc->crp_status);

-		crpc->crp_stamp = cfs_time_current();
+		crpc->crp_stamp  = cfs_time_current();
 		crpc->crp_status = rpc->crpc_status;
 	}

@@ -94,16 +96,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
 	crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
 				       feats, bulk_npg, bulk_len,
 				       lstcon_rpc_done, (void *)crpc);
-	if (crpc->crp_rpc == NULL)
+	if (!crpc->crp_rpc)
 		return -ENOMEM;

-	crpc->crp_trans = NULL;
-	crpc->crp_node = nd;
-	crpc->crp_posted = 0;
+	crpc->crp_trans    = NULL;
+	crpc->crp_node     = nd;
+	crpc->crp_posted   = 0;
 	crpc->crp_finished = 0;
 	crpc->crp_unpacked = 0;
-	crpc->crp_status = 0;
-	crpc->crp_stamp = 0;
+	crpc->crp_status   = 0;
+	crpc->crp_stamp    = 0;
 	crpc->crp_embedded = embedded;
 	INIT_LIST_HEAD(&crpc->crp_link);

@@ -121,22 +123,21 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,

 	spin_lock(&console_session.ses_rpc_lock);

-	if (!list_empty(&console_session.ses_rpc_freelist)) {
-		crpc = list_entry(console_session.ses_rpc_freelist.next,
-				  lstcon_rpc_t, crp_link);
+	crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
+					lstcon_rpc_t, crp_link);
+	if (crpc)
 		list_del_init(&crpc->crp_link);
-	}

 	spin_unlock(&console_session.ses_rpc_lock);

-	if (crpc == NULL) {
+	if (!crpc) {
 		LIBCFS_ALLOC(crpc, sizeof(*crpc));
-		if (crpc == NULL)
+		if (!crpc)
 			return -ENOMEM;
 	}

 	rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
-	if (rc == 0) {
+	if (!rc) {
 		*crpcpp = crpc;
 		return 0;
 	}
@@ -155,7 +156,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
 	LASSERT(list_empty(&crpc->crp_link));

 	for (i = 0; i < bulk->bk_niov; i++) {
-		if (bulk->bk_iovs[i].kiov_page == NULL)
+		if (!bulk->bk_iovs[i].kiov_page)
 			continue;

 		__free_page(bulk->bk_iovs[i].kiov_page);
@@ -172,7 +173,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
 		spin_lock(&console_session.ses_rpc_lock);

 		list_add(&crpc->crp_link,
-			 &console_session.ses_rpc_freelist);
+			 &console_session.ses_rpc_freelist);

 		spin_unlock(&console_session.ses_rpc_lock);
 	}

@@ -186,7 +187,7 @@ lstcon_rpc_post(lstcon_rpc_t *crpc)
 {
 	lstcon_rpc_trans_t *trans = crpc->crp_trans;

-	LASSERT(trans != NULL);
+	LASSERT(trans);

 	atomic_inc(&trans->tas_remaining);
 	crpc->crp_posted = 1;
@@ -234,15 +235,17 @@ lstcon_rpc_trans_name(int transop)
 }

 int
-lstcon_rpc_trans_prep(struct list_head *translist,
-		      int transop, lstcon_rpc_trans_t **transpp)
+lstcon_rpc_trans_prep(struct list_head *translist, int transop,
+		      lstcon_rpc_trans_t **transpp)
 {
 	lstcon_rpc_trans_t *trans;

-	if (translist != NULL) {
+	if (translist) {
 		list_for_each_entry(trans, translist, tas_link) {
-			/* Can't enqueue two private transaction on
-			 * the same object */
+			/*
+			 * Can't enqueue two private transactions on
+			 * the same object
+			 */
 			if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
 				return -EPERM;
 		}
@@ -250,12 +253,12 @@ lstcon_rpc_trans_prep(struct list_head *translist,

 	/* create a trans group */
 	LIBCFS_ALLOC(trans, sizeof(*trans));
-	if (trans == NULL)
+	if (!trans)
 		return -ENOMEM;

 	trans->tas_opc = transop;

-	if (translist == NULL)
+	if (!translist)
 		INIT_LIST_HEAD(&trans->tas_olink);
 	else
 		list_add_tail(&trans->tas_olink, translist);
@@ -285,8 +288,8 @@ void
 lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
 {
 	srpc_client_rpc_t *rpc;
-	lstcon_rpc_t *crpc;
-	lstcon_node_t *nd;
+	lstcon_rpc_t *crpc;
+	lstcon_node_t *nd;

 	list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
 		rpc = crpc->crp_rpc;
@@ -294,8 +297,8 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
 		spin_lock(&rpc->crpc_lock);

 		if (!crpc->crp_posted ||	/* not posted */
-		    crpc->crp_stamp != 0) {	/* rpc done or aborted already */
-			if (crpc->crp_stamp == 0) {
+		    crpc->crp_stamp) {		/* rpc done or aborted already */
+			if (!crpc->crp_stamp) {
 				crpc->crp_stamp = cfs_time_current();
 				crpc->crp_status = -EINTR;
 			}
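[Editor's note — illustration, not part of the patch: the lstcon_rpc_prep() hunk above replaces an open-coded list_empty()/list_entry() pair with list_first_entry_or_null(). A minimal kernel-style sketch of the same pattern, with hypothetical type and field names:

	#include <linux/list.h>

	struct item {
		struct list_head link;
	};

	/* Pop the first item off a free list, or return NULL if it is empty. */
	static struct item *pop_free(struct list_head *freelist)
	{
		struct item *it;

		it = list_first_entry_or_null(freelist, struct item, link);
		if (it)
			list_del_init(&it->link); /* detach; list_empty(&it->link) now holds */
		return it;
	}

list_del_init() rather than list_del() keeps the detached node's link self-referencing, which matches the later LASSERT(list_empty(&crpc->crp_link)) in lstcon_rpc_put(). End of note.]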
@@ -303,14 +306,14 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
 			continue;
 		}

-		crpc->crp_stamp = cfs_time_current();
+		crpc->crp_stamp  = cfs_time_current();
 		crpc->crp_status = error;

 		spin_unlock(&rpc->crpc_lock);

 		sfw_abort_rpc(rpc);

-		if (error != ETIMEDOUT)
+		if (error != -ETIMEDOUT)
 			continue;

 		nd = crpc->crp_node;
@@ -329,7 +332,7 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
 	    !list_empty(&trans->tas_olink))	/* Not an end session RPC */
 		return 1;

-	return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
+	return !atomic_read(&trans->tas_remaining) ? 1 : 0;
 }

 int
@@ -366,7 +369,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
 	if (console_session.ses_shutdown)
 		rc = -ESHUTDOWN;

-	if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) {
+	if (rc || atomic_read(&trans->tas_remaining)) {
 		/* treat short timeout as canceled */
 		if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
 			rc = -EINTR;
@@ -385,14 +388,14 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
 static int
 lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
 {
-	lstcon_node_t *nd = crpc->crp_node;
+	lstcon_node_t *nd = crpc->crp_node;
 	srpc_client_rpc_t *rpc = crpc->crp_rpc;
 	srpc_generic_reply_t *rep;

-	LASSERT(nd != NULL && rpc != NULL);
-	LASSERT(crpc->crp_stamp != 0);
+	LASSERT(nd && rpc);
+	LASSERT(crpc->crp_stamp);

-	if (crpc->crp_status != 0) {
+	if (crpc->crp_status) {
 		*msgpp = NULL;
 		return crpc->crp_status;
 	}
@@ -422,23 +425,23 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
 void
 lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
 {
-	lstcon_rpc_t *crpc;
+	lstcon_rpc_t *crpc;
 	srpc_msg_t *rep;
 	int error;

-	LASSERT(stat != NULL);
+	LASSERT(stat);

 	memset(stat, 0, sizeof(*stat));

 	list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
 		lstcon_rpc_stat_total(stat, 1);

-		LASSERT(crpc->crp_stamp != 0);
+		LASSERT(crpc->crp_stamp);

 		error = lstcon_rpc_get_reply(crpc, &rep);
-		if (error != 0) {
+		if (error) {
 			lstcon_rpc_stat_failure(stat, 1);
-			if (stat->trs_rpc_errno == 0)
+			if (!stat->trs_rpc_errno)
 				stat->trs_rpc_errno = -error;

 			continue;
@@ -449,7 +452,7 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
 		lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
 	}

-	if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) {
+	if (trans->tas_opc == LST_TRANS_SESNEW && !stat->trs_fwk_errno) {
 		stat->trs_fwk_errno =
 			lstcon_session_feats_check(trans->tas_features);
 	}
@@ -460,17 +463,15 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
 	       lstcon_rpc_stat_failure(stat, 0),
 	       lstcon_rpc_stat_total(stat, 0),
 	       stat->trs_rpc_errno, stat->trs_fwk_errno);
-
-	return;
 }

 int
 lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
-			     struct list_head *head_up,
+			     struct list_head __user *head_up,
 			     lstcon_rpc_readent_func_t readent)
 {
 	struct list_head tmp;
-	struct list_head *next;
+	struct list_head __user *next;
 	lstcon_rpc_ent_t *ent;
 	srpc_generic_reply_t *rep;
 	lstcon_rpc_t *crpc;
@@ -480,13 +481,13 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
 	struct timeval tv;
 	int error;

-	LASSERT(head_up != NULL);
+	LASSERT(head_up);

 	next = head_up;

 	list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
 		if (copy_from_user(&tmp, next,
-				   sizeof(struct list_head)))
+				   sizeof(struct list_head)))
 			return -EFAULT;

 		if (tmp.next == head_up)
@@ -496,7 +497,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,

 		ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);

-		LASSERT(crpc->crp_stamp != 0);
+		LASSERT(crpc->crp_stamp);

 		error = lstcon_rpc_get_reply(crpc, &msg);
@@ -506,33 +507,32 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
 			   (unsigned long)console_session.ses_id.ses_stamp);
 		jiffies_to_timeval(dur, &tv);

-		if (copy_to_user(&ent->rpe_peer,
-				 &nd->nd_id, sizeof(lnet_process_id_t)) ||
+		if (copy_to_user(&ent->rpe_peer, &nd->nd_id,
+				 sizeof(lnet_process_id_t)) ||
 		    copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
-		    copy_to_user(&ent->rpe_state,
-				 &nd->nd_state, sizeof(nd->nd_state)) ||
+		    copy_to_user(&ent->rpe_state, &nd->nd_state,
+				 sizeof(nd->nd_state)) ||
 		    copy_to_user(&ent->rpe_rpc_errno, &error,
-				 sizeof(error)))
+				 sizeof(error)))
 			return -EFAULT;

-		if (error != 0)
+		if (error)
 			continue;

 		/* RPC is done */
 		rep = (srpc_generic_reply_t *)&msg->msg_body.reply;

-		if (copy_to_user(&ent->rpe_sid,
-				 &rep->sid, sizeof(lst_sid_t)) ||
-		    copy_to_user(&ent->rpe_fwk_errno,
-				 &rep->status, sizeof(rep->status)))
+		if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
+		    copy_to_user(&ent->rpe_fwk_errno, &rep->status,
+				 sizeof(rep->status)))
 			return -EFAULT;

-		if (readent == NULL)
+		if (!readent)
 			continue;

 		error = readent(trans->tas_opc, msg, ent);
-		if (error != 0)
+		if (error)
 			return error;
 	}
@@ -547,8 +547,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 	lstcon_rpc_t *tmp;
 	int count = 0;

-	list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list,
-				 crp_link) {
+	list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
 		rpc = crpc->crp_rpc;

 		spin_lock(&rpc->crpc_lock);
@@ -563,14 +562,15 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 			continue;
 		}

-		/* rpcs can be still not callbacked (even LNetMDUnlink is called)
+		/*
+		 * RPCs may still not be called back (even when LNetMDUnlink is called)
 		 * because huge timeout for inaccessible network, don't make
 		 * user wait for them, just abandon them, they will be recycled
-		 * in callback */
+		 * in callback
+		 */
+		LASSERT(crpc->crp_status);

-		LASSERT(crpc->crp_status != 0);
-
-		crpc->crp_node = NULL;
+		crpc->crp_node  = NULL;
 		crpc->crp_trans = NULL;
 		list_del_init(&crpc->crp_link);
 		count++;
@@ -580,7 +580,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 		atomic_dec(&trans->tas_remaining);
 	}

-	LASSERT(atomic_read(&trans->tas_remaining) == 0);
+	LASSERT(!atomic_read(&trans->tas_remaining));

 	list_del(&trans->tas_link);
 	if (!list_empty(&trans->tas_olink))
@@ -590,8 +590,6 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
 	       lstcon_rpc_trans_name(trans->tas_opc), count);

 	LIBCFS_FREE(trans, sizeof(*trans));
-
-	return;
 }

 int
@@ -606,12 +604,12 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
 	case LST_TRANS_SESNEW:
 		rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
 				     feats, 0, 0, crpc);
-		if (rc != 0)
+		if (rc)
 			return rc;

 		msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
-		msrq->mksn_sid = console_session.ses_id;
-		msrq->mksn_force = console_session.ses_force;
+		msrq->mksn_sid   = console_session.ses_id;
+		msrq->mksn_force = console_session.ses_force;
 		strlcpy(msrq->mksn_name, console_session.ses_name,
 			sizeof(msrq->mksn_name));
 		break;
@@ -619,7 +617,7 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
 	case LST_TRANS_SESEND:
 		rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
 				     feats, 0, 0, crpc);
-		if (rc != 0)
+		if (rc)
 			return rc;

 		rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
@@ -640,12 +638,12 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
 	int rc;

 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;

 	drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;

-	drq->dbg_sid = console_session.ses_id;
+	drq->dbg_sid   = console_session.ses_id;
 	drq->dbg_flags = 0;

 	return rc;
@@ -655,28 +653,28 @@ int
 lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 		   lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
 {
-	lstcon_batch_t *batch;
+	lstcon_batch_t *batch;
 	srpc_batch_reqst_t *brq;
-	int rc;
+	int rc;

 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;

 	brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;

-	brq->bar_sid = console_session.ses_id;
-	brq->bar_bid = tsb->tsb_id;
+	brq->bar_sid     = console_session.ses_id;
+	brq->bar_bid     = tsb->tsb_id;
 	brq->bar_testidx = tsb->tsb_index;
-	brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
-		(transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
-		 SRPC_BATCH_OPC_QUERY);
+	brq->bar_opc     = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
+			   (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
+			    SRPC_BATCH_OPC_QUERY);

 	if (transop != LST_TRANS_TSBRUN && transop != LST_TRANS_TSBSTOP)
 		return 0;

-	LASSERT(tsb->tsb_index == 0);
+	LASSERT(!tsb->tsb_index);

 	batch = (lstcon_batch_t *)tsb;
 	brq->bar_arg = batch->bat_arg;
@@ -688,15 +686,15 @@ int
 lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
 {
 	srpc_stat_reqst_t *srq;
-	int rc;
+	int rc;

 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;

 	srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;

-	srq->str_sid = console_session.ses_id;
+	srq->str_sid  = console_session.ses_id;
 	srq->str_type = 0; /* XXX remove it */

 	return 0;
@@ -736,7 +734,7 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
 		return -EINVAL;

 	start = ((idx / dist) * span) % grp->grp_nnode;
-	end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
+	end   = ((idx / dist) * span + span - 1) % grp->grp_nnode;

 	list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
 		nd = ndl->ndl_node;
@@ -776,7 +774,7 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
 {
 	test_ping_req_t *prq = &req->tsr_u.ping;

-	prq->png_size = param->png_size;
+	prq->png_size  = param->png_size;
 	prq->png_flags = param->png_flags;
 	/* TODO dest */
 	return 0;
@@ -787,9 +785,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
 {
 	test_bulk_req_t *brq = &req->tsr_u.bulk_v0;

-	brq->blk_opc = param->blk_opc;
-	brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
-		       PAGE_CACHE_SIZE;
+	brq->blk_opc   = param->blk_opc;
+	brq->blk_npg   = (param->blk_size + PAGE_SIZE - 1) /
+			 PAGE_SIZE;
 	brq->blk_flags = param->blk_flags;

 	return 0;
@@ -800,9 +798,9 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
 {
 	test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;

-	brq->blk_opc = param->blk_opc;
-	brq->blk_flags = param->blk_flags;
-	brq->blk_len = param->blk_size;
+	brq->blk_opc    = param->blk_opc;
+	brq->blk_flags  = param->blk_flags;
+	brq->blk_len    = param->blk_size;
 	brq->blk_offset = 0; /* reserved */

 	return 0;
@@ -812,27 +810,27 @@ int
 lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 		    lstcon_test_t *test, lstcon_rpc_t **crpc)
 {
-	lstcon_group_t *sgrp = test->tes_src_grp;
-	lstcon_group_t *dgrp = test->tes_dst_grp;
+	lstcon_group_t *sgrp = test->tes_src_grp;
+	lstcon_group_t *dgrp = test->tes_dst_grp;
 	srpc_test_reqst_t *trq;
-	srpc_bulk_t *bulk;
-	int i;
-	int npg = 0;
-	int nob = 0;
-	int rc = 0;
+	srpc_bulk_t *bulk;
+	int i;
+	int npg = 0;
+	int nob = 0;
+	int rc = 0;

 	if (transop == LST_TRANS_TSBCLIADD) {
 		npg = sfw_id_pages(test->tes_span);
-		nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
-		      npg * PAGE_CACHE_SIZE :
+		nob = !(feats & LST_FEAT_BULK_LEN) ?
+		      npg * PAGE_SIZE :
 		      sizeof(lnet_process_id_packed_t) * test->tes_span;
 	}

 	rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
-	if (rc != 0)
+	if (rc)
 		return rc;

-	trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
+	trq  = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;

 	if (transop == LST_TRANS_TSBSRVADD) {
 		int ndist = (sgrp->grp_nnode + test->tes_dist - 1) /
@@ -842,27 +840,27 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 		int nmax = (ndist + nspan - 1) / nspan;

 		trq->tsr_ndest = 0;
-		trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
+		trq->tsr_loop  = nmax * test->tes_dist * test->tes_concur;
 	} else {
 		bulk = &(*crpc)->crp_rpc->crpc_bulk;

 		for (i = 0; i < npg; i++) {
-			int len;
+			int len;

 			LASSERT(nob > 0);

-			len = (feats & LST_FEAT_BULK_LEN) == 0 ?
-			      PAGE_CACHE_SIZE :
-			      min_t(int, nob, PAGE_CACHE_SIZE);
+			len = !(feats & LST_FEAT_BULK_LEN) ?
+			      PAGE_SIZE :
+			      min_t(int, nob, PAGE_SIZE);
 			nob -= len;

 			bulk->bk_iovs[i].kiov_offset = 0;
-			bulk->bk_iovs[i].kiov_len = len;
-			bulk->bk_iovs[i].kiov_page =
+			bulk->bk_iovs[i].kiov_len    = len;
+			bulk->bk_iovs[i].kiov_page   =
 				alloc_page(GFP_KERNEL);

-			if (bulk->bk_iovs[i].kiov_page == NULL) {
+			if (!bulk->bk_iovs[i].kiov_page) {
 				lstcon_rpc_put(*crpc);
 				return -ENOMEM;
 			}
@@ -877,19 +875,19 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 					     test->tes_dist,
 					     test->tes_span,
 					     npg, &bulk->bk_iovs[0]);
-		if (rc != 0) {
+		if (rc) {
 			lstcon_rpc_put(*crpc);
 			return rc;
 		}

 		trq->tsr_ndest = test->tes_span;
-		trq->tsr_loop = test->tes_loop;
+		trq->tsr_loop  = test->tes_loop;
 	}

-	trq->tsr_sid = console_session.ses_id;
-	trq->tsr_bid = test->tes_hdr.tsb_id;
-	trq->tsr_concur = test->tes_concur;
-	trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
+	trq->tsr_sid	    = console_session.ses_id;
+	trq->tsr_bid	    = test->tes_hdr.tsb_id;
+	trq->tsr_concur     = test->tes_concur;
+	trq->tsr_is_client  = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
 	trq->tsr_stop_onerr = !!test->tes_stop_onerr;

 	switch (test->tes_type) {
@@ -901,7 +899,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
 	case LST_TEST_BULK:
 		trq->tsr_service = SRPC_SERVICE_BRW;

-		if ((feats & LST_FEAT_BULK_LEN) == 0) {
+		if (!(feats & LST_FEAT_BULK_LEN)) {
 			rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *)
 						    &test->tes_param[0], trq);
 		} else {
@@ -923,10 +921,10 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
 			 lstcon_node_t *nd, srpc_msg_t *reply)
 {
 	srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
-	int status = mksn_rep->mksn_status;
+	int status = mksn_rep->mksn_status;

-	if (status == 0 &&
-	    (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+	if (!status &&
+	    (reply->msg_ses_feats & ~LST_FEATS_MASK)) {
 		mksn_rep->mksn_status = EPROTO;
 		status = EPROTO;
 	}
@@ -937,22 +935,27 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
 			reply->msg_ses_feats);
 	}

-	if (status != 0)
+	if (status)
 		return status;

 	if (!trans->tas_feats_updated) {
-		trans->tas_feats_updated = 1;
-		trans->tas_features = reply->msg_ses_feats;
+		spin_lock(&console_session.ses_rpc_lock);
+		if (!trans->tas_feats_updated) { /* recheck with lock */
+			trans->tas_feats_updated = 1;
+			trans->tas_features = reply->msg_ses_feats;
+		}
+		spin_unlock(&console_session.ses_rpc_lock);
 	}

 	if (reply->msg_ses_feats != trans->tas_features) {
 		CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
-			reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
-			trans->tas_features);
-		status = mksn_rep->mksn_status = EPROTO;
+			reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
+			trans->tas_features);
+		mksn_rep->mksn_status = EPROTO;
+		status = EPROTO;
 	}

-	if (status == 0) {
+	if (!status) {
 		/* session timeout on remote node */
 		nd->nd_timeout = mksn_rep->mksn_timeout;
 	}
@@ -964,17 +967,17 @@ void
 lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 		      lstcon_node_t *nd, lstcon_trans_stat_t *stat)
 {
-	srpc_rmsn_reply_t *rmsn_rep;
+	srpc_rmsn_reply_t *rmsn_rep;
 	srpc_debug_reply_t *dbg_rep;
 	srpc_batch_reply_t *bat_rep;
-	srpc_test_reply_t *test_rep;
-	srpc_stat_reply_t *stat_rep;
-	int rc = 0;
+	srpc_test_reply_t *test_rep;
+	srpc_stat_reply_t *stat_rep;
+	int rc = 0;

 	switch (trans->tas_opc) {
 	case LST_TRANS_SESNEW:
 		rc = lstcon_sesnew_stat_reply(trans, nd, msg);
-		if (rc == 0) {
+		if (!rc) {
 			lstcon_sesop_stat_success(stat, 1);
 			return;
 		}
@@ -985,7 +988,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 	case LST_TRANS_SESEND:
 		rmsn_rep = &msg->msg_body.rmsn_reply;
 		/* ESRCH is not an error for end session */
-		if (rmsn_rep->rmsn_status == 0 ||
+		if (!rmsn_rep->rmsn_status ||
 		    rmsn_rep->rmsn_status == ESRCH) {
 			lstcon_sesop_stat_success(stat, 1);
 			return;
@@ -1014,7 +1017,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 	case LST_TRANS_TSBSTOP:
 		bat_rep = &msg->msg_body.bat_reply;

-		if (bat_rep->bar_status == 0) {
+		if (!bat_rep->bar_status) {
 			lstcon_tsbop_stat_success(stat, 1);
 			return;
 		}
@@ -1033,12 +1036,12 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 	case LST_TRANS_TSBSRVQRY:
 		bat_rep = &msg->msg_body.bat_reply;

-		if (bat_rep->bar_active != 0)
+		if (bat_rep->bar_active)
 			lstcon_tsbqry_stat_run(stat, 1);
 		else
 			lstcon_tsbqry_stat_idle(stat, 1);

-		if (bat_rep->bar_status == 0)
+		if (!bat_rep->bar_status)
 			return;

 		lstcon_tsbqry_stat_failure(stat, 1);
@@ -1049,7 +1052,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 	case LST_TRANS_TSBSRVADD:
 		test_rep = &msg->msg_body.tes_reply;

-		if (test_rep->tsr_status == 0) {
+		if (!test_rep->tsr_status) {
 			lstcon_tsbop_stat_success(stat, 1);
 			return;
 		}
@@ -1061,7 +1064,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 	case LST_TRANS_STATQRY:
 		stat_rep = &msg->msg_body.stat_reply;

-		if (stat_rep->str_status == 0) {
+		if (!stat_rep->str_status) {
 			lstcon_statqry_stat_success(stat, 1);
 			return;
 		}
@@ -1074,10 +1077,8 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
 		LBUG();
 	}

-	if (stat->trs_fwk_errno == 0)
+	if (!stat->trs_fwk_errno)
 		stat->trs_fwk_errno = rc;
-
-	return;
 }

 int
@@ -1096,22 +1097,22 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 	/* Creating session RPG for list of nodes */

 	rc = lstcon_rpc_trans_prep(translist, transop, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction %d: %d\n", transop, rc);
 		return rc;
 	}

 	feats = trans->tas_features;
 	list_for_each_entry(ndl, ndlist, ndl_link) {
-		rc = condition == NULL ? 1 :
+		rc = !condition ? 1 :
 		     condition(transop, ndl->ndl_node, arg);

-		if (rc == 0)
+		if (!rc)
 			continue;

 		if (rc < 0) {
 			CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
-			       transop, rc);
+			       transop, rc);
 			break;
 		}
@@ -1146,7 +1147,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 			break;
 		}

-		if (rc != 0) {
+		if (rc) {
 			CERROR("Failed to create RPC for transaction %s: %d\n",
 			       lstcon_rpc_trans_name(transop), rc);
 			break;
@@ -1155,7 +1156,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 		lstcon_rpc_trans_addreq(trans, rpc);
 	}

-	if (rc == 0) {
+	if (!rc) {
 		*transpp = trans;
 		return 0;
 	}
@@ -1168,7 +1169,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 static void
 lstcon_rpc_pinger(void *arg)
 {
-	stt_timer_t *ptimer = (stt_timer_t *)arg;
+	struct stt_timer *ptimer = (struct stt_timer *)arg;
 	lstcon_rpc_trans_t *trans;
 	lstcon_rpc_t *crpc;
 	srpc_msg_t *rep;
@@ -1196,7 +1197,7 @@ lstcon_rpc_pinger(void *arg)

 	trans = console_session.ses_ping;

-	LASSERT(trans != NULL);
+	LASSERT(trans);

 	list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
 		nd = ndl->ndl_node;
@@ -1208,7 +1209,7 @@ lstcon_rpc_pinger(void *arg)
 			rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
 						trans->tas_features, &crpc);
-			if (rc != 0) {
+			if (rc) {
 				CERROR("Out of memory\n");
 				break;
 			}
@@ -1221,7 +1222,7 @@ lstcon_rpc_pinger(void *arg)

 		crpc = &nd->nd_ping;

-		if (crpc->crp_rpc != NULL) {
+		if (crpc->crp_rpc) {
 			LASSERT(crpc->crp_trans == trans);
 			LASSERT(!list_empty(&crpc->crp_link));
@@ -1247,20 +1248,20 @@ lstcon_rpc_pinger(void *arg)
 		if (nd->nd_state != LST_NODE_ACTIVE)
 			continue;

-		intv = (jiffies - nd->nd_stamp) / HZ;
+		intv = (jiffies - nd->nd_stamp) / msecs_to_jiffies(MSEC_PER_SEC);
 		if (intv < nd->nd_timeout / 2)
 			continue;

 		rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
 				     trans->tas_features, 0, 0, 1, crpc);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Out of memory\n");
 			break;
 		}

 		drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;

-		drq->dbg_sid = console_session.ses_id;
+		drq->dbg_sid   = console_session.ses_id;
 		drq->dbg_flags = 0;

 		lstcon_rpc_trans_addreq(trans, crpc);
@@ -1285,15 +1286,15 @@ int
 lstcon_rpc_pinger_start(void)
 {
-	stt_timer_t *ptimer;
+	struct stt_timer *ptimer;
 	int rc;

 	LASSERT(list_empty(&console_session.ses_rpc_freelist));
-	LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+	LASSERT(!atomic_read(&console_session.ses_rpc_counter));

 	rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
 				   &console_session.ses_ping);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Failed to create console pinger\n");
 		return rc;
 	}
@@ -1327,6 +1328,7 @@ lstcon_rpc_cleanup_wait(void)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_rpc_t *crpc;
+	lstcon_rpc_t *temp;
 	struct list_head *pacer;
 	struct list_head zlist;

@@ -1337,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void)
 	while (!list_empty(&console_session.ses_trans_list)) {
 		list_for_each(pacer, &console_session.ses_trans_list) {
 			trans = list_entry(pacer, lstcon_rpc_trans_t,
-					   tas_link);
+					   tas_link);

 			CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
 			       lstcon_rpc_trans_name(trans->tas_opc));
@@ -1356,7 +1358,7 @@ lstcon_rpc_cleanup_wait(void)

 	spin_lock(&console_session.ses_rpc_lock);

-	lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+	lst_wait_until(!atomic_read(&console_session.ses_rpc_counter),
 		       console_session.ses_rpc_lock,
 		       "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n",
 		       atomic_read(&console_session.ses_rpc_counter));
@@ -1366,9 +1368,7 @@ lstcon_rpc_cleanup_wait(void)

 	spin_unlock(&console_session.ses_rpc_lock);

-	while (!list_empty(&zlist)) {
-		crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
-
+	list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
 		list_del(&crpc->crp_link);
 		LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
 	}
@@ -1394,5 +1394,5 @@ void
 lstcon_rpc_module_fini(void)
 {
 	LASSERT(list_empty(&console_session.ses_rpc_freelist));
-	LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+	LASSERT(!atomic_read(&console_session.ses_rpc_counter));
 }
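[Editor's note — illustration, not part of the patch: the lstcon_sesnew_stat_reply() hunk above converts a racy write of tas_feats_updated/tas_features into the classic check/lock/recheck idiom for a write-once flag. A minimal kernel-style sketch under that assumption, with hypothetical names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(feat_lock);	/* hypothetical lock */
	static int feats_updated;
	static unsigned int features;

	/* Record the first reply's feature mask exactly once. */
	static void record_features_once(unsigned int feats)
	{
		if (!feats_updated) {			/* cheap unlocked check */
			spin_lock(&feat_lock);
			if (!feats_updated) {		/* recheck under the lock */
				feats_updated = 1;
				features = feats;
			}
			spin_unlock(&feat_lock);
		}
	}

The unlocked first test only avoids taking the lock on the common path; correctness comes from the second test made while the lock is held. End of note.]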
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 95c832ff7..3e7839dad 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -51,12 +51,12 @@
 #include "selftest.h"

 /* Console rpc and rpc transaction */
-#define LST_TRANS_TIMEOUT	30
-#define LST_TRANS_MIN_TIMEOUT	3
+#define LST_TRANS_TIMEOUT	30
+#define LST_TRANS_MIN_TIMEOUT	3

 #define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)

-#define LST_PING_INTERVAL	8
+#define LST_PING_INTERVAL	8

 struct lstcon_rpc_trans;
 struct lstcon_tsb_hdr;
@@ -64,49 +64,50 @@ struct lstcon_test;
 struct lstcon_node;

 typedef struct lstcon_rpc {
-	struct list_head crp_link;	/* chain on rpc transaction */
-	srpc_client_rpc_t *crp_rpc;	/* client rpc */
-	struct lstcon_node *crp_node;	/* destination node */
-	struct lstcon_rpc_trans *crp_trans;	/* conrpc transaction */
-
-	unsigned int crp_posted:1;	/* rpc is posted */
-	unsigned int crp_finished:1;	/* rpc is finished */
-	unsigned int crp_unpacked:1;	/* reply is unpacked */
+	struct list_head	 crp_link;	/* chain on rpc transaction */
+	srpc_client_rpc_t	*crp_rpc;	/* client rpc */
+	struct lstcon_node	*crp_node;	/* destination node */
+	struct lstcon_rpc_trans	*crp_trans;	/* conrpc transaction */
+
+	unsigned int		 crp_posted:1;	/* rpc is posted */
+	unsigned int		 crp_finished:1; /* rpc is finished */
+	unsigned int		 crp_unpacked:1; /* reply is unpacked */
 	/** RPC is embedded in other structure and can't free it */
-	unsigned int crp_embedded:1;
-	int crp_status;		/* console rpc errors */
-	unsigned long crp_stamp;	/* replied time stamp */
+	unsigned int		 crp_embedded:1;
+	int			 crp_status;	/* console rpc errors */
+	unsigned long		 crp_stamp;	/* replied time stamp */
 } lstcon_rpc_t;

 typedef struct lstcon_rpc_trans {
-	struct list_head tas_olink;	/* link chain on owner list */
-	struct list_head tas_link;	/* link chain on global list */
-	int tas_opc;			/* operation code of transaction */
-	unsigned tas_feats_updated;	/* features mask is uptodate */
-	unsigned tas_features;		/* test features mask */
-	wait_queue_head_t tas_waitq;	/* wait queue head */
-	atomic_t tas_remaining;		/* # of un-scheduled rpcs */
+	struct list_head	tas_olink;	/* link chain on owner list */
+	struct list_head	tas_link;	/* link chain on global list */
+	int			tas_opc;	/* operation code of transaction */
+	unsigned		tas_feats_updated; /* features mask is uptodate */
+	unsigned		tas_features;	/* test features mask */
+	wait_queue_head_t	tas_waitq;	/* wait queue head */
+	atomic_t		tas_remaining;	/* # of un-scheduled rpcs */
 	struct list_head	tas_rpcs_list;	/* queued requests */
 } lstcon_rpc_trans_t;

-#define LST_TRANS_PRIVATE	0x1000
+#define LST_TRANS_PRIVATE	0x1000

 #define LST_TRANS_SESNEW	(LST_TRANS_PRIVATE | 0x01)
 #define LST_TRANS_SESEND	(LST_TRANS_PRIVATE | 0x02)
 #define LST_TRANS_SESQRY	0x03
-#define LST_TRANS_SESPING	0x04
+#define LST_TRANS_SESPING	0x04

-#define LST_TRANS_TSBCLIADD	(LST_TRANS_PRIVATE | 0x11)
-#define LST_TRANS_TSBSRVADD	(LST_TRANS_PRIVATE | 0x12)
+#define LST_TRANS_TSBCLIADD	(LST_TRANS_PRIVATE | 0x11)
+#define LST_TRANS_TSBSRVADD	(LST_TRANS_PRIVATE | 0x12)
 #define LST_TRANS_TSBRUN	(LST_TRANS_PRIVATE | 0x13)
-#define LST_TRANS_TSBSTOP	(LST_TRANS_PRIVATE | 0x14)
-#define LST_TRANS_TSBCLIQRY	0x15
-#define LST_TRANS_TSBSRVQRY	0x16
+#define LST_TRANS_TSBSTOP	(LST_TRANS_PRIVATE | 0x14)
+#define LST_TRANS_TSBCLIQRY	0x15
+#define LST_TRANS_TSBSRVQRY	0x16

-#define LST_TRANS_STATQRY	0x21
+#define LST_TRANS_STATQRY	0x21

 typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
+typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+					 lstcon_rpc_ent_t __user *);

 int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
 		       unsigned version, lstcon_rpc_t **crpc);
@@ -128,7 +129,7 @@ int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
 			   lstcon_trans_stat_t *stat);
 int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
-				 struct list_head *head_up,
+				 struct list_head __user *head_up,
 				 lstcon_rpc_readent_func_t readent);
 void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
 void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 5619fc430..1a923ea3a 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -49,16 +49,16 @@ do { \
 	if ((nd)->nd_state == LST_NODE_ACTIVE) \
 		(p)->nle_nactive++; \
-	else if ((nd)->nd_state == LST_NODE_BUSY) \
+	else if ((nd)->nd_state == LST_NODE_BUSY)	\
 		(p)->nle_nbusy++; \
-	else if ((nd)->nd_state == LST_NODE_DOWN) \
+	else if ((nd)->nd_state == LST_NODE_DOWN)	\
 		(p)->nle_ndown++; \
 	else \
 		(p)->nle_nunknown++; \
 	(p)->nle_nnode++; \
 } while (0)

-lstcon_session_t console_session;
+struct lstcon_session console_session;

 static void
 lstcon_node_get(lstcon_node_t *nd)
@@ -71,12 +71,13 @@ lstcon_node_get(lstcon_node_t *nd)
 static int
 lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
 {
-	lstcon_ndlink_t *ndl;
+	lstcon_ndlink_t *ndl;
 	unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;

 	LASSERT(id.nid != LNET_NID_ANY);

-	list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
+	list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx],
+			    ndl_hlink) {
 		if (ndl->ndl_node->nd_id.nid != id.nid ||
 		    ndl->ndl_node->nd_id.pid != id.pid)
 			continue;
@@ -90,23 +91,25 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
 		return -ENOENT;

 	LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
-	if (*ndpp == NULL)
+	if (!*ndpp)
 		return -ENOMEM;

 	ndl = (lstcon_ndlink_t *)(*ndpp + 1);

 	ndl->ndl_node = *ndpp;
-	ndl->ndl_node->nd_ref = 1;
-	ndl->ndl_node->nd_id = id;
+	ndl->ndl_node->nd_ref   = 1;
+	ndl->ndl_node->nd_id    = id;
 	ndl->ndl_node->nd_stamp = cfs_time_current();
 	ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
 	ndl->ndl_node->nd_timeout = 0;
 	memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));

-	/* queued in global hash & list, no refcount is taken by
+	/*
+	 * queued in global hash & list, no refcount is taken by
 	 * global hash & list, if caller release his refcount,
-	 * node will be released */
+	 * node will be released
+	 */
 	list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
 	list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);

@@ -157,16 +160,16 @@ lstcon_ndlink_find(struct list_head *hash,
 		return 0;
 	}

-	if (create == 0)
+	if (!create)
 		return -ENOENT;

 	/* find or create in session hash */
 	rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
-	if (rc != 0)
+	if (rc)
 		return rc;

 	LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
-	if (ndl == NULL) {
+	if (!ndl) {
 		lstcon_node_put(nd);
 		return -ENOMEM;
 	}
@@ -177,7 +180,7 @@ lstcon_ndlink_find(struct list_head *hash,
 	INIT_LIST_HEAD(&ndl->ndl_link);
 	list_add_tail(&ndl->ndl_hlink, &hash[idx]);

-	return 0;
+	return 0;
 }

 static void
@@ -200,12 +203,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
 	LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
 				   grp_ndl_hash[LST_NODE_HASHSIZE]));
-	if (grp == NULL)
+	if (!grp)
 		return -ENOMEM;

 	grp->grp_ref = 1;
-	if (name != NULL)
-		strcpy(grp->grp_name, name);
+	if (name) {
+		if (strlen(name) > sizeof(grp->grp_name) - 1) {
+			LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+				    grp_ndl_hash[LST_NODE_HASHSIZE]));
+			return -E2BIG;
+		}
+		strncpy(grp->grp_name, name, sizeof(grp->grp_name));
+	}

 	INIT_LIST_HEAD(&grp->grp_link);
 	INIT_LIST_HEAD(&grp->grp_ndl_list);
@@ -234,7 +243,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
 	lstcon_ndlink_t *tmp;

 	list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
-		if ((ndl->ndl_node->nd_state & keep) == 0)
+		if (!(ndl->ndl_node->nd_state & keep))
 			lstcon_group_ndlink_release(grp, ndl);
 	}
 }
@@ -252,9 +261,8 @@ lstcon_group_decref(lstcon_group_t *grp)

 	lstcon_group_drain(grp, 0);

-	for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+	for (i = 0; i < LST_NODE_HASHSIZE; i++)
 		LASSERT(list_empty(&grp->grp_ndl_hash[i]));
-	}

 	LIBCFS_FREE(grp, offsetof(lstcon_group_t,
 				  grp_ndl_hash[LST_NODE_HASHSIZE]));
@@ -266,7 +274,7 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
 	lstcon_group_t *grp;

 	list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
-		if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
+		if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
 			continue;

 		lstcon_group_addref(grp); /* +1 ref for caller */
@@ -284,7 +292,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
 	int rc;

 	rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
-	if (rc != 0)
+	if (rc)
 		return rc;

 	if (!list_empty(&(*ndlpp)->ndl_link))
@@ -309,7 +317,7 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
 			 lstcon_group_t *new, lstcon_ndlink_t *ndl)
 {
 	unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
-			   LST_NODE_HASHSIZE;
+			   LST_NODE_HASHSIZE;

 	list_del(&ndl->ndl_hlink);
 	list_del(&ndl->ndl_link);
@@ -327,7 +335,7 @@ lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
 	while (!list_empty(&old->grp_ndl_list)) {
 		ndl = list_entry(old->grp_ndl_list.next,
-				 lstcon_ndlink_t, ndl_link);
+				 lstcon_ndlink_t, ndl_link);
 		lstcon_group_ndlink_move(old, new, ndl);
 	}
 }
@@ -347,7 +355,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
 		if (nd->nd_state != LST_NODE_ACTIVE)
 			return 0;

-		if (grp != NULL && nd->nd_ref > 1)
+		if (grp && nd->nd_ref > 1)
 			return 0;
 		break;
@@ -363,7 +371,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)

 static int
 lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
-		      lstcon_rpc_ent_t *ent_up)
+		      lstcon_rpc_ent_t __user *ent_up)
 {
 	srpc_debug_reply_t *rep;

@@ -376,9 +384,9 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
 		rep = &msg->msg_body.dbg_reply;

 		if (copy_to_user(&ent_up->rpe_priv[0],
-				 &rep->dbg_timeout, sizeof(int)) ||
+				 &rep->dbg_timeout, sizeof(int)) ||
 		    copy_to_user(&ent_up->rpe_payload[0],
-				 &rep->dbg_name, LST_NAME_SIZE))
+				 &rep->dbg_name, LST_NAME_SIZE))
 			return -EFAULT;

 		return 0;
@@ -392,18 +400,18 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,

 static int
 lstcon_group_nodes_add(lstcon_group_t *grp,
-		       int count, lnet_process_id_t *ids_up,
-		       unsigned *featp, struct list_head *result_up)
+		       int count, lnet_process_id_t __user *ids_up,
+		       unsigned *featp, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
-	lstcon_ndlink_t *ndl;
+	lstcon_ndlink_t *ndl;
 	lstcon_group_t *tmp;
 	lnet_process_id_t id;
 	int i;
 	int rc;

 	rc = lstcon_group_alloc(NULL, &tmp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		return -ENOMEM;
 	}
@@ -416,18 +424,18 @@ lstcon_group_nodes_add(lstcon_group_t *grp,

 		/* skip if it's in this group already */
 		rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
-		if (rc == 0)
+		if (!rc)
 			continue;

 		/* add to tmp group */
 		rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
-		if (rc != 0) {
+		if (rc) {
 			CERROR("Can't create ndlink, out of memory\n");
 			break;
 		}
 	}

-	if (rc != 0) {
+	if (rc) {
 		lstcon_group_decref(tmp);
 		return rc;
 	}
@@ -435,7 +443,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
 	rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
 				     &tmp->grp_trans_list, LST_TRANS_SESNEW,
 				     tmp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		lstcon_group_decref(tmp);
 		return rc;
@@ -459,8 +467,8 @@ lstcon_group_nodes_add(lstcon_group_t *grp,

 static int
 lstcon_group_nodes_remove(lstcon_group_t *grp,
-			  int count, lnet_process_id_t *ids_up,
-			  struct list_head *result_up)
+			  int count, lnet_process_id_t __user *ids_up,
+			  struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_ndlink_t *ndl;
@@ -472,7 +480,7 @@ lstcon_group_nodes_remove(lstcon_group_t *grp,
 	/* End session and remove node from the group */

 	rc = lstcon_group_alloc(NULL, &tmp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		return -ENOMEM;
 	}
@@ -484,14 +492,14 @@ lstcon_group_nodes_remove(lstcon_group_t *grp,
 		}

 		/* move node to tmp group */
-		if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0)
+		if (!lstcon_group_ndlink_find(grp, id, &ndl, 0))
 			lstcon_group_ndlink_move(grp, tmp, ndl);
 	}

 	rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
 				     &tmp->grp_trans_list, LST_TRANS_SESEND,
 				     tmp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		goto error;
 	}
@@ -518,15 +526,15 @@ lstcon_group_add(char *name)
 	lstcon_group_t *grp;
 	int rc;

-	rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
-	if (rc != 0) {
+	rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
+	if (rc) {
 		/* find a group with same name */
 		lstcon_group_decref(grp);
 		return rc;
 	}

 	rc = lstcon_group_alloc(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't allocate descriptor for group %s\n", name);
 		return -ENOMEM;
 	}
@@ -537,17 +545,17 @@ lstcon_group_add(char *name)
 }

 int
-lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up,
-		 unsigned *featp, struct list_head *result_up)
+lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
+		 unsigned *featp, struct list_head __user *result_up)
 {
 	lstcon_group_t *grp;
 	int rc;

 	LASSERT(count > 0);
-	LASSERT(ids_up != NULL);
+	LASSERT(ids_up);

 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", name);
 		return rc;
 	}
@@ -575,7 +583,7 @@ lstcon_group_del(char *name)
 	int rc;

 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group: %s\n", name);
 		return rc;
 	}
@@ -590,7 +598,7 @@ lstcon_group_del(char *name)
 	rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
 				     &grp->grp_trans_list, LST_TRANS_SESEND,
 				     grp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		lstcon_group_decref(grp);
 		return rc;
@@ -601,8 +609,10 @@ lstcon_group_del(char *name)
 	lstcon_rpc_trans_destroy(trans);

 	lstcon_group_decref(grp);
-	/* -ref for session, it's destroyed,
-	 * status can't be rolled back, destroy group anyway */
+	/*
+	 * -ref for session, it's destroyed,
+	 * status can't be rolled back, destroy group anyway
+	 */
 	lstcon_group_decref(grp);

 	return rc;
@@ -615,7 +625,7 @@ lstcon_group_clean(char *name, int args)
 	int rc;

 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", name);
 		return rc;
 	}
@@ -641,14 +651,14 @@ lstcon_group_clean(char *name, int args)
 }

 int
-lstcon_nodes_remove(char *name, int count,
-		    lnet_process_id_t *ids_up, struct list_head *result_up)
+lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
+		    struct list_head __user *result_up)
 {
 	lstcon_group_t *grp = NULL;
 	int rc;

 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group: %s\n", name);
 		return rc;
 	}
@@ -671,14 +681,14 @@ lstcon_nodes_remove(char *name, int count,
 }

 int
-lstcon_group_refresh(char *name, struct list_head *result_up)
+lstcon_group_refresh(char *name, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_group_t *grp;
 	int rc;

 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group: %s\n", name);
 		return rc;
 	}
@@ -694,7 +704,7 @@ lstcon_group_refresh(char *name, struct list_head *result_up)
 	rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
 				     &grp->grp_trans_list, LST_TRANS_SESNEW,
 				     grp, lstcon_sesrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		/* local error, return */
 		CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
 		lstcon_group_decref(grp);
@@ -713,15 +723,15 @@ lstcon_group_refresh(char *name, struct list_head *result_up)
 }

 int
-lstcon_group_list(int index, int len, char *name_up)
+lstcon_group_list(int index, int len, char __user *name_up)
 {
 	lstcon_group_t *grp;

 	LASSERT(index >= 0);
-	LASSERT(name_up != NULL);
+	LASSERT(name_up);

 	list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
-		if (index-- == 0) {
+		if (!index--) {
 			return copy_to_user(name_up, grp->grp_name, len) ?
 			       -EFAULT : 0;
 		}
@@ -732,15 +742,15 @@ lstcon_group_list(int index, int len, char *name_up)

 static int
 lstcon_nodes_getent(struct list_head *head, int *index_p,
-		    int *count_p, lstcon_node_ent_t *dents_up)
+		    int *count_p, lstcon_node_ent_t __user *dents_up)
 {
 	lstcon_ndlink_t *ndl;
 	lstcon_node_t *nd;
 	int count = 0;
 	int index = 0;

-	LASSERT(index_p != NULL && count_p != NULL);
-	LASSERT(dents_up != NULL);
+	LASSERT(index_p && count_p);
+	LASSERT(dents_up);
 	LASSERT(*index_p >= 0);
 	LASSERT(*count_p > 0);

@@ -753,9 +763,9 @@ lstcon_nodes_getent(struct list_head *head, int *index_p,
 		nd = ndl->ndl_node;

 		if (copy_to_user(&dents_up[count].nde_id,
-				 &nd->nd_id, sizeof(nd->nd_id)) ||
+				 &nd->nd_id, sizeof(nd->nd_id)) ||
 		    copy_to_user(&dents_up[count].nde_state,
-				 &nd->nd_state, sizeof(nd->nd_state)))
+				 &nd->nd_state, sizeof(nd->nd_state)))
 			return -EFAULT;

 		count++;
@@ -771,8 +781,9 @@ lstcon_nodes_getent(struct list_head *head, int *index_p,
 }

 int
-lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
-		  int *index_p, int *count_p, lstcon_node_ent_t *dents_up)
+lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
+		  int *index_p, int *count_p,
+		  lstcon_node_ent_t __user *dents_up)
 {
 	lstcon_ndlist_ent_t *gentp;
 	lstcon_group_t *grp;
 	lstcon_ndlink_t *ndl;
 	int rc;

 	rc = lstcon_group_find(name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", name);
 		return rc;
 	}
@@ -796,7 +807,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,

 	/* non-verbose query */
 	LIBCFS_ALLOC(gentp, sizeof(lstcon_ndlist_ent_t));
-	if (gentp == NULL) {
+	if (!gentp) {
 		CERROR("Can't allocate ndlist_ent\n");
 		lstcon_group_decref(grp);

@@ -807,7 +818,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);

 	rc = copy_to_user(gents_p, gentp,
-			  sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
+			  sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;

 	LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));

@@ -822,7 +833,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
 	lstcon_batch_t *bat;

 	list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
-		if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
+		if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
 			*batpp = bat;
 			return 0;
 		}
@@ -838,21 +849,21 @@ lstcon_batch_add(char *name)
 	int i;
 	int rc;

-	rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
-	if (rc != 0) {
+	rc = !lstcon_batch_find(name, &bat) ? -EEXIST : 0;
+	if (rc) {
 		CDEBUG(D_NET, "Batch %s already exists\n", name);
 		return rc;
 	}

 	LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
-	if (bat == NULL) {
+	if (!bat) {
 		CERROR("Can't allocate descriptor for batch %s\n", name);
 		return -ENOMEM;
 	}

 	LIBCFS_ALLOC(bat->bat_cli_hash,
 		     sizeof(struct list_head) * LST_NODE_HASHSIZE);
-	if (bat->bat_cli_hash == NULL) {
+	if (!bat->bat_cli_hash) {
 		CERROR("Can't allocate hash for batch %s\n", name);
 		LIBCFS_FREE(bat, sizeof(lstcon_batch_t));

@@ -861,7 +872,7 @@ lstcon_batch_add(char *name)

 	LIBCFS_ALLOC(bat->bat_srv_hash,
 		     sizeof(struct list_head) * LST_NODE_HASHSIZE);
-	if (bat->bat_srv_hash == NULL) {
+	if (!bat->bat_srv_hash) {
 		CERROR("Can't allocate hash for batch %s\n", name);
 		LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
 		LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
@@ -869,7 +880,13 @@ lstcon_batch_add(char *name)
 		return -ENOMEM;
 	}

-	strcpy(bat->bat_name, name);
+	if (strlen(name) > sizeof(bat->bat_name) - 1) {
+		LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
+		LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
+		LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+		return -E2BIG;
+	}
+	strncpy(bat->bat_name, name, sizeof(bat->bat_name));
 	bat->bat_hdr.tsb_index = 0;
 	bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie;
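[Editor's note — illustration, not part of the patch: both lstcon_group_alloc() and lstcon_batch_add() above now reject over-long names instead of strcpy()ing them blindly into fixed-size buffers. The shape of that check as a standalone kernel-style sketch, with hypothetical names and a stand-in size:

	#include <linux/string.h>
	#include <linux/errno.h>

	#define NAME_SIZE 32			/* stand-in for LST_NAME_SIZE */

	struct named {
		char name[NAME_SIZE];
	};

	/* Copy a caller-supplied name, refusing anything that would not fit. */
	static int set_name(struct named *o, const char *name)
	{
		if (strlen(name) > sizeof(o->name) - 1)
			return -E2BIG;
		strncpy(o->name, name, sizeof(o->name)); /* fits; stays NUL-terminated */
		return 0;
	}

Because the length was checked first, strncpy() here always finds the NUL within the bound and also zero-pads the remainder of the buffer. End of note.]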
@@ -892,17 +909,17 @@ lstcon_batch_add(char *name)
 }

 int
-lstcon_batch_list(int index, int len, char *name_up)
+lstcon_batch_list(int index, int len, char __user *name_up)
 {
 	lstcon_batch_t *bat;

-	LASSERT(name_up != NULL);
+	LASSERT(name_up);
 	LASSERT(index >= 0);

 	list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
-		if (index-- == 0) {
+		if (!index--) {
 			return copy_to_user(name_up, bat->bat_name, len) ?
-			       -EFAULT : 0;
+			       -EFAULT : 0;
 		}
 	}

@@ -910,20 +927,20 @@ lstcon_batch_list(int index, int len, char *name_up)
 }

 int
-lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
-		  int testidx, int *index_p, int *ndent_p,
-		  lstcon_node_ent_t *dents_up)
+lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
+		  int server, int testidx, int *index_p, int *ndent_p,
+		  lstcon_node_ent_t __user *dents_up)
 {
 	lstcon_test_batch_ent_t *entp;
 	struct list_head *clilst;
 	struct list_head *srvlst;
 	lstcon_test_t *test = NULL;
 	lstcon_batch_t *bat;
-	lstcon_ndlink_t *ndl;
+	lstcon_ndlink_t *ndl;
 	int rc;

 	rc = lstcon_batch_find(name, &bat);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return -ENOENT;
 	}
@@ -941,12 +958,12 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
 		}
 	}

-	clilst = (test == NULL) ? &bat->bat_cli_list :
-		 &test->tes_src_grp->grp_ndl_list;
-	srvlst = (test == NULL) ? &bat->bat_srv_list :
-		 &test->tes_dst_grp->grp_ndl_list;
+	clilst = !test ? &bat->bat_cli_list :
+		 &test->tes_src_grp->grp_ndl_list;
+	srvlst = !test ? &bat->bat_srv_list :
+		 &test->tes_dst_grp->grp_ndl_list;

-	if (dents_up != NULL) {
+	if (dents_up) {
 		rc = lstcon_nodes_getent((server ? srvlst : clilst),
 					 index_p, ndent_p, dents_up);
 		return rc;
@@ -954,17 +971,16 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,

 	/* non-verbose query */
 	LIBCFS_ALLOC(entp, sizeof(lstcon_test_batch_ent_t));
-	if (entp == NULL)
+	if (!entp)
 		return -ENOMEM;

-	if (test == NULL) {
+	if (!test) {
 		entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
 		entp->u.tbe_batch.bae_state = bat->bat_state;
 	} else {
-
-		entp->u.tbe_test.tse_type = test->tes_type;
-		entp->u.tbe_test.tse_loop = test->tes_loop;
+		entp->u.tbe_test.tse_type   = test->tes_type;
+		entp->u.tbe_test.tse_loop   = test->tes_loop;
 		entp->u.tbe_test.tse_concur = test->tes_concur;
 	}

@@ -975,7 +991,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
 		LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);

 	rc = copy_to_user(ent_up, entp,
-			  sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+			  sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;

 	LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));

@@ -1006,7 +1022,7 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)

 static int
 lstcon_batch_op(lstcon_batch_t *bat, int transop,
-		struct list_head *result_up)
+		struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	int rc;
@@ -1014,7 +1030,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
 	rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
 				     &bat->bat_trans_list, transop,
 				     bat, lstcon_batrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
@@ -1029,12 +1045,12 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
 }

 int
-lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
+lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
 {
 	lstcon_batch_t *bat;
 	int rc;

-	if (lstcon_batch_find(name, &bat) != 0) {
+	if (lstcon_batch_find(name, &bat)) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return -ENOENT;
 	}
@@ -1044,19 +1060,19 @@ lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
 	rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);

 	/* mark batch as running if it's started in any node */
-	if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0)
+	if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0))
 		bat->bat_state = LST_BATCH_RUNNING;

 	return rc;
 }

 int
-lstcon_batch_stop(char *name, int force, struct list_head *result_up)
+lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
 {
 	lstcon_batch_t *bat;
 	int rc;

-	if (lstcon_batch_find(name, &bat) != 0) {
+	if (lstcon_batch_find(name, &bat)) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return -ENOENT;
 	}
@@ -1066,7 +1082,7 @@ lstcon_batch_stop(char *name, int force, struct list_head *result_up)
 	rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);

 	/* mark batch as stopped if all RPCs finished */
-	if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0)
+	if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0))
 		bat->bat_state = LST_BATCH_IDLE;

 	return rc;
@@ -1083,7 +1099,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)

 	while (!list_empty(&bat->bat_test_list)) {
 		test = list_entry(bat->bat_test_list.next,
-				  lstcon_test_t, tes_link);
+				  lstcon_test_t, tes_link);
 		LASSERT(list_empty(&test->tes_trans_list));

 		list_del(&test->tes_link);
@@ -1099,7 +1115,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)

 	while (!list_empty(&bat->bat_cli_list)) {
 		ndl = list_entry(bat->bat_cli_list.next,
-				 lstcon_ndlink_t, ndl_link);
+				 lstcon_ndlink_t, ndl_link);
 		list_del_init(&ndl->ndl_link);

 		lstcon_ndlink_release(ndl);
@@ -1107,7 +1123,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)

 	while (!list_empty(&bat->bat_srv_list)) {
 		ndl = list_entry(bat->bat_srv_list.next,
-				 lstcon_ndlink_t, ndl_link);
+				 lstcon_ndlink_t, ndl_link);
 		list_del_init(&ndl->ndl_link);

 		lstcon_ndlink_release(ndl);
@@ -1135,10 +1151,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
 	struct list_head *head;

 	test = (lstcon_test_t *)arg;
-	LASSERT(test != NULL);
+	LASSERT(test);

 	batch = test->tes_batch;
-	LASSERT(batch != NULL);
+	LASSERT(batch);

 	if (test->tes_oneside &&
 	    transop == LST_TRANS_TSBSRVADD)
@@ -1160,7 +1176,7 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)

 	LASSERT(nd->nd_id.nid != LNET_NID_ANY);

-	if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
+	if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1))
 		return -ENOMEM;

 	if (list_empty(&ndl->ndl_link))
@@ -1170,31 +1186,31 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
 }

 static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
+lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	lstcon_group_t *grp;
 	int transop;
 	int rc;

-	LASSERT(test->tes_src_grp != NULL);
-	LASSERT(test->tes_dst_grp != NULL);
+	LASSERT(test->tes_src_grp);
+	LASSERT(test->tes_dst_grp);

 	transop = LST_TRANS_TSBSRVADD;
-	grp = test->tes_dst_grp;
+	grp     = test->tes_dst_grp;
 again:
 	rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
 				     &test->tes_trans_list, transop,
 				     test, lstcon_testrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}

 	lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);

-	if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
-	    lstcon_trans_stat()->trs_fwk_errno != 0) {
+	if (lstcon_trans_stat()->trs_rpc_errno ||
+	    lstcon_trans_stat()->trs_fwk_errno) {
 		lstcon_rpc_trans_interpreter(trans, result_up, NULL);

 		lstcon_rpc_trans_destroy(trans);
@@ -1226,7 +1242,7 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
 	int rc;

 	rc = lstcon_batch_find(name, batch);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find batch %s\n", name);
 		return rc;
 	}
@@ -1243,10 +1259,10 @@ static int
 lstcon_verify_group(const char *name, lstcon_group_t **grp)
 {
 	int rc;
-	lstcon_ndlink_t *ndl;
+	lstcon_ndlink_t *ndl;

 	rc = lstcon_group_find(name, grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "can't find group %s\n", name);
 		return rc;
 	}
@@ -1266,13 +1282,13 @@ lstcon_test_add(char *batch_name, int type, int loop,
 		int concur, int dist, int span,
 		char *src_name, char *dst_name,
 		void *param, int paramlen, int *retp,
-		struct list_head *result_up)
+		struct list_head __user *result_up)
 {
-	lstcon_test_t *test = NULL;
-	int rc;
-	lstcon_group_t *src_grp = NULL;
-	lstcon_group_t *dst_grp = NULL;
-	lstcon_batch_t *batch = NULL;
+	lstcon_test_t *test = NULL;
+	int rc;
+	lstcon_group_t *src_grp = NULL;
+	lstcon_group_t *dst_grp = NULL;
+	lstcon_batch_t *batch = NULL;

 	/*
 	 * verify that a batch of the given name exists, and the groups
@@ -1280,15 +1296,15 @@ lstcon_test_add(char *batch_name, int type, int loop,
 	 * active node
 	 */
 	rc = lstcon_verify_batch(batch_name, &batch);
-	if (rc != 0)
+	if (rc)
 		goto out;

 	rc = lstcon_verify_group(src_name, &src_grp);
-	if (rc != 0)
+	if (rc)
 		goto out;

 	rc = lstcon_verify_group(dst_name, &dst_grp);
-	if (rc != 0)
+	if (rc)
 		goto out;

 	if (dst_grp->grp_userland)
@@ -1302,32 +1318,32 @@ lstcon_test_add(char *batch_name, int type, int loop,
 		goto out;
 	}

-	test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
-	test->tes_batch = batch;
-	test->tes_type = type;
-	test->tes_oneside = 0; /* TODO */
-	test->tes_loop = loop;
-	test->tes_concur = concur;
-	test->tes_stop_onerr = 1; /* TODO */
-	test->tes_span = span;
-	test->tes_dist = dist;
-	test->tes_cliidx = 0; /* just used for creating RPC */
-	test->tes_src_grp = src_grp;
-	test->tes_dst_grp = dst_grp;
+	test->tes_hdr.tsb_id	= batch->bat_hdr.tsb_id;
+	test->tes_batch		= batch;
+	test->tes_type		= type;
+	test->tes_oneside	= 0; /* TODO */
+	test->tes_loop		= loop;
+	test->tes_concur	= concur;
+	test->tes_stop_onerr	= 1; /* TODO */
+	test->tes_span		= span;
+	test->tes_dist		= dist;
+	test->tes_cliidx	= 0; /* just used for creating RPC */
+	test->tes_src_grp	= src_grp;
+	test->tes_dst_grp	= dst_grp;
 	INIT_LIST_HEAD(&test->tes_trans_list);

-	if (param != NULL) {
+	if (param) {
 		test->tes_paramlen = paramlen;
 		memcpy(&test->tes_param[0], param, paramlen);
 	}

 	rc = lstcon_test_nodes_add(test, result_up);

-	if (rc != 0)
+	if (rc)
 		goto out;

-	if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
-	    lstcon_trans_stat()->trs_fwk_errno != 0)
+	if (lstcon_trans_stat()->trs_rpc_errno ||
+	    lstcon_trans_stat()->trs_fwk_errno)
 		CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
 		       batch_name);

@@ -1340,13 +1356,13 @@ lstcon_test_add(char *batch_name, int type, int loop,
 	/* hold groups so nobody can change them */
 	return rc;
 out:
-	if (test != NULL)
+	if (test)
 		LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));

-	if (dst_grp != NULL)
+	if (dst_grp)
 		lstcon_group_decref(dst_grp);

-	if (src_grp != NULL)
+	if (src_grp)
 		lstcon_group_decref(src_grp);

 	return rc;
@@ -1369,16 +1385,16 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)

 static int
 lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
-		      lstcon_rpc_ent_t *ent_up)
+		      lstcon_rpc_ent_t __user *ent_up)
 {
 	srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;

 	LASSERT(transop == LST_TRANS_TSBCLIQRY ||
-		transop == LST_TRANS_TSBSRVQRY);
+		transop == LST_TRANS_TSBSRVQRY);

 	/* positive errno, framework error code */
-	if (copy_to_user(&ent_up->rpe_priv[0],
-			 &rep->bar_active, sizeof(rep->bar_active)))
+	if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active,
+			 sizeof(rep->bar_active)))
 		return -EFAULT;

 	return 0;
@@ -1386,7 +1402,7 @@ lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,

 int
 lstcon_test_batch_query(char *name, int testidx, int client,
-			int timeout, struct list_head *result_up)
+			int timeout, struct list_head __user *result_up)
 {
 	lstcon_rpc_trans_t *trans;
 	struct list_head *translist;
@@ -1398,43 +1414,43 @@ lstcon_test_batch_query(char *name, int testidx, int client,
 	int rc;

 	rc = lstcon_batch_find(name, &batch);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find batch: %s\n", name);
 		return rc;
 	}

-	if (testidx == 0) {
+	if (!testidx) {
 		translist = &batch->bat_trans_list;
-		ndlist = &batch->bat_cli_list;
-		hdr = &batch->bat_hdr;
+		ndlist    = &batch->bat_cli_list;
+		hdr       = &batch->bat_hdr;
 	} else {
 		/* query specified test only */
 		rc = lstcon_test_find(batch, testidx, &test);
-		if (rc != 0) {
+		if (rc) {
 			CDEBUG(D_NET, "Can't find test: %d\n", testidx);
 			return rc;
 		}

 		translist = &test->tes_trans_list;
-		ndlist = &test->tes_src_grp->grp_ndl_list;
-		hdr = &test->tes_hdr;
+		ndlist    = &test->tes_src_grp->grp_ndl_list;
+		hdr       = &test->tes_hdr;
 	}

 	transop = client ? LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY;

 	rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
 				     lstcon_batrpc_condition, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}

 	lstcon_rpc_trans_postwait(trans, timeout);

-	if (testidx == 0 && /* query a batch, not a test */
-	    lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 &&
-	    lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) {
+	if (!testidx && /* query a batch, not a test */
+	    !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
+	    !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
 		/* all RPCs finished, and no active test */
 		batch->bat_state = LST_BATCH_IDLE;
 	}
@@ -1448,19 +1464,19 @@ lstcon_test_batch_query(char *name, int testidx, int client,

 static int
 lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
-		       lstcon_rpc_ent_t *ent_up)
+		       lstcon_rpc_ent_t __user *ent_up)
 {
 	srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
-	sfw_counters_t *sfwk_stat;
-	srpc_counters_t *srpc_stat;
-	lnet_counters_t *lnet_stat;
+	sfw_counters_t __user *sfwk_stat;
+	srpc_counters_t __user *srpc_stat;
+	lnet_counters_t __user *lnet_stat;

-	if (rep->str_status != 0)
+	if (rep->str_status)
 		return 0;

-	sfwk_stat = (sfw_counters_t *)&ent_up->rpe_payload[0];
-	srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
-	lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
+	sfwk_stat = (sfw_counters_t __user *)&ent_up->rpe_payload[0];
+	srpc_stat = (srpc_counters_t __user *)(sfwk_stat + 1);
+	lnet_stat = (lnet_counters_t __user *)(srpc_stat + 1);

 	if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
 	    copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
@@ -1472,7 +1488,7 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg,

 static int
 lstcon_ndlist_stat(struct list_head *ndlist,
-		   int timeout, struct list_head *result_up)
+		   int timeout, struct list_head __user *result_up)
 {
 	struct list_head head;
 	lstcon_rpc_trans_t *trans;
@@ -1482,7 +1498,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,

 	rc = lstcon_rpc_trans_ndlist(ndlist, &head,
 				     LST_TRANS_STATQRY, NULL, NULL, &trans);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Can't create transaction: %d\n", rc);
 		return rc;
 	}
@@ -1497,13 +1513,14 @@ lstcon_ndlist_stat(struct list_head *ndlist,
 }

 int
-lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
+lstcon_group_stat(char *grp_name, int timeout,
+		  struct list_head __user *result_up)
 {
 	lstcon_group_t *grp;
 	int rc;

 	rc = lstcon_group_find(grp_name, &grp);
-	if (rc != 0) {
+	if (rc) {
 		CDEBUG(D_NET, "Can't find group %s\n", grp_name);
 		return rc;
 	}
@@ -1516,17 +1533,17 @@ lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
 }

 int
-lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
-		  int timeout, struct list_head *result_up)
+lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+		  int timeout, struct list_head __user *result_up)
 {
-	lstcon_ndlink_t *ndl;
+	lstcon_ndlink_t *ndl;
 	lstcon_group_t *tmp;
 	lnet_process_id_t id;
 	int i;
 	int rc;

 	rc = lstcon_group_alloc(NULL, &tmp);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Out of memory\n");
 		return -ENOMEM;
 	}
@@ -1539,7 +1556,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,

 		/* add to tmp group */
 		rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
-		if (rc != 0) {
+		if (rc) {
 			CDEBUG((rc == -ENOMEM) ?
D_ERROR : D_NET, "Failed to find or create %s: %d\n", libcfs_id2str(id), rc); @@ -1547,7 +1564,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, } } - if (rc != 0) { + if (rc) { lstcon_group_decref(tmp); return rc; } @@ -1562,14 +1579,14 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, static int lstcon_debug_ndlist(struct list_head *ndlist, struct list_head *translist, - int timeout, struct list_head *result_up) + int timeout, struct list_head __user *result_up) { lstcon_rpc_trans_t *trans; - int rc; + int rc; rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY, NULL, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } @@ -1584,7 +1601,7 @@ lstcon_debug_ndlist(struct list_head *ndlist, } int -lstcon_session_debug(int timeout, struct list_head *result_up) +lstcon_session_debug(int timeout, struct list_head __user *result_up) { return lstcon_debug_ndlist(&console_session.ses_ndl_list, NULL, timeout, result_up); @@ -1592,13 +1609,13 @@ lstcon_session_debug(int timeout, struct list_head *result_up) int lstcon_batch_debug(int timeout, char *name, - int client, struct list_head *result_up) + int client, struct list_head __user *result_up) { lstcon_batch_t *bat; int rc; rc = lstcon_batch_find(name, &bat); - if (rc != 0) + if (rc) return -ENOENT; rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list : @@ -1610,13 +1627,13 @@ lstcon_batch_debug(int timeout, char *name, int lstcon_group_debug(int timeout, char *name, - struct list_head *result_up) + struct list_head __user *result_up) { lstcon_group_t *grp; int rc; rc = lstcon_group_find(name, &grp); - if (rc != 0) + if (rc) return -ENOENT; rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL, @@ -1628,8 +1645,8 @@ lstcon_group_debug(int timeout, char *name, int lstcon_nodes_debug(int timeout, - int count, lnet_process_id_t *ids_up, - struct list_head *result_up) + int count, lnet_process_id_t __user *ids_up, + struct list_head __user *result_up) { lnet_process_id_t id; lstcon_ndlink_t *ndl; @@ -1638,7 +1655,7 @@ lstcon_nodes_debug(int timeout, int rc; rc = lstcon_group_alloc(NULL, &grp); - if (rc != 0) { + if (rc) { CDEBUG(D_NET, "Out of memory\n"); return rc; } @@ -1651,13 +1668,13 @@ lstcon_nodes_debug(int timeout, /* node is added to tmp group */ rc = lstcon_group_ndlink_find(grp, id, &ndl, 1); - if (rc != 0) { + if (rc) { CERROR("Can't create node link\n"); break; } } - if (rc != 0) { + if (rc) { lstcon_group_decref(grp); return rc; } @@ -1673,8 +1690,8 @@ lstcon_nodes_debug(int timeout, int lstcon_session_match(lst_sid_t sid) { - return (console_session.ses_id.ses_nid == sid.ses_nid && - console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0; + return (console_session.ses_id.ses_nid == sid.ses_nid && + console_session.ses_id.ses_stamp == sid.ses_stamp) ? 
1 : 0; } static void @@ -1685,15 +1702,13 @@ lstcon_new_session_id(lst_sid_t *sid) LASSERT(console_session.ses_state == LST_SESSION_NONE); LNetGetId(1, &id); - sid->ses_nid = id.nid; + sid->ses_nid = id.nid; sid->ses_stamp = cfs_time_current(); } -extern srpc_service_t lstcon_acceptor_service; - int lstcon_session_new(char *name, int key, unsigned feats, - int timeout, int force, lst_sid_t *sid_up) + int timeout, int force, lst_sid_t __user *sid_up) { int rc = 0; int i; @@ -1709,11 +1724,11 @@ lstcon_session_new(char *name, int key, unsigned feats, rc = lstcon_session_end(); /* lstcon_session_end() only return local error */ - if (rc != 0) + if (rc) return rc; } - if ((feats & ~LST_FEATS_MASK) != 0) { + if (feats & ~LST_FEATS_MASK) { CNETERR("Unknown session features %x\n", (feats & ~LST_FEATS_MASK)); return -EINVAL; @@ -1731,15 +1746,18 @@ lstcon_session_new(char *name, int key, unsigned feats, console_session.ses_feats_updated = 0; console_session.ses_timeout = (timeout <= 0) ? LST_CONSOLE_TIMEOUT : timeout; - strlcpy(console_session.ses_name, name, + + if (strlen(name) > sizeof(console_session.ses_name) - 1) + return -E2BIG; + strncpy(console_session.ses_name, name, sizeof(console_session.ses_name)); rc = lstcon_batch_add(LST_DEFAULT_BATCH); - if (rc != 0) + if (rc) return rc; rc = lstcon_rpc_pinger_start(); - if (rc != 0) { + if (rc) { lstcon_batch_t *bat = NULL; lstcon_batch_find(LST_DEFAULT_BATCH, &bat); @@ -1748,8 +1766,8 @@ lstcon_session_new(char *name, int key, unsigned feats, return rc; } - if (copy_to_user(sid_up, &console_session.ses_id, - sizeof(lst_sid_t)) == 0) + if (!copy_to_user(sid_up, &console_session.ses_id, + sizeof(lst_sid_t))) return rc; lstcon_session_end(); @@ -1758,8 +1776,10 @@ lstcon_session_new(char *name, int key, unsigned feats, } int -lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp, - lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len) +lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up, + unsigned __user *featp, + lstcon_ndlist_ent_t __user *ndinfo_up, + char __user *name_up, int len) { lstcon_ndlist_ent_t *entp; lstcon_ndlink_t *ndl; @@ -1769,18 +1789,18 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp, return -ESRCH; LIBCFS_ALLOC(entp, sizeof(*entp)); - if (entp == NULL) + if (!entp) return -ENOMEM; list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) LST_NODE_STATE_COUNTER(ndl->ndl_node, entp); if (copy_to_user(sid_up, &console_session.ses_id, - sizeof(lst_sid_t)) || + sizeof(lst_sid_t)) || copy_to_user(key_up, &console_session.ses_key, - sizeof(*key_up)) || + sizeof(*key_up)) || copy_to_user(featp, &console_session.ses_features, - sizeof(*featp)) || + sizeof(*featp)) || copy_to_user(ndinfo_up, entp, sizeof(*entp)) || copy_to_user(name_up, console_session.ses_name, len)) rc = -EFAULT; @@ -1803,7 +1823,7 @@ lstcon_session_end(void) rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list, NULL, LST_TRANS_SESEND, NULL, lstcon_sesrpc_condition, &trans); - if (rc != 0) { + if (rc) { CERROR("Can't create transaction: %d\n", rc); return rc; } @@ -1820,16 +1840,16 @@ lstcon_session_end(void) /* waiting for orphan rpcs to die */ lstcon_rpc_cleanup_wait(); - console_session.ses_id = LST_INVALID_SID; + console_session.ses_id = LST_INVALID_SID; console_session.ses_state = LST_SESSION_NONE; - console_session.ses_key = 0; + console_session.ses_key = 0; console_session.ses_force = 0; console_session.ses_feats_updated = 0; /* destroy all batches */ while 
(!list_empty(&console_session.ses_bat_list)) { bat = list_entry(console_session.ses_bat_list.next, - lstcon_batch_t, bat_link); + lstcon_batch_t, bat_link); lstcon_batch_destroy(bat); } @@ -1837,7 +1857,7 @@ lstcon_session_end(void) /* destroy all groups */ while (!list_empty(&console_session.ses_grp_list)) { grp = list_entry(console_session.ses_grp_list.next, - lstcon_group_t, grp_link); + lstcon_group_t, grp_link); LASSERT(grp->grp_ref == 1); lstcon_group_decref(grp); @@ -1847,7 +1867,7 @@ lstcon_session_end(void) LASSERT(list_empty(&console_session.ses_ndl_list)); console_session.ses_shutdown = 0; - console_session.ses_expired = 0; + console_session.ses_expired = 0; return rc; } @@ -1857,7 +1877,7 @@ lstcon_session_feats_check(unsigned feats) { int rc = 0; - if ((feats & ~LST_FEATS_MASK) != 0) { + if (feats & ~LST_FEATS_MASK) { CERROR("Can't support these features: %x\n", (feats & ~LST_FEATS_MASK)); return -EPROTO; @@ -1875,7 +1895,7 @@ lstcon_session_feats_check(unsigned feats) spin_unlock(&console_session.ses_rpc_lock); - if (rc != 0) { + if (rc) { CERROR("remote features %x do not match with session features %x of console\n", feats, console_session.ses_features); } @@ -1886,13 +1906,13 @@ lstcon_session_feats_check(unsigned feats) static int lstcon_acceptor_handle(struct srpc_server_rpc *rpc) { - srpc_msg_t *rep = &rpc->srpc_replymsg; - srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; + srpc_msg_t *rep = &rpc->srpc_replymsg; + srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; srpc_join_reqst_t *jreq = &req->msg_body.join_reqst; srpc_join_reply_t *jrep = &rep->msg_body.join_reply; - lstcon_group_t *grp = NULL; + lstcon_group_t *grp = NULL; lstcon_ndlink_t *ndl; - int rc = 0; + int rc = 0; sfw_unpack_message(req); @@ -1905,26 +1925,26 @@ lstcon_acceptor_handle(struct srpc_server_rpc *rpc) goto out; } - if (lstcon_session_feats_check(req->msg_ses_feats) != 0) { + if (lstcon_session_feats_check(req->msg_ses_feats)) { jrep->join_status = EPROTO; goto out; } if (jreq->join_sid.ses_nid != LNET_NID_ANY && - !lstcon_session_match(jreq->join_sid)) { + !lstcon_session_match(jreq->join_sid)) { jrep->join_status = EBUSY; goto out; } - if (lstcon_group_find(jreq->join_group, &grp) != 0) { + if (lstcon_group_find(jreq->join_group, &grp)) { rc = lstcon_group_alloc(jreq->join_group, &grp); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); goto out; } list_add_tail(&grp->grp_link, - &console_session.ses_grp_list); + &console_session.ses_grp_list); lstcon_group_addref(grp); } @@ -1935,31 +1955,31 @@ lstcon_acceptor_handle(struct srpc_server_rpc *rpc) } rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0); - if (rc == 0) { + if (!rc) { jrep->join_status = EEXIST; goto out; } rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1); - if (rc != 0) { + if (rc) { CERROR("Out of memory\n"); goto out; } - ndl->ndl_node->nd_state = LST_NODE_ACTIVE; + ndl->ndl_node->nd_state = LST_NODE_ACTIVE; ndl->ndl_node->nd_timeout = console_session.ses_timeout; - if (grp->grp_userland == 0) + if (!grp->grp_userland) grp->grp_userland = 1; strlcpy(jrep->join_session, console_session.ses_name, sizeof(jrep->join_session)); jrep->join_timeout = console_session.ses_timeout; - jrep->join_status = 0; + jrep->join_status = 0; out: rep->msg_ses_feats = console_session.ses_features; - if (grp != NULL) + if (grp) lstcon_group_decref(grp); mutex_unlock(&console_session.ses_mutex); @@ -1967,17 +1987,17 @@ out: return rc; } -srpc_service_t lstcon_acceptor_service; +static srpc_service_t lstcon_acceptor_service; static void 
lstcon_init_acceptor_service(void) { /* initialize selftest console acceptor service table */ - lstcon_acceptor_service.sv_name = "join session"; - lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; - lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; + lstcon_acceptor_service.sv_name = "join session"; + lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; + lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX; } -extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data); +extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr); static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry); @@ -1988,16 +2008,16 @@ lstcon_console_init(void) int i; int rc; - memset(&console_session, 0, sizeof(lstcon_session_t)); + memset(&console_session, 0, sizeof(struct lstcon_session)); - console_session.ses_id = LST_INVALID_SID; - console_session.ses_state = LST_SESSION_NONE; - console_session.ses_timeout = 0; - console_session.ses_force = 0; - console_session.ses_expired = 0; + console_session.ses_id = LST_INVALID_SID; + console_session.ses_state = LST_SESSION_NONE; + console_session.ses_timeout = 0; + console_session.ses_force = 0; + console_session.ses_expired = 0; console_session.ses_feats_updated = 0; - console_session.ses_features = LST_FEATS_MASK; - console_session.ses_laststamp = ktime_get_real_seconds(); + console_session.ses_features = LST_FEATS_MASK; + console_session.ses_laststamp = ktime_get_real_seconds(); mutex_init(&console_session.ses_mutex); @@ -2008,7 +2028,7 @@ lstcon_console_init(void) LIBCFS_ALLOC(console_session.ses_ndl_hash, sizeof(struct list_head) * LST_GLOBAL_HASHSIZE); - if (console_session.ses_ndl_hash == NULL) + if (!console_session.ses_ndl_hash) return -ENOMEM; for (i = 0; i < LST_GLOBAL_HASHSIZE; i++) @@ -2019,7 +2039,7 @@ lstcon_console_init(void) rc = srpc_add_service(&lstcon_acceptor_service); LASSERT(rc != -EBUSY); - if (rc != 0) { + if (rc) { LIBCFS_FREE(console_session.ses_ndl_hash, sizeof(struct list_head) * LST_GLOBAL_HASHSIZE); return rc; @@ -2027,14 +2047,14 @@ lstcon_console_init(void) rc = srpc_service_add_buffers(&lstcon_acceptor_service, lstcon_acceptor_service.sv_wi_total); - if (rc != 0) { + if (rc) { rc = -ENOMEM; goto out; } rc = libcfs_register_ioctl(&lstcon_ioctl_handler); - if (rc == 0) { + if (!rc) { lstcon_rpc_module_init(); return 0; } @@ -2075,9 +2095,8 @@ lstcon_console_fini(void) LASSERT(list_empty(&console_session.ses_bat_list)); LASSERT(list_empty(&console_session.ses_trans_list)); - for (i = 0; i < LST_NODE_HASHSIZE; i++) { + for (i = 0; i < LST_NODE_HASHSIZE; i++) LASSERT(list_empty(&console_session.ses_ndl_hash[i])); - } LIBCFS_FREE(console_session.ses_ndl_hash, sizeof(struct list_head) * LST_GLOBAL_HASHSIZE); diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h index 3f3286c0c..554f58244 100644 --- a/drivers/staging/lustre/lnet/selftest/console.h +++ b/drivers/staging/lustre/lnet/selftest/console.h @@ -52,79 +52,79 @@ typedef struct lstcon_node { lnet_process_id_t nd_id; /* id of the node */ - int nd_ref; /* reference count */ - int nd_state; /* state of the node */ - int nd_timeout; /* session timeout */ - unsigned long nd_stamp; /* timestamp of last replied RPC */ + int nd_ref; /* reference count */ + int nd_state; /* state of the node */ + int nd_timeout; /* session timeout */ + unsigned long nd_stamp; /* timestamp of last replied RPC */ struct lstcon_rpc 
nd_ping; /* ping rpc */ } lstcon_node_t; /* node descriptor */ typedef struct { struct list_head ndl_link; /* chain on list */ struct list_head ndl_hlink; /* chain on hash */ - lstcon_node_t *ndl_node; /* pointer to node */ + lstcon_node_t *ndl_node; /* pointer to node */ } lstcon_ndlink_t; /* node link descriptor */ typedef struct { - struct list_head grp_link; /* chain on global group list + struct list_head grp_link; /* chain on global group list */ - int grp_ref; /* reference count */ - int grp_userland; /* has userland nodes */ - int grp_nnode; /* # of nodes */ - char grp_name[LST_NAME_SIZE]; /* group name */ - - struct list_head grp_trans_list; /* transaction list */ - struct list_head grp_ndl_list; /* nodes list */ - struct list_head grp_ndl_hash[0]; /* hash table for nodes */ + int grp_ref; /* reference count */ + int grp_userland; /* has userland nodes */ + int grp_nnode; /* # of nodes */ + char grp_name[LST_NAME_SIZE]; /* group name */ + + struct list_head grp_trans_list; /* transaction list */ + struct list_head grp_ndl_list; /* nodes list */ + struct list_head grp_ndl_hash[0]; /* hash table for nodes */ } lstcon_group_t; /* (alias of nodes) group descriptor */ -#define LST_BATCH_IDLE 0xB0 /* idle batch */ +#define LST_BATCH_IDLE 0xB0 /* idle batch */ #define LST_BATCH_RUNNING 0xB1 /* running batch */ typedef struct lstcon_tsb_hdr { - lst_bid_t tsb_id; /* batch ID */ - int tsb_index; /* test index */ + lst_bid_t tsb_id; /* batch ID */ + int tsb_index; /* test index */ } lstcon_tsb_hdr_t; typedef struct { - lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ - struct list_head bat_link; /* chain on session's batches list */ - int bat_ntest; /* # of test */ - int bat_state; /* state of the batch */ - int bat_arg; /* parameter for run|stop, timeout + lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ + struct list_head bat_link; /* chain on session's batches list */ + int bat_ntest; /* # of test */ + int bat_state; /* state of the batch */ + int bat_arg; /* parameter for run|stop, timeout * for run, force for stop */ - char bat_name[LST_NAME_SIZE];/* name of batch */ + char bat_name[LST_NAME_SIZE];/* name of batch */ struct list_head bat_test_list; /* list head of tests (lstcon_test_t) */ struct list_head bat_trans_list; /* list head of transaction */ - struct list_head bat_cli_list; /* list head of client nodes + struct list_head bat_cli_list; /* list head of client nodes * (lstcon_node_t) */ struct list_head *bat_cli_hash; /* hash table of client nodes */ - struct list_head bat_srv_list; /* list head of server nodes */ + struct list_head bat_srv_list; /* list head of server nodes */ struct list_head *bat_srv_hash; /* hash table of server nodes */ } lstcon_batch_t; /* (tests ) batch descriptor */ typedef struct lstcon_test { - lstcon_tsb_hdr_t tes_hdr; /* test batch header */ - struct list_head tes_link; /* chain on batch's tests list */ - lstcon_batch_t *tes_batch; /* pointer to batch */ - - int tes_type; /* type of the test, i.e: bulk, ping */ - int tes_stop_onerr; /* stop on error */ - int tes_oneside; /* one-sided test */ - int tes_concur; /* concurrency */ - int tes_loop; /* loop count */ - int tes_dist; /* nodes distribution of target group */ - int tes_span; /* nodes span of target group */ - int tes_cliidx; /* client index, used for RPC creating */ + lstcon_tsb_hdr_t tes_hdr; /* test batch header */ + struct list_head tes_link; /* chain on batch's tests list */ + lstcon_batch_t *tes_batch; /* pointer to batch */ + + int tes_type; /* type of the test, i.e: bulk, ping */ + int 
tes_stop_onerr; /* stop on error */ + int tes_oneside; /* one-sided test */ + int tes_concur; /* concurrency */ + int tes_loop; /* loop count */ + int tes_dist; /* nodes distribution of target group */ + int tes_span; /* nodes span of target group */ + int tes_cliidx; /* client index, used for RPC creating */ struct list_head tes_trans_list; /* transaction list */ - lstcon_group_t *tes_src_grp; /* group run the test */ - lstcon_group_t *tes_dst_grp; /* target group */ + lstcon_group_t *tes_src_grp; /* group run the test */ + lstcon_group_t *tes_dst_grp; /* target group */ - int tes_paramlen; /* test parameter length */ - char tes_param[0]; /* test parameter */ + int tes_paramlen; /* test parameter length */ + char tes_param[0]; /* test parameter */ } lstcon_test_t; /* a single test descriptor */ #define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ @@ -135,25 +135,25 @@ typedef struct lstcon_test { #define LST_CONSOLE_TIMEOUT 300 /* default console timeout */ -typedef struct { - struct mutex ses_mutex; /* only 1 thread in session */ - lst_sid_t ses_id; /* global session id */ - int ses_key; /* local session key */ - int ses_state; /* state of session */ - int ses_timeout; /* timeout in seconds */ - time64_t ses_laststamp; /* last operation stamp (seconds) +struct lstcon_session { + struct mutex ses_mutex; /* only 1 thread in session */ + lst_sid_t ses_id; /* global session id */ + int ses_key; /* local session key */ + int ses_state; /* state of session */ + int ses_timeout; /* timeout in seconds */ + time64_t ses_laststamp; /* last operation stamp (seconds) */ - unsigned ses_features; /* tests features of the session + unsigned ses_features; /* tests features of the session */ - unsigned ses_feats_updated:1; /* features are synced with + unsigned ses_feats_updated:1; /* features are synced with * remote test nodes */ - unsigned ses_force:1; /* force creating */ - unsigned ses_shutdown:1; /* session is shutting down */ - unsigned ses_expired:1; /* console is timedout */ - __u64 ses_id_cookie; /* batch id cookie */ - char ses_name[LST_NAME_SIZE];/* session name */ - lstcon_rpc_trans_t *ses_ping; /* session pinger */ - stt_timer_t ses_ping_timer; /* timer for pinger */ + unsigned ses_force:1; /* force creating */ + unsigned ses_shutdown:1; /* session is shutting down */ + unsigned ses_expired:1; /* console is timedout */ + __u64 ses_id_cookie; /* batch id cookie */ + char ses_name[LST_NAME_SIZE];/* session name */ + lstcon_rpc_trans_t *ses_ping; /* session pinger */ + struct stt_timer ses_ping_timer; /* timer for pinger */ lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ struct list_head ses_trans_list; /* global list of transaction */ @@ -162,12 +162,12 @@ typedef struct { struct list_head ses_ndl_list; /* global list of nodes */ struct list_head *ses_ndl_hash; /* hash table of nodes */ - spinlock_t ses_rpc_lock; /* serialize */ - atomic_t ses_rpc_counter; /* # of initialized RPCs */ + spinlock_t ses_rpc_lock; /* serialize */ + atomic_t ses_rpc_counter; /* # of initialized RPCs */ struct list_head ses_rpc_freelist; /* idle console rpc */ -} lstcon_session_t; /* session descriptor */ +}; /* session descriptor */ -extern lstcon_session_t console_session; +extern struct lstcon_session console_session; static inline lstcon_trans_stat_t * lstcon_trans_stat(void) @@ -176,7 +176,7 @@ lstcon_trans_stat(void) } static inline struct list_head * -lstcon_id2hash (lnet_process_id_t id, struct list_head *hash) +lstcon_id2hash(lnet_process_id_t id, struct list_head *hash) { 
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; @@ -184,51 +184,54 @@ lstcon_id2hash (lnet_process_id_t id, struct list_head *hash) } int lstcon_console_init(void); -int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data); int lstcon_console_fini(void); int lstcon_session_match(lst_sid_t sid); int lstcon_session_new(char *name, int key, unsigned version, - int timeout, int flags, lst_sid_t *sid_up); -int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp, - lstcon_ndlist_ent_t *entp, char *name_up, int len); + int timeout, int flags, lst_sid_t __user *sid_up); +int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key, + unsigned __user *verp, lstcon_ndlist_ent_t __user *entp, + char __user *name_up, int len); int lstcon_session_end(void); -int lstcon_session_debug(int timeout, struct list_head *result_up); +int lstcon_session_debug(int timeout, struct list_head __user *result_up); int lstcon_session_feats_check(unsigned feats); int lstcon_batch_debug(int timeout, char *name, - int client, struct list_head *result_up); + int client, struct list_head __user *result_up); int lstcon_group_debug(int timeout, char *name, - struct list_head *result_up); -int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up, - struct list_head *result_up); + struct list_head __user *result_up); +int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t __user *nds_up, + struct list_head __user *result_up); int lstcon_group_add(char *name); int lstcon_group_del(char *name); int lstcon_group_clean(char *name, int args); -int lstcon_group_refresh(char *name, struct list_head *result_up); -int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up, - unsigned *featp, struct list_head *result_up); -int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up, - struct list_head *result_up); -int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up, - int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up); -int lstcon_group_list(int idx, int len, char *name_up); +int lstcon_group_refresh(char *name, struct list_head __user *result_up); +int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up, + unsigned *featp, struct list_head __user *result_up); +int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up, + struct list_head __user *result_up); +int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up, + int *index_p, int *ndent_p, + lstcon_node_ent_t __user *ndents_up); +int lstcon_group_list(int idx, int len, char __user *name_up); int lstcon_batch_add(char *name); -int lstcon_batch_run(char *name, int timeout, struct list_head *result_up); -int lstcon_batch_stop(char *name, int force, struct list_head *result_up); +int lstcon_batch_run(char *name, int timeout, + struct list_head __user *result_up); +int lstcon_batch_stop(char *name, int force, + struct list_head __user *result_up); int lstcon_test_batch_query(char *name, int testidx, int client, int timeout, - struct list_head *result_up); + struct list_head __user *result_up); int lstcon_batch_del(char *name); -int lstcon_batch_list(int idx, int namelen, char *name_up); -int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, +int lstcon_batch_list(int idx, int namelen, char __user *name_up); +int lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up, int server, int testidx, int *index_p, - int *ndent_p, lstcon_node_ent_t *dents_up); + int *ndent_p, lstcon_node_ent_t __user *dents_up); 
int lstcon_group_stat(char *grp_name, int timeout, - struct list_head *result_up); -int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, - int timeout, struct list_head *result_up); + struct list_head __user *result_up); +int lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up, + int timeout, struct list_head __user *result_up); int lstcon_test_add(char *batch_name, int type, int loop, int concur, int dist, int span, char *src_name, char *dst_name, void *param, int paramlen, int *retp, - struct list_head *result_up); + struct list_head __user *result_up); #endif diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index 1a2da7430..e2c532399 100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -53,64 +53,64 @@ static int rpc_timeout = 64; module_param(rpc_timeout, int, 0644); MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)"); -#define sfw_unpack_id(id) \ -do { \ - __swab64s(&(id).nid); \ - __swab32s(&(id).pid); \ +#define sfw_unpack_id(id) \ +do { \ + __swab64s(&(id).nid); \ + __swab32s(&(id).pid); \ } while (0) -#define sfw_unpack_sid(sid) \ -do { \ - __swab64s(&(sid).ses_nid); \ - __swab64s(&(sid).ses_stamp); \ +#define sfw_unpack_sid(sid) \ +do { \ + __swab64s(&(sid).ses_nid); \ + __swab64s(&(sid).ses_stamp); \ } while (0) -#define sfw_unpack_fw_counters(fc) \ -do { \ - __swab32s(&(fc).running_ms); \ +#define sfw_unpack_fw_counters(fc) \ +do { \ + __swab32s(&(fc).running_ms); \ __swab32s(&(fc).active_batches); \ __swab32s(&(fc).zombie_sessions); \ - __swab32s(&(fc).brw_errors); \ - __swab32s(&(fc).ping_errors); \ + __swab32s(&(fc).brw_errors); \ + __swab32s(&(fc).ping_errors); \ } while (0) -#define sfw_unpack_rpc_counters(rc) \ -do { \ +#define sfw_unpack_rpc_counters(rc) \ +do { \ __swab32s(&(rc).errors); \ - __swab32s(&(rc).rpcs_sent); \ - __swab32s(&(rc).rpcs_rcvd); \ - __swab32s(&(rc).rpcs_dropped); \ - __swab32s(&(rc).rpcs_expired); \ - __swab64s(&(rc).bulk_get); \ - __swab64s(&(rc).bulk_put); \ + __swab32s(&(rc).rpcs_sent); \ + __swab32s(&(rc).rpcs_rcvd); \ + __swab32s(&(rc).rpcs_dropped); \ + __swab32s(&(rc).rpcs_expired); \ + __swab64s(&(rc).bulk_get); \ + __swab64s(&(rc).bulk_put); \ } while (0) -#define sfw_unpack_lnet_counters(lc) \ -do { \ +#define sfw_unpack_lnet_counters(lc) \ +do { \ __swab32s(&(lc).errors); \ - __swab32s(&(lc).msgs_max); \ - __swab32s(&(lc).msgs_alloc); \ - __swab32s(&(lc).send_count); \ - __swab32s(&(lc).recv_count); \ - __swab32s(&(lc).drop_count); \ - __swab32s(&(lc).route_count); \ - __swab64s(&(lc).send_length); \ - __swab64s(&(lc).recv_length); \ - __swab64s(&(lc).drop_length); \ - __swab64s(&(lc).route_length); \ + __swab32s(&(lc).msgs_max); \ + __swab32s(&(lc).msgs_alloc); \ + __swab32s(&(lc).send_count); \ + __swab32s(&(lc).recv_count); \ + __swab32s(&(lc).drop_count); \ + __swab32s(&(lc).route_count); \ + __swab64s(&(lc).send_length); \ + __swab64s(&(lc).recv_length); \ + __swab64s(&(lc).drop_length); \ + __swab64s(&(lc).route_length); \ } while (0) -#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0) -#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0) +#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive)) +#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive)) static struct smoketest_framework { struct list_head fw_zombie_rpcs; /* RPCs to be recycled */ struct list_head fw_zombie_sessions; /* stopping sessions */ - struct 
list_head fw_tests; /* registered test cases */ - atomic_t fw_nzombies; /* # zombie sessions */ - spinlock_t fw_lock; /* serialise */ - sfw_session_t *fw_session; /* _the_ session */ - int fw_shuttingdown; /* shutdown in progress */ + struct list_head fw_tests; /* registered test cases */ + atomic_t fw_nzombies; /* # zombie sessions */ + spinlock_t fw_lock; /* serialise */ + sfw_session_t *fw_session; /* _the_ session */ + int fw_shuttingdown; /* shutdown in progress */ struct srpc_server_rpc *fw_active_srpc;/* running RPC */ } sfw_data; @@ -139,17 +139,17 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops) { sfw_test_case_t *tsc; - if (sfw_find_test_case(service->sv_id) != NULL) { + if (sfw_find_test_case(service->sv_id)) { CERROR("Failed to register test %s (%d)\n", - service->sv_name, service->sv_id); + service->sv_name, service->sv_id); return -EEXIST; } LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t)); - if (tsc == NULL) + if (!tsc) return -ENOMEM; - tsc->tsc_cli_ops = cliops; + tsc->tsc_cli_ops = cliops; tsc->tsc_srv_service = service; list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests); @@ -160,11 +160,11 @@ static void sfw_add_session_timer(void) { sfw_session_t *sn = sfw_data.fw_session; - stt_timer_t *timer = &sn->sn_timer; + struct stt_timer *timer = &sn->sn_timer; LASSERT(!sfw_data.fw_shuttingdown); - if (sn == NULL || sn->sn_timeout == 0) + if (!sn || !sn->sn_timeout) return; LASSERT(!sn->sn_timer_active); @@ -172,7 +172,6 @@ sfw_add_session_timer(void) sn->sn_timer_active = 1; timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout; stt_add_timer(timer); - return; } static int @@ -180,10 +179,10 @@ sfw_del_session_timer(void) { sfw_session_t *sn = sfw_data.fw_session; - if (sn == NULL || !sn->sn_timer_active) + if (!sn || !sn->sn_timer_active) return 0; - LASSERT(sn->sn_timeout != 0); + LASSERT(sn->sn_timeout); if (stt_del_timer(&sn->sn_timer)) { /* timer defused */ sn->sn_timer_active = 0; @@ -195,14 +194,14 @@ sfw_del_session_timer(void) static void sfw_deactivate_session(void) - __must_hold(&sfw_data.fw_lock) +__must_hold(&sfw_data.fw_lock) { sfw_session_t *sn = sfw_data.fw_session; int nactive = 0; sfw_batch_t *tsb; sfw_test_case_t *tsc; - if (sn == NULL) + if (!sn) return; LASSERT(!sn->sn_timer_active); @@ -226,7 +225,7 @@ sfw_deactivate_session(void) } } - if (nactive != 0) + if (nactive) return; /* wait for active batches to stop */ list_del_init(&sn->sn_list); @@ -248,8 +247,8 @@ sfw_session_expired(void *data) LASSERT(sn == sfw_data.fw_session); CWARN("Session expired! 
sid: %s-%llu, name: %s\n", - libcfs_nid2str(sn->sn_id.ses_nid), - sn->sn_id.ses_stamp, &sn->sn_name[0]); + libcfs_nid2str(sn->sn_id.ses_nid), + sn->sn_id.ses_stamp, &sn->sn_name[0]); sn->sn_timer_active = 0; sfw_deactivate_session(); @@ -261,7 +260,7 @@ static inline void sfw_init_session(sfw_session_t *sn, lst_sid_t sid, unsigned features, const char *name) { - stt_timer_t *timer = &sn->sn_timer; + struct stt_timer *timer = &sn->sn_timer; memset(sn, 0, sizeof(sfw_session_t)); INIT_LIST_HEAD(&sn->sn_list); @@ -272,10 +271,10 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid, strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name)); sn->sn_timer_active = 0; - sn->sn_id = sid; - sn->sn_features = features; - sn->sn_timeout = session_timeout; - sn->sn_started = cfs_time_current(); + sn->sn_id = sid; + sn->sn_features = features; + sn->sn_timeout = session_timeout; + sn->sn_started = cfs_time_current(); timer->stt_data = sn; timer->stt_func = sfw_session_expired; @@ -289,29 +288,26 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc) struct srpc_service *sv = rpc->srpc_scd->scd_svc; int status = rpc->srpc_status; - CDEBUG(D_NET, - "Incoming framework RPC done: service %s, peer %s, status %s:%d\n", - sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), - status); + CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n", + sv->sv_name, libcfs_id2str(rpc->srpc_peer), + swi_state2str(rpc->srpc_wi.swi_state), + status); - if (rpc->srpc_bulk != NULL) + if (rpc->srpc_bulk) sfw_free_pages(rpc); - return; } static void sfw_client_rpc_fini(srpc_client_rpc_t *rpc) { - LASSERT(rpc->crpc_bulk.bk_niov == 0); + LASSERT(!rpc->crpc_bulk.bk_niov); LASSERT(list_empty(&rpc->crpc_list)); - LASSERT(atomic_read(&rpc->crpc_refcount) == 0); + LASSERT(!atomic_read(&rpc->crpc_refcount)); - CDEBUG(D_NET, - "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(rpc->crpc_wi.swi_state), - rpc->crpc_aborted, rpc->crpc_status); + CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n", + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + swi_state2str(rpc->crpc_wi.swi_state), + rpc->crpc_aborted, rpc->crpc_status); spin_lock(&sfw_data.fw_lock); @@ -328,7 +324,7 @@ sfw_find_batch(lst_bid_t bid) sfw_session_t *sn = sfw_data.fw_session; sfw_batch_t *bat; - LASSERT(sn != NULL); + LASSERT(sn); list_for_each_entry(bat, &sn->sn_batches, bat_list) { if (bat->bat_id.bat_id == bid.bat_id) @@ -344,19 +340,19 @@ sfw_bid2batch(lst_bid_t bid) sfw_session_t *sn = sfw_data.fw_session; sfw_batch_t *bat; - LASSERT(sn != NULL); + LASSERT(sn); bat = sfw_find_batch(bid); - if (bat != NULL) + if (bat) return bat; LIBCFS_ALLOC(bat, sizeof(sfw_batch_t)); - if (bat == NULL) + if (!bat) return NULL; - bat->bat_error = 0; - bat->bat_session = sn; - bat->bat_id = bid; + bat->bat_error = 0; + bat->bat_session = sn; + bat->bat_id = bid; atomic_set(&bat->bat_nactive, 0); INIT_LIST_HEAD(&bat->bat_tests); @@ -371,14 +367,14 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) sfw_counters_t *cnt = &reply->str_fw; sfw_batch_t *bat; - reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->str_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; if (request->str_sid.ses_nid == LNET_NID_ANY) { reply->str_status = EINVAL; return 0; } - if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) { + if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) { reply->str_status = ESRCH; return 0; } @@ -386,11 +382,13 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) lnet_counters_get(&reply->str_lnet); srpc_get_counters(&reply->str_rpc); - /* send over the msecs since the session was started - - with 32 bits to send, this is ~49 days */ - cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started); - cnt->brw_errors = atomic_read(&sn->sn_brw_errors); - cnt->ping_errors = atomic_read(&sn->sn_ping_errors); + /* + * send over the msecs since the session was started + * with 32 bits to send, this is ~49 days + */ + cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started); + cnt->brw_errors = atomic_read(&sn->sn_brw_errors); + cnt->ping_errors = atomic_read(&sn->sn_ping_errors); cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies); cnt->active_batches = 0; @@ -408,18 +406,18 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; srpc_msg_t *msg = container_of(request, srpc_msg_t, - msg_body.mksn_reqst); + msg_body.mksn_reqst); int cplen = 0; if (request->mksn_sid.ses_nid == LNET_NID_ANY) { - reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->mksn_sid = !sn ? LST_INVALID_SID : sn->sn_id; reply->mksn_status = EINVAL; return 0; } - if (sn != NULL) { - reply->mksn_status = 0; - reply->mksn_sid = sn->sn_id; + if (sn) { + reply->mksn_status = 0; + reply->mksn_sid = sn->sn_id; reply->mksn_timeout = sn->sn_timeout; if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) { @@ -437,21 +435,23 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) } } - /* reject the request if it requires unknown features + /* + * reject the request if it requires unknown features * NB: old version will always accept all features because it's not * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also * harmless because it will return zero feature to console, and it's * console's responsibility to make sure all nodes in a session have - * same feature mask. */ - if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + * same feature mask. + */ + if (msg->msg_ses_feats & ~LST_FEATS_MASK) { reply->mksn_status = EPROTO; return 0; } /* brand new or create by force */ LIBCFS_ALLOC(sn, sizeof(sfw_session_t)); - if (sn == NULL) { - CERROR("Dropping RPC (mksn) under memory pressure.\n"); + if (!sn) { + CERROR("dropping RPC mksn under memory pressure\n"); return -ENOMEM; } @@ -461,13 +461,13 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) spin_lock(&sfw_data.fw_lock); sfw_deactivate_session(); - LASSERT(sfw_data.fw_session == NULL); + LASSERT(!sfw_data.fw_session); sfw_data.fw_session = sn; spin_unlock(&sfw_data.fw_lock); - reply->mksn_status = 0; - reply->mksn_sid = sn->sn_id; + reply->mksn_status = 0; + reply->mksn_sid = sn->sn_id; reply->mksn_timeout = sn->sn_timeout; return 0; } @@ -477,15 +477,15 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; - reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->rmsn_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; if (request->rmsn_sid.ses_nid == LNET_NID_ANY) { reply->rmsn_status = EINVAL; return 0; } - if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) { - reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY; + if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) { + reply->rmsn_status = !sn ? ESRCH : EBUSY; return 0; } @@ -499,8 +499,8 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) spin_unlock(&sfw_data.fw_lock); reply->rmsn_status = 0; - reply->rmsn_sid = LST_INVALID_SID; - LASSERT(sfw_data.fw_session == NULL); + reply->rmsn_sid = LST_INVALID_SID; + LASSERT(!sfw_data.fw_session); return 0; } @@ -509,14 +509,14 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; - if (sn == NULL) { + if (!sn) { reply->dbg_status = ESRCH; - reply->dbg_sid = LST_INVALID_SID; + reply->dbg_sid = LST_INVALID_SID; return 0; } - reply->dbg_status = 0; - reply->dbg_sid = sn->sn_id; + reply->dbg_status = 0; + reply->dbg_sid = sn->sn_id; reply->dbg_timeout = sn->sn_timeout; if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name)) >= sizeof(reply->dbg_name)) @@ -539,10 +539,16 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc) static inline int sfw_test_buffers(sfw_test_instance_t *tsi) { - struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service); - struct srpc_service *svc = tsc->tsc_srv_service; + struct sfw_test_case *tsc; + struct srpc_service *svc; int nbuf; + LASSERT(tsi); + tsc = sfw_find_test_case(tsi->tsi_service); + LASSERT(tsc); + svc = tsc->tsc_srv_service; + LASSERT(svc); + nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts; return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA); } @@ -555,10 +561,10 @@ sfw_load_test(struct sfw_test_instance *tsi) int nbuf; int rc; - LASSERT(tsi != NULL); + LASSERT(tsi); tsc = sfw_find_test_case(tsi->tsi_service); nbuf = sfw_test_buffers(tsi); - LASSERT(tsc != NULL); + LASSERT(tsc); svc = tsc->tsc_srv_service; if (tsi->tsi_is_client) { @@ -567,39 +573,44 @@ sfw_load_test(struct sfw_test_instance *tsi) } rc = srpc_service_add_buffers(svc, nbuf); - if (rc != 0) { + if (rc) { CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n", svc->sv_name, nbuf, rc); - /* NB: this error handler is not strictly correct, because + /* + * NB: this error handler is not strictly correct, because * it may release more buffers than already allocated, * but it doesn't matter because request portal should - * be lazy portal and will grow buffers if necessary. */ + * be lazy portal and will grow buffers if necessary. + */ srpc_service_remove_buffers(svc, nbuf); return -ENOMEM; } CDEBUG(D_NET, "Reserved %d buffers for test %s\n", nbuf * (srpc_serv_is_framework(svc) ? - 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name); + 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name); return 0; } static void sfw_unload_test(struct sfw_test_instance *tsi) { - struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service); + struct sfw_test_case *tsc; - LASSERT(tsc != NULL); + LASSERT(tsi); + tsc = sfw_find_test_case(tsi->tsi_service); + LASSERT(tsc); if (tsi->tsi_is_client) return; - /* shrink buffers, because request portal is lazy portal + /* + * shrink buffers, because request portal is lazy portal * which can grow buffers at runtime so we may leave - * some buffers behind, but never mind... */ + * some buffers behind, but never mind...
+ */ srpc_service_remove_buffers(tsc->tsc_srv_service, sfw_test_buffers(tsi)); - return; } static void @@ -619,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi) while (!list_empty(&tsi->tsi_units)) { tsu = list_entry(tsi->tsi_units.next, - sfw_test_unit_t, tsu_list); + sfw_test_unit_t, tsu_list); list_del(&tsu->tsu_list); LIBCFS_FREE(tsu, sizeof(*tsu)); } while (!list_empty(&tsi->tsi_free_rpcs)) { rpc = list_entry(tsi->tsi_free_rpcs.next, - srpc_client_rpc_t, crpc_list); + srpc_client_rpc_t, crpc_list); list_del(&rpc->crpc_list); LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); } @@ -634,7 +645,6 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi) clean: sfw_unload_test(tsi); LIBCFS_FREE(tsi, sizeof(*tsi)); - return; } static void @@ -647,13 +657,12 @@ sfw_destroy_batch(sfw_batch_t *tsb) while (!list_empty(&tsb->bat_tests)) { tsi = list_entry(tsb->bat_tests.next, - sfw_test_instance_t, tsi_list); + sfw_test_instance_t, tsi_list); list_del_init(&tsi->tsi_list); sfw_destroy_test_instance(tsi); } LIBCFS_FREE(tsb, sizeof(sfw_batch_t)); - return; } void @@ -666,14 +675,13 @@ sfw_destroy_session(sfw_session_t *sn) while (!list_empty(&sn->sn_batches)) { batch = list_entry(sn->sn_batches.next, - sfw_batch_t, bat_list); + sfw_batch_t, bat_list); list_del_init(&batch->bat_list); sfw_destroy_batch(batch); } LIBCFS_FREE(sn, sizeof(*sn)); atomic_dec(&sfw_data.fw_nzombies); - return; } static void @@ -690,7 +698,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC)); if (req->tsr_service == SRPC_SERVICE_BRW) { - if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) { + if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) { test_bulk_req_t *bulk = &req->tsr_u.bulk_v0; __swab32s(&bulk->blk_opc); @@ -718,7 +726,6 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) } LBUG(); - return; } static int @@ -734,9 +741,9 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) int rc; LIBCFS_ALLOC(tsi, sizeof(*tsi)); - if (tsi == NULL) { + if (!tsi) { CERROR("Can't allocate test instance for batch: %llu\n", - tsb->bat_id.bat_id); + tsb->bat_id.bat_id); return -ENOMEM; } @@ -746,16 +753,16 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) INIT_LIST_HEAD(&tsi->tsi_free_rpcs); INIT_LIST_HEAD(&tsi->tsi_active_rpcs); - tsi->tsi_stopping = 0; - tsi->tsi_batch = tsb; - tsi->tsi_loop = req->tsr_loop; - tsi->tsi_concur = req->tsr_concur; - tsi->tsi_service = req->tsr_service; - tsi->tsi_is_client = !!(req->tsr_is_client); + tsi->tsi_stopping = 0; + tsi->tsi_batch = tsb; + tsi->tsi_loop = req->tsr_loop; + tsi->tsi_concur = req->tsr_concur; + tsi->tsi_service = req->tsr_service; + tsi->tsi_is_client = !!(req->tsr_is_client); tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr); rc = sfw_load_test(tsi); - if (rc != 0) { + if (rc) { LIBCFS_FREE(tsi, sizeof(*tsi)); return rc; } @@ -768,7 +775,7 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) return 0; } - LASSERT(bk != NULL); + LASSERT(bk); LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest); LASSERT((unsigned int)bk->bk_len >= sizeof(lnet_process_id_packed_t) * ndest); @@ -782,36 +789,36 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc) int j; dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); - LASSERT(dests != NULL); /* my pages are within KVM always */ + LASSERT(dests); /* my pages are within KVM always */ id = dests[i % SFW_ID_PER_PAGE]; if (msg->msg_magic != SRPC_MSG_MAGIC) sfw_unpack_id(id); for (j = 0; j < tsi->tsi_concur; j++) { 
LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t)); - if (tsu == NULL) { + if (!tsu) { rc = -ENOMEM; CERROR("Can't allocate tsu for %d\n", - tsi->tsi_service); + tsi->tsi_service); goto error; } tsu->tsu_dest.nid = id.nid; tsu->tsu_dest.pid = id.pid; tsu->tsu_instance = tsi; - tsu->tsu_private = NULL; + tsu->tsu_private = NULL; list_add_tail(&tsu->tsu_list, &tsi->tsi_units); } } rc = tsi->tsi_ops->tso_init(tsi); - if (rc == 0) { + if (!rc) { list_add_tail(&tsi->tsi_list, &tsb->bat_tests); return 0; } error: - LASSERT(rc != 0); + LASSERT(rc); sfw_destroy_test_instance(tsi); return rc; } @@ -856,7 +863,6 @@ sfw_test_unit_done(sfw_test_unit_t *tsu) spin_unlock(&sfw_data.fw_lock); sfw_destroy_session(sn); - return; } static void @@ -876,9 +882,8 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc) list_del_init(&rpc->crpc_list); /* batch is stopping or loop is done or get error */ - if (tsi->tsi_stopping || - tsu->tsu_loop == 0 || - (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr)) + if (tsi->tsi_stopping || !tsu->tsu_loop || + (rpc->crpc_status && tsi->tsi_stoptsu_onerr)) done = 1; /* dec ref for poster */ @@ -892,7 +897,6 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc) } sfw_test_unit_done(tsu); - return; } int @@ -906,18 +910,17 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, spin_lock(&tsi->tsi_lock); LASSERT(sfw_test_active(tsi)); - - if (!list_empty(&tsi->tsi_free_rpcs)) { /* pick request from buffer */ - rpc = list_entry(tsi->tsi_free_rpcs.next, - srpc_client_rpc_t, crpc_list); + rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs, + srpc_client_rpc_t, crpc_list); + if (rpc) { LASSERT(nblk == rpc->crpc_bulk.bk_niov); list_del_init(&rpc->crpc_list); } spin_unlock(&tsi->tsi_lock); - if (rpc == NULL) { + if (!rpc) { rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk, blklen, sfw_test_rpc_done, sfw_test_rpc_fini, tsu); @@ -927,7 +930,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, sfw_test_rpc_fini, tsu); } - if (rpc == NULL) { + if (!rpc) { CERROR("Can't create rpc for test %d\n", tsi->tsi_service); return -ENOMEM; } @@ -947,12 +950,12 @@ sfw_run_test(swi_workitem_t *wi) LASSERT(wi == &tsu->tsu_worker); - if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) { - LASSERT(rpc == NULL); + if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) { + LASSERT(!rpc); goto test_done; } - LASSERT(rpc != NULL); + LASSERT(rpc); spin_lock(&tsi->tsi_lock); @@ -968,9 +971,8 @@ sfw_run_test(swi_workitem_t *wi) list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs); spin_unlock(&tsi->tsi_lock); - rpc->crpc_timeout = rpc_timeout; - spin_lock(&rpc->crpc_lock); + rpc->crpc_timeout = rpc_timeout; srpc_post_rpc(rpc); spin_unlock(&rpc->crpc_lock); return 0; @@ -1015,8 +1017,7 @@ sfw_run_batch(sfw_batch_t *tsb) tsu->tsu_loop = tsi->tsi_loop; wi = &tsu->tsu_worker; swi_init_workitem(wi, tsu, sfw_run_test, - lst_sched_test[\ - lnet_cpt_of_nid(tsu->tsu_dest.nid)]); + lst_sched_test[lnet_cpt_of_nid(tsu->tsu_dest.nid)]); swi_schedule_workitem(wi); } } @@ -1074,7 +1075,7 @@ sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply) if (testidx < 0) return -EINVAL; - if (testidx == 0) { + if (!testidx) { reply->bar_active = atomic_read(&tsb->bat_nactive); return 0; } @@ -1101,11 +1102,11 @@ int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, int sink) { - LASSERT(rpc->srpc_bulk == NULL); + LASSERT(!rpc->srpc_bulk); LASSERT(npages > 0 && npages <= LNET_MAX_IOV); rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); - if 
(rpc->srpc_bulk == NULL) + if (!rpc->srpc_bulk) return -ENOMEM; return 0; @@ -1121,13 +1122,13 @@ sfw_add_test(struct srpc_server_rpc *rpc) sfw_batch_t *bat; request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; - reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id; - if (request->tsr_loop == 0 || - request->tsr_concur == 0 || + if (!request->tsr_loop || + !request->tsr_concur || request->tsr_sid.ses_nid == LNET_NID_ANY || request->tsr_ndest > SFW_MAX_NDESTS || - (request->tsr_is_client && request->tsr_ndest == 0) || + (request->tsr_is_client && !request->tsr_ndest) || request->tsr_concur > SFW_MAX_CONCUR || request->tsr_service > SRPC_SERVICE_MAX_ID || request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) { @@ -1135,17 +1136,17 @@ sfw_add_test(struct srpc_server_rpc *rpc) return 0; } - if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) || - sfw_find_test_case(request->tsr_service) == NULL) { + if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) || + !sfw_find_test_case(request->tsr_service)) { reply->tsr_status = ENOENT; return 0; } bat = sfw_bid2batch(request->tsr_bid); - if (bat == NULL) { - CERROR("Dropping RPC (%s) from %s under memory pressure.\n", - rpc->srpc_scd->scd_svc->sv_name, - libcfs_id2str(rpc->srpc_peer)); + if (!bat) { + CERROR("dropping RPC %s from %s under memory pressure\n", + rpc->srpc_scd->scd_svc->sv_name, + libcfs_id2str(rpc->srpc_peer)); return -ENOMEM; } @@ -1154,15 +1155,15 @@ sfw_add_test(struct srpc_server_rpc *rpc) return 0; } - if (request->tsr_is_client && rpc->srpc_bulk == NULL) { + if (request->tsr_is_client && !rpc->srpc_bulk) { /* rpc will be resumed later in sfw_bulk_ready */ int npg = sfw_id_pages(request->tsr_ndest); int len; - if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { - len = npg * PAGE_CACHE_SIZE; + if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { + len = npg * PAGE_SIZE; - } else { + } else { len = sizeof(lnet_process_id_packed_t) * request->tsr_ndest; } @@ -1171,11 +1172,11 @@ sfw_add_test(struct srpc_server_rpc *rpc) } rc = sfw_add_test_instance(bat, rpc); - CDEBUG(rc == 0 ? D_NET : D_WARNING, - "%s test: sv %d %s, loop %d, concur %d, ndest %d\n", - rc == 0 ? "Added" : "Failed to add", request->tsr_service, - request->tsr_is_client ? "client" : "server", - request->tsr_loop, request->tsr_concur, request->tsr_ndest); + CDEBUG(!rc ? D_NET : D_WARNING, + "%s test: sv %d %s, loop %d, concur %d, ndest %d\n", + !rc ? "Added" : "Failed to add", request->tsr_service, + request->tsr_is_client ? "client" : "server", + request->tsr_loop, request->tsr_concur, request->tsr_ndest); reply->tsr_status = (rc < 0) ? -rc : rc; return 0; @@ -1188,15 +1189,15 @@ sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) int rc = 0; sfw_batch_t *bat; - reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; + reply->bar_sid = !sn ? 
LST_INVALID_SID : sn->sn_id; - if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) { + if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) { reply->bar_status = ESRCH; return 0; } bat = sfw_find_batch(request->bar_bid); - if (bat == NULL) { + if (!bat) { reply->bar_status = ENOENT; return 0; } @@ -1231,7 +1232,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) unsigned features = LST_FEATS_MASK; int rc = 0; - LASSERT(sfw_data.fw_active_srpc == NULL); + LASSERT(!sfw_data.fw_active_srpc); LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID); spin_lock(&sfw_data.fw_lock); @@ -1242,7 +1243,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) } /* Remove timer to avoid racing with it or expiring active session */ - if (sfw_del_session_timer() != 0) { + if (sfw_del_session_timer()) { CERROR("Dropping RPC (%s) from %s: racing with expiry timer.", sv->sv_name, libcfs_id2str(rpc->srpc_peer)); spin_unlock(&sfw_data.fw_lock); @@ -1262,19 +1263,21 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) sv->sv_id != SRPC_SERVICE_DEBUG) { sfw_session_t *sn = sfw_data.fw_session; - if (sn != NULL && + if (sn && sn->sn_features != request->msg_ses_feats) { CNETERR("Features of framework RPC don't match features of current session: %x/%x\n", request->msg_ses_feats, sn->sn_features); reply->msg_body.reply.status = EPROTO; - reply->msg_body.reply.sid = sn->sn_id; + reply->msg_body.reply.sid = sn->sn_id; goto out; } - } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) { - /* NB: at this point, old version will ignore features and + } else if (request->msg_ses_feats & ~LST_FEATS_MASK) { + /** + * NB: at this point, old version will ignore features and * create new session anyway, so console should be able - * to handle this */ + * to handle this + */ reply->msg_body.reply.status = EPROTO; goto out; } @@ -1312,7 +1315,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) break; } - if (sfw_data.fw_session != NULL) + if (sfw_data.fw_session) features = sfw_data.fw_session->sn_features; out: reply->msg_ses_feats = features; @@ -1333,14 +1336,14 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) struct srpc_service *sv = rpc->srpc_scd->scd_svc; int rc; - LASSERT(rpc->srpc_bulk != NULL); + LASSERT(rpc->srpc_bulk); LASSERT(sv->sv_id == SRPC_SERVICE_TEST); - LASSERT(sfw_data.fw_active_srpc == NULL); + LASSERT(!sfw_data.fw_active_srpc); LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client); spin_lock(&sfw_data.fw_lock); - if (status != 0) { + if (status) { CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n", sv->sv_name, libcfs_id2str(rpc->srpc_peer), status); spin_unlock(&sfw_data.fw_lock); @@ -1352,8 +1355,8 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) return -ESHUTDOWN; } - if (sfw_del_session_timer() != 0) { - CERROR("Dropping RPC (%s) from %s: racing with expiry timer", + if (sfw_del_session_timer()) { + CERROR("dropping RPC %s from %s: racing with expiry timer\n", sv->sv_name, libcfs_id2str(rpc->srpc_peer)); spin_unlock(&sfw_data.fw_lock); return -EAGAIN; @@ -1386,9 +1389,9 @@ sfw_create_rpc(lnet_process_id_t peer, int service, LASSERT(!sfw_data.fw_shuttingdown); LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID); - if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) { + if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) { rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - srpc_client_rpc_t, crpc_list); + srpc_client_rpc_t, crpc_list); list_del(&rpc->crpc_list); srpc_init_client_rpc(rpc, peer, service, 0, 0, @@ 
-1397,15 +1400,15 @@ sfw_create_rpc(lnet_process_id_t peer, int service, spin_unlock(&sfw_data.fw_lock); - if (rpc == NULL) { + if (!rpc) { rpc = srpc_create_client_rpc(peer, service, nbulkiov, bulklen, done, - nbulkiov != 0 ? NULL : + nbulkiov ? NULL : sfw_client_rpc_fini, priv); } - if (rpc != NULL) /* "session" is concept in framework */ + if (rpc) /* "session" is concept in framework */ rpc->crpc_reqstmsg.msg_ses_feats = features; return rpc; @@ -1552,7 +1555,6 @@ sfw_unpack_message(srpc_msg_t *msg) } LBUG(); - return; } void @@ -1564,7 +1566,6 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc) spin_lock(&rpc->crpc_lock); srpc_abort_rpc(rpc, -EINTR); spin_unlock(&rpc->crpc_lock); - return; } void @@ -1581,7 +1582,6 @@ sfw_post_rpc(srpc_client_rpc_t *rpc) srpc_post_rpc(rpc); spin_unlock(&rpc->crpc_lock); - return; } static srpc_service_t sfw_services[] = { @@ -1622,16 +1622,6 @@ static srpc_service_t sfw_services[] = { } }; -extern sfw_test_client_ops_t ping_test_client; -extern srpc_service_t ping_test_service; -extern void ping_init_test_client(void); -extern void ping_init_test_service(void); - -extern sfw_test_client_ops_t brw_test_client; -extern srpc_service_t brw_test_service; -extern void brw_init_test_client(void); -extern void brw_init_test_service(void); - int sfw_startup(void) { @@ -1643,25 +1633,25 @@ sfw_startup(void) if (session_timeout < 0) { CERROR("Session timeout must be non-negative: %d\n", - session_timeout); + session_timeout); return -EINVAL; } if (rpc_timeout < 0) { CERROR("RPC timeout must be non-negative: %d\n", - rpc_timeout); + rpc_timeout); return -EINVAL; } - if (session_timeout == 0) + if (!session_timeout) CWARN("Zero session_timeout specified - test sessions never expire.\n"); - if (rpc_timeout == 0) + if (!rpc_timeout) CWARN("Zero rpc_timeout specified - test RPC never expire.\n"); memset(&sfw_data, 0, sizeof(struct smoketest_framework)); - sfw_data.fw_session = NULL; + sfw_data.fw_session = NULL; sfw_data.fw_active_srpc = NULL; spin_lock_init(&sfw_data.fw_lock); atomic_set(&sfw_data.fw_nzombies, 0); @@ -1672,12 +1662,12 @@ sfw_startup(void) brw_init_test_client(); brw_init_test_service(); rc = sfw_register_test(&brw_test_service, &brw_test_client); - LASSERT(rc == 0); + LASSERT(!rc); ping_init_test_client(); ping_init_test_service(); rc = sfw_register_test(&ping_test_service, &ping_test_client); - LASSERT(rc == 0); + LASSERT(!rc); error = 0; list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) { @@ -1685,29 +1675,29 @@ sfw_startup(void) rc = srpc_add_service(sv); LASSERT(rc != -EBUSY); - if (rc != 0) { + if (rc) { CWARN("Failed to add %s service: %d\n", - sv->sv_name, rc); + sv->sv_name, rc); error = rc; } } for (i = 0; ; i++) { sv = &sfw_services[i]; - if (sv->sv_name == NULL) + if (!sv->sv_name) break; sv->sv_bulk_ready = NULL; - sv->sv_handler = sfw_handle_server_rpc; - sv->sv_wi_total = SFW_FRWK_WI_MAX; + sv->sv_handler = sfw_handle_server_rpc; + sv->sv_wi_total = SFW_FRWK_WI_MAX; if (sv->sv_id == SRPC_SERVICE_TEST) sv->sv_bulk_ready = sfw_bulk_ready; rc = srpc_add_service(sv); LASSERT(rc != -EBUSY); - if (rc != 0) { + if (rc) { CWARN("Failed to add %s service: %d\n", - sv->sv_name, rc); + sv->sv_name, rc); error = rc; } @@ -1716,14 +1706,14 @@ sfw_startup(void) continue; rc = srpc_service_add_buffers(sv, sv->sv_wi_total); - if (rc != 0) { + if (rc) { CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n", sv->sv_name, sv->sv_wi_total, rc); error = -ENOMEM; } } - if (error != 0) + if (error) sfw_shutdown(); return error; } @@ -1738,15 
+1728,15 @@ sfw_shutdown(void) spin_lock(&sfw_data.fw_lock); sfw_data.fw_shuttingdown = 1; - lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock, + lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock, "waiting for active RPC to finish.\n"); - if (sfw_del_session_timer() != 0) - lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock, + if (sfw_del_session_timer()) + lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock, "waiting for session timer to explode.\n"); sfw_deactivate_session(); - lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0, + lst_wait_until(!atomic_read(&sfw_data.fw_nzombies), sfw_data.fw_lock, "waiting for %d zombie sessions to die.\n", atomic_read(&sfw_data.fw_nzombies)); @@ -1755,7 +1745,7 @@ sfw_shutdown(void) for (i = 0; ; i++) { sv = &sfw_services[i]; - if (sv->sv_name == NULL) + if (!sv->sv_name) break; srpc_shutdown_service(sv); @@ -1772,7 +1762,7 @@ sfw_shutdown(void) srpc_client_rpc_t *rpc; rpc = list_entry(sfw_data.fw_zombie_rpcs.next, - srpc_client_rpc_t, crpc_list); + srpc_client_rpc_t, crpc_list); list_del(&rpc->crpc_list); LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); @@ -1780,7 +1770,7 @@ sfw_shutdown(void) for (i = 0; ; i++) { sv = &sfw_services[i]; - if (sv->sv_name == NULL) + if (!sv->sv_name) break; srpc_wait_service_shutdown(sv); @@ -1788,13 +1778,11 @@ sfw_shutdown(void) while (!list_empty(&sfw_data.fw_tests)) { tsc = list_entry(sfw_data.fw_tests.next, - sfw_test_case_t, tsc_list); + sfw_test_case_t, tsc_list); srpc_wait_service_shutdown(tsc->tsc_srv_service); list_del(&tsc->tsc_list); LIBCFS_FREE(tsc, sizeof(*tsc)); } - - return; } diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c index 46cbdf045..cc046b1d4 100644 --- a/drivers/staging/lustre/lnet/selftest/module.c +++ b/drivers/staging/lustre/lnet/selftest/module.c @@ -37,9 +37,10 @@ #define DEBUG_SUBSYSTEM S_LNET #include "selftest.h" +#include "console.h" enum { - LST_INIT_NONE = 0, + LST_INIT_NONE = 0, LST_INIT_WI_SERIAL, LST_INIT_WI_TEST, LST_INIT_RPC, @@ -47,16 +48,13 @@ enum { LST_INIT_CONSOLE }; -extern int lstcon_console_init(void); -extern int lstcon_console_fini(void); - static int lst_init_step = LST_INIT_NONE; struct cfs_wi_sched *lst_sched_serial; struct cfs_wi_sched **lst_sched_test; static void -lnet_selftest_fini(void) +lnet_selftest_exit(void) { int i; @@ -70,7 +68,7 @@ lnet_selftest_fini(void) case LST_INIT_WI_TEST: for (i = 0; i < cfs_cpt_number(lnet_cpt_table()); i++) { - if (lst_sched_test[i] == NULL) + if (!lst_sched_test[i]) continue; cfs_wi_sched_destroy(lst_sched_test[i]); } @@ -98,7 +96,7 @@ lnet_selftest_init(void) rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY, 1, &lst_sched_serial); - if (rc != 0) { + if (rc) { CERROR("Failed to create serial WI scheduler for LST\n"); return rc; } @@ -106,7 +104,7 @@ lnet_selftest_init(void) nscheds = cfs_cpt_number(lnet_cpt_table()); LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds); - if (lst_sched_test == NULL) + if (!lst_sched_test) goto error; lst_init_step = LST_INIT_WI_TEST; @@ -117,42 +115,42 @@ lnet_selftest_init(void) nthrs = max(nthrs - 1, 1); rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i, nthrs, &lst_sched_test[i]); - if (rc != 0) { - CERROR("Failed to create CPT affinity WI scheduler %d for LST\n", - i); + if (rc) { + CERROR("Failed to create CPT affinity WI scheduler %d for LST\n", i); goto error; } } rc = srpc_startup(); - if (rc != 0) { + if (rc) { CERROR("LST can't startup rpc\n"); goto error; } 
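/*
 * Editor's note: lnet_selftest_init() above uses a staged-initialization
 * ladder -- each successful step records its progress in lst_init_step,
 * and the single error path calls lnet_selftest_exit(), whose switch
 * statement falls through from the last completed stage to undo the work
 * in reverse order (exactly as the LST_INIT_* cases earlier in this diff
 * do). A minimal, self-contained sketch of the pattern follows; the step
 * names and setup/teardown helpers are hypothetical illustrations, not
 * part of this patch.
 */
#include <stdio.h>

enum { INIT_NONE, INIT_FIRST, INIT_SECOND };
static int init_step = INIT_NONE;

static int setup_first(void)  { return 0; }		/* succeeds */
static int setup_second(void) { return -1; }		/* fails on purpose */
static void teardown_first(void)  { puts("undo first"); }
static void teardown_second(void) { puts("undo second"); }

static void sketch_exit(void)
{
	switch (init_step) {	/* deliberate fall-through, reverse order */
	case INIT_SECOND:
		teardown_second();
	case INIT_FIRST:
		teardown_first();
	case INIT_NONE:
		break;
	}
}

static int sketch_init(void)
{
	int rc;

	rc = setup_first();
	if (rc)
		goto error;
	init_step = INIT_FIRST;

	rc = setup_second();
	if (rc)
		goto error;
	init_step = INIT_SECOND;
	return 0;
error:
	sketch_exit();	/* same teardown the module-exit hook would run */
	return rc;
}

int main(void)
{
	return sketch_init() ? 1 : 0;	/* prints "undo first" only */
}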
lst_init_step = LST_INIT_RPC; rc = sfw_startup(); - if (rc != 0) { + if (rc) { CERROR("LST can't startup framework\n"); goto error; } lst_init_step = LST_INIT_FW; rc = lstcon_console_init(); - if (rc != 0) { + if (rc) { CERROR("LST can't startup console\n"); goto error; } lst_init_step = LST_INIT_CONSOLE; return 0; error: - lnet_selftest_fini(); + lnet_selftest_exit(); return rc; } +MODULE_AUTHOR("OpenSFS, Inc. "); MODULE_DESCRIPTION("LNet Selftest"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -MODULE_VERSION("0.9.0"); module_init(lnet_selftest_init); -module_exit(lnet_selftest_fini); +module_exit(lnet_selftest_exit); diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c index d42653654..81a45045e 100644 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c @@ -42,18 +42,18 @@ #include "selftest.h" -#define LST_PING_TEST_MAGIC 0xbabeface +#define LST_PING_TEST_MAGIC 0xbabeface static int ping_srv_workitems = SFW_TEST_WI_MAX; module_param(ping_srv_workitems, int, 0644); MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems"); -typedef struct { +struct lst_ping_data { spinlock_t pnd_lock; /* serialize */ int pnd_counter; /* sequence counter */ -} lst_ping_data_t; +}; -static lst_ping_data_t lst_ping_data; +static struct lst_ping_data lst_ping_data; static int ping_client_init(sfw_test_instance_t *tsi) @@ -61,7 +61,7 @@ ping_client_init(sfw_test_instance_t *tsi) sfw_session_t *sn = tsi->tsi_batch->bat_session; LASSERT(tsi->tsi_is_client); - LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0); + LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK)); spin_lock_init(&lst_ping_data.pnd_lock); lst_ping_data.pnd_counter = 0; @@ -75,7 +75,7 @@ ping_client_fini(sfw_test_instance_t *tsi) sfw_session_t *sn = tsi->tsi_batch->bat_session; int errors; - LASSERT(sn != NULL); + LASSERT(sn); LASSERT(tsi->tsi_is_client); errors = atomic_read(&sn->sn_ping_errors); @@ -95,11 +95,11 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu, struct timespec64 ts; int rc; - LASSERT(sn != NULL); - LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); + LASSERT(sn); + LASSERT(!(sn->sn_features & ~LST_FEATS_MASK)); rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc); - if (rc != 0) + if (rc) return rc; req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst; @@ -111,7 +111,7 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu, spin_unlock(&lst_ping_data.pnd_lock); ktime_get_real_ts64(&ts); - req->pnr_time_sec = ts.tv_sec; + req->pnr_time_sec = ts.tv_sec; req->pnr_time_usec = ts.tv_nsec / NSEC_PER_USEC; return rc; @@ -126,14 +126,14 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; struct timespec64 ts; - LASSERT(sn != NULL); + LASSERT(sn); - if (rpc->crpc_status != 0) { + if (rpc->crpc_status) { if (!tsi->tsi_stopping) /* rpc could have been aborted */ atomic_inc(&sn->sn_ping_errors); CERROR("Unable to ping %s (%d): %d\n", - libcfs_id2str(rpc->crpc_dest), - reqst->pnr_seq, rpc->crpc_status); + libcfs_id2str(rpc->crpc_dest), + reqst->pnr_seq, rpc->crpc_status); return; } @@ -147,8 +147,8 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) rpc->crpc_status = -EBADMSG; atomic_inc(&sn->sn_ping_errors); CERROR("Bad magic %u from %s, %u expected.\n", - reply->pnr_magic, libcfs_id2str(rpc->crpc_dest), - LST_PING_TEST_MAGIC); + reply->pnr_magic, libcfs_id2str(rpc->crpc_dest), + LST_PING_TEST_MAGIC); 
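/*
 * Editor's note: once the magic and sequence checks in
 * ping_client_done_rpc() pass, the hunk just below computes the ping
 * round-trip time in microseconds from the request timestamp (seconds
 * plus microseconds, carried in pnr_time_sec/pnr_time_usec) and the
 * current timespec64. A standalone sketch of that arithmetic, with
 * hypothetical names; note the microsecond term may go negative and is
 * compensated by the seconds term:
 */
#include <stdint.h>

#define USEC_PER_SEC	1000000L
#define NSEC_PER_USEC	1000L

struct ts64 { int64_t tv_sec; long tv_nsec; };

/* elapsed microseconds between a stored (sec, usec) stamp and "now" */
static inline int64_t rtt_usec(const struct ts64 *now,
			       int64_t req_sec, int64_t req_usec)
{
	return (now->tv_sec - req_sec) * USEC_PER_SEC +
	       (now->tv_nsec / NSEC_PER_USEC - req_usec);
}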
return; } @@ -156,8 +156,8 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) rpc->crpc_status = -EBADMSG; atomic_inc(&sn->sn_ping_errors); CERROR("Bad seq %u from %s, %u expected.\n", - reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), - reqst->pnr_seq); + reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), + reqst->pnr_seq); return; } @@ -165,13 +165,12 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq, (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 + (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec))); - return; } static int ping_server_handle(struct srpc_server_rpc *rpc) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; srpc_msg_t *replymsg = &rpc->srpc_replymsg; srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; @@ -191,14 +190,14 @@ ping_server_handle(struct srpc_server_rpc *rpc) if (req->pnr_magic != LST_PING_TEST_MAGIC) { CERROR("Unexpected magic %08x from %s\n", - req->pnr_magic, libcfs_id2str(rpc->srpc_peer)); + req->pnr_magic, libcfs_id2str(rpc->srpc_peer)); return -EINVAL; } - rep->pnr_seq = req->pnr_seq; + rep->pnr_seq = req->pnr_seq; rep->pnr_magic = LST_PING_TEST_MAGIC; - if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) { + if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) { replymsg->msg_ses_feats = LST_FEATS_MASK; rep->pnr_status = EPROTO; return 0; @@ -214,8 +213,8 @@ ping_server_handle(struct srpc_server_rpc *rpc) sfw_test_client_ops_t ping_test_client; void ping_init_test_client(void) { - ping_test_client.tso_init = ping_client_init; - ping_test_client.tso_fini = ping_client_fini; + ping_test_client.tso_init = ping_client_init; + ping_test_client.tso_fini = ping_client_fini; ping_test_client.tso_prep_rpc = ping_client_prep_rpc; ping_test_client.tso_done_rpc = ping_client_done_rpc; } @@ -223,8 +222,8 @@ void ping_init_test_client(void) srpc_service_t ping_test_service; void ping_init_test_service(void) { - ping_test_service.sv_id = SRPC_SERVICE_PING; - ping_test_service.sv_name = "ping_test"; - ping_test_service.sv_handler = ping_server_handle; + ping_test_service.sv_id = SRPC_SERVICE_PING; + ping_test_service.sv_name = "ping_test"; + ping_test_service.sv_handler = ping_server_handle; ping_test_service.sv_wi_total = ping_srv_workitems; } diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 2acf6ec71..7d7748d96 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -90,14 +90,14 @@ void srpc_set_counters(const srpc_counters_t *cnt) static int srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) { - nob = min_t(int, nob, PAGE_CACHE_SIZE); + nob = min_t(int, nob, PAGE_SIZE); LASSERT(nob > 0); LASSERT(i >= 0 && i < bk->bk_niov); bk->bk_iovs[i].kiov_offset = 0; - bk->bk_iovs[i].kiov_page = pg; - bk->bk_iovs[i].kiov_len = nob; + bk->bk_iovs[i].kiov_page = pg; + bk->bk_iovs[i].kiov_len = nob; return nob; } @@ -107,18 +107,17 @@ srpc_free_bulk(srpc_bulk_t *bk) int i; struct page *pg; - LASSERT(bk != NULL); + LASSERT(bk); for (i = 0; i < bk->bk_niov; i++) { pg = bk->bk_iovs[i].kiov_page; - if (pg == NULL) + if (!pg) break; __free_page(pg); } LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov])); - return; } srpc_bulk_t * @@ -131,15 +130,15 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) LIBCFS_CPT_ALLOC(bk, 
lnet_cpt_table(), cpt, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); - if (bk == NULL) { + if (!bk) { CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); return NULL; } memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); - bk->bk_sink = sink; - bk->bk_len = bulk_len; - bk->bk_niov = bulk_npg; + bk->bk_sink = sink; + bk->bk_len = bulk_len; + bk->bk_niov = bulk_npg; for (i = 0; i < bulk_npg; i++) { struct page *pg; @@ -147,7 +146,7 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_KERNEL, 0); - if (pg == NULL) { + if (!pg) { CERROR("Can't allocate page %d of %d\n", i, bulk_npg); srpc_free_bulk(bk); return NULL; @@ -183,10 +182,10 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc, rpc->srpc_ev.ev_fired = 1; /* no event expected now */ - rpc->srpc_scd = scd; + rpc->srpc_scd = scd; rpc->srpc_reqstbuf = buffer; - rpc->srpc_peer = buffer->buf_peer; - rpc->srpc_self = buffer->buf_self; + rpc->srpc_peer = buffer->buf_peer; + rpc->srpc_self = buffer->buf_self; LNetInvalidateHandle(&rpc->srpc_replymdh); } @@ -199,7 +198,7 @@ srpc_service_fini(struct srpc_service *svc) struct list_head *q; int i; - if (svc->sv_cpt_data == NULL) + if (!svc->sv_cpt_data) return; cfs_percpt_for_each(scd, i, svc->sv_cpt_data) { @@ -212,9 +211,8 @@ srpc_service_fini(struct srpc_service *svc) break; while (!list_empty(q)) { - buf = list_entry(q->next, - struct srpc_buffer, - buf_list); + buf = list_entry(q->next, struct srpc_buffer, + buf_list); list_del(&buf->buf_list); LIBCFS_FREE(buf, sizeof(*buf)); } @@ -224,8 +222,8 @@ srpc_service_fini(struct srpc_service *svc) while (!list_empty(&scd->scd_rpc_free)) { rpc = list_entry(scd->scd_rpc_free.next, - struct srpc_server_rpc, - srpc_list); + struct srpc_server_rpc, + srpc_list); list_del(&rpc->srpc_list); LIBCFS_FREE(rpc, sizeof(*rpc)); } @@ -259,7 +257,7 @@ srpc_service_init(struct srpc_service *svc) svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(), sizeof(struct srpc_service_cd)); - if (svc->sv_cpt_data == NULL) + if (!svc->sv_cpt_data) return -ENOMEM; svc->sv_ncpts = srpc_serv_is_framework(svc) ? 
@@ -278,23 +276,27 @@ srpc_service_init(struct srpc_service *svc) scd->scd_ev.ev_data = scd; scd->scd_ev.ev_type = SRPC_REQUEST_RCVD; - /* NB: don't use lst_sched_serial for adding buffer, - * see details in srpc_service_add_buffers() */ + /* + * NB: don't use lst_sched_serial for adding buffer, + * see details in srpc_service_add_buffers() + */ swi_init_workitem(&scd->scd_buf_wi, scd, srpc_add_buffer, lst_sched_test[i]); - if (i != 0 && srpc_serv_is_framework(svc)) { - /* NB: framework service only needs srpc_service_cd for + if (i && srpc_serv_is_framework(svc)) { + /* + * NB: framework service only needs srpc_service_cd for * one partition, but we allocate for all to make * it easier to implement, it will waste a little - * memory but nobody should care about this */ + * memory but nobody should care about this + */ continue; } for (j = 0; j < nrpcs; j++) { LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(), i, sizeof(*rpc)); - if (rpc == NULL) { + if (!rpc) { srpc_service_fini(svc); return -ENOMEM; } @@ -312,14 +314,14 @@ srpc_add_service(struct srpc_service *sv) LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID); - if (srpc_service_init(sv) != 0) + if (srpc_service_init(sv)) return -ENOMEM; spin_lock(&srpc_data.rpc_glock); LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); - if (srpc_data.rpc_services[id] != NULL) { + if (srpc_data.rpc_services[id]) { spin_unlock(&srpc_data.rpc_glock); goto failed; } @@ -363,32 +365,31 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK, local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh); - if (rc != 0) { + if (rc) { CERROR("LNetMEAttach failed: %d\n", rc); LASSERT(rc == -ENOMEM); return -ENOMEM; } md.threshold = 1; - md.user_ptr = ev; - md.start = buf; - md.length = len; - md.options = options; + md.user_ptr = ev; + md.start = buf; + md.length = len; + md.options = options; md.eq_handle = srpc_data.rpc_lnet_eq; rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh); - if (rc != 0) { + if (rc) { CERROR("LNetMDAttach failed: %d\n", rc); LASSERT(rc == -ENOMEM); rc = LNetMEUnlink(meh); - LASSERT(rc == 0); + LASSERT(!rc); return -ENOMEM; } - CDEBUG(D_NET, - "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n", - libcfs_id2str(peer), portal, matchbits); + CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n", + libcfs_id2str(peer), portal, matchbits); return 0; } @@ -400,46 +401,48 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, int rc; lnet_md_t md; - md.user_ptr = ev; - md.start = buf; - md.length = len; + md.user_ptr = ev; + md.start = buf; + md.length = len; md.eq_handle = srpc_data.rpc_lnet_eq; - md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1; - md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET); + md.threshold = options & LNET_MD_OP_GET ? 2 : 1; + md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET); rc = LNetMDBind(md, LNET_UNLINK, mdh); - if (rc != 0) { + if (rc) { CERROR("LNetMDBind failed: %d\n", rc); LASSERT(rc == -ENOMEM); return -ENOMEM; } - /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options. + /* + * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options. * they're only meaningful for MDs attached to an ME (i.e. passive - * buffers... */ - if ((options & LNET_MD_OP_PUT) != 0) { + * buffers... 
+ */ + if (options & LNET_MD_OP_PUT) { rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer, portal, matchbits, 0, 0); } else { - LASSERT((options & LNET_MD_OP_GET) != 0); + LASSERT(options & LNET_MD_OP_GET); rc = LNetGet(self, *mdh, peer, portal, matchbits, 0); } - if (rc != 0) { + if (rc) { CERROR("LNet%s(%s, %d, %lld) failed: %d\n", - ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get", - libcfs_id2str(peer), portal, matchbits, rc); + options & LNET_MD_OP_PUT ? "Put" : "Get", + libcfs_id2str(peer), portal, matchbits, rc); - /* The forthcoming unlink event will complete this operation + /* + * The forthcoming unlink event will complete this operation * with failure, so fall through and return success here. */ rc = LNetMDUnlink(*mdh); - LASSERT(rc == 0); + LASSERT(!rc); } else { - CDEBUG(D_NET, - "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n", - libcfs_id2str(peer), portal, matchbits); + CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n", + libcfs_id2str(peer), portal, matchbits); } return 0; } @@ -448,7 +451,7 @@ static int srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, lnet_handle_md_t *mdh, srpc_event_t *ev) { - lnet_process_id_t any = {0}; + lnet_process_id_t any = { 0 }; any.nid = LNET_NID_ANY; any.pid = LNET_PID_ANY; @@ -460,10 +463,10 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len, static int srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) - __must_hold(&scd->scd_lock) +__must_hold(&scd->scd_lock) { struct srpc_service *sv = scd->scd_svc; - struct srpc_msg *msg = &buf->buf_msg; + struct srpc_msg *msg = &buf->buf_msg; int rc; LNetInvalidateHandle(&buf->buf_mdh); @@ -476,19 +479,22 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) msg, sizeof(*msg), &buf->buf_mdh, &scd->scd_ev); - /* At this point, a RPC (new or delayed) may have arrived in + /* + * At this point, a RPC (new or delayed) may have arrived in * msg and its event handler has been called. 
So we must add - * buf to scd_buf_posted _before_ dropping scd_lock */ - + * buf to scd_buf_posted _before_ dropping scd_lock + */ spin_lock(&scd->scd_lock); - if (rc == 0) { + if (!rc) { if (!sv->sv_shuttingdown) return 0; spin_unlock(&scd->scd_lock); - /* srpc_shutdown_service might have tried to unlink me - * when my buf_mdh was still invalid */ + /* + * srpc_shutdown_service might have tried to unlink me + * when my buf_mdh was still invalid + */ LNetMDUnlink(buf->buf_mdh); spin_lock(&scd->scd_lock); return 0; @@ -514,9 +520,11 @@ srpc_add_buffer(struct swi_workitem *wi) struct srpc_buffer *buf; int rc = 0; - /* it's called by workitem scheduler threads, these threads + /* + * it's called by workitem scheduler threads, these threads * should have been set CPT affinity, so buffers will be posted - * on CPT local list of Portal */ + * on CPT local list of Portal + */ spin_lock(&scd->scd_lock); while (scd->scd_buf_adjust > 0 && @@ -527,7 +535,7 @@ srpc_add_buffer(struct swi_workitem *wi) spin_unlock(&scd->scd_lock); LIBCFS_ALLOC(buf, sizeof(*buf)); - if (buf == NULL) { + if (!buf) { CERROR("Failed to add new buf to service: %s\n", scd->scd_svc->sv_name); spin_lock(&scd->scd_lock); @@ -546,7 +554,7 @@ srpc_add_buffer(struct swi_workitem *wi) } rc = srpc_service_post_buffer(scd, buf); - if (rc != 0) + if (rc) break; /* buf has been freed inside */ LASSERT(scd->scd_buf_posting > 0); @@ -555,7 +563,7 @@ srpc_add_buffer(struct swi_workitem *wi) scd->scd_buf_low = max(2, scd->scd_buf_total / 4); } - if (rc != 0) { + if (rc) { scd->scd_buf_err_stamp = ktime_get_real_seconds(); scd->scd_buf_err = rc; @@ -607,12 +615,12 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer) * block all WIs pending on lst_sched_serial for a moment * which is not good but not fatal. 
*/ - lst_wait_until(scd->scd_buf_err != 0 || - (scd->scd_buf_adjust == 0 && - scd->scd_buf_posting == 0), + lst_wait_until(scd->scd_buf_err || + (!scd->scd_buf_adjust && + !scd->scd_buf_posting), scd->scd_lock, "waiting for adding buffer\n"); - if (scd->scd_buf_err != 0 && rc == 0) + if (scd->scd_buf_err && !rc) rc = scd->scd_buf_err; spin_unlock(&scd->scd_lock); @@ -658,7 +666,7 @@ srpc_finish_service(struct srpc_service *sv) } if (scd->scd_buf_nposted > 0) { - CDEBUG(D_NET, "waiting for %d posted buffers to unlink", + CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n", scd->scd_buf_nposted); spin_unlock(&scd->scd_lock); return 0; @@ -670,7 +678,7 @@ srpc_finish_service(struct srpc_service *sv) } rpc = list_entry(scd->scd_rpc_active.next, - struct srpc_server_rpc, srpc_list); + struct srpc_server_rpc, srpc_list); CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n", rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), swi_state2str(rpc->srpc_wi.swi_state), @@ -690,10 +698,10 @@ srpc_finish_service(struct srpc_service *sv) /* called with sv->sv_lock held */ static void srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) - __must_hold(&scd->scd_lock) +__must_hold(&scd->scd_lock) { if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) { - if (srpc_service_post_buffer(scd, buf) != 0) { + if (srpc_service_post_buffer(scd, buf)) { CWARN("Failed to post %s buffer\n", scd->scd_svc->sv_name); } @@ -706,7 +714,7 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) if (scd->scd_buf_adjust < 0) { scd->scd_buf_adjust++; if (scd->scd_buf_adjust < 0 && - scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) { + !scd->scd_buf_total && !scd->scd_buf_posting) { CDEBUG(D_INFO, "Try to recycle %d buffers but nothing left\n", scd->scd_buf_adjust); @@ -732,9 +740,11 @@ srpc_abort_service(struct srpc_service *sv) cfs_percpt_for_each(scd, i, sv->sv_cpt_data) { spin_lock(&scd->scd_lock); - /* schedule in-flight RPCs to notice the abort, NB: + /* + * schedule in-flight RPCs to notice the abort, NB: * racing with incoming RPCs; complete fix should make test - * RPCs carry session ID in its headers */ + * RPCs carry session ID in its headers + */ list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) { rpc->srpc_aborted = 1; swi_schedule_workitem(&rpc->srpc_wi); @@ -772,8 +782,10 @@ srpc_shutdown_service(srpc_service_t *sv) spin_unlock(&scd->scd_lock); - /* OK to traverse scd_buf_posted without lock, since no one - * touches scd_buf_posted now */ + /* + * OK to traverse scd_buf_posted without lock, since no one + * touches scd_buf_posted now + */ list_for_each_entry(buf, &scd->scd_buf_posted, buf_list) LNetMDUnlink(buf->buf_mdh); } @@ -786,15 +798,15 @@ srpc_send_request(srpc_client_rpc_t *rpc) int rc; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REQUEST_SENT; + ev->ev_data = rpc; + ev->ev_type = SRPC_REQUEST_SENT; rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service), rpc->crpc_service, &rpc->crpc_reqstmsg, sizeof(srpc_msg_t), LNET_MD_OP_PUT, rpc->crpc_dest, LNET_NID_ANY, &rpc->crpc_reqstmdh, ev); - if (rc != 0) { + if (rc) { LASSERT(rc == -ENOMEM); ev->ev_fired = 1; /* no more event expected */ } @@ -809,8 +821,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc) int rc; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REPLY_RCVD; + ev->ev_data = rpc; + ev->ev_type = SRPC_REPLY_RCVD; *id = srpc_next_id(); @@ -818,7 +830,7 @@ 
srpc_prepare_reply(srpc_client_rpc_t *rpc) &rpc->crpc_replymsg, sizeof(srpc_msg_t), LNET_MD_OP_PUT, rpc->crpc_dest, &rpc->crpc_replymdh, ev); - if (rc != 0) { + if (rc) { LASSERT(rc == -ENOMEM); ev->ev_fired = 1; /* no more event expected */ } @@ -830,28 +842,28 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc) { srpc_bulk_t *bk = &rpc->crpc_bulk; srpc_event_t *ev = &rpc->crpc_bulkev; - __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; + __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; int rc; int opt; LASSERT(bk->bk_niov <= LNET_MAX_IOV); - if (bk->bk_niov == 0) + if (!bk->bk_niov) return 0; /* nothing to do */ opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET; opt |= LNET_MD_KIOV; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_BULK_REQ_RCVD; + ev->ev_data = rpc; + ev->ev_type = SRPC_BULK_REQ_RCVD; *id = srpc_next_id(); rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id, &bk->bk_iovs[0], bk->bk_niov, opt, rpc->crpc_dest, &bk->bk_mdh, ev); - if (rc != 0) { + if (rc) { LASSERT(rc == -ENOMEM); ev->ev_fired = 1; /* no more event expected */ } @@ -867,20 +879,20 @@ srpc_do_bulk(struct srpc_server_rpc *rpc) int rc; int opt; - LASSERT(bk != NULL); + LASSERT(bk); opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT; opt |= LNET_MD_KIOV; ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT; + ev->ev_data = rpc; + ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT; rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id, &bk->bk_iovs[0], bk->bk_niov, opt, rpc->srpc_peer, rpc->srpc_self, &bk->bk_mdh, ev); - if (rc != 0) + if (rc) ev->ev_fired = 1; /* no more event expected */ return rc; } @@ -890,33 +902,35 @@ static void srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) { struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; + struct srpc_service *sv = scd->scd_svc; srpc_buffer_t *buffer; - LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE); + LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE); rpc->srpc_status = status; - CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR, - "Server RPC %p done: service %s, peer %s, status %s:%d\n", - rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), - swi_state2str(rpc->srpc_wi.swi_state), status); + CDEBUG_LIMIT(!status ? 
D_NET : D_NETERROR, + "Server RPC %p done: service %s, peer %s, status %s:%d\n", + rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer), + swi_state2str(rpc->srpc_wi.swi_state), status); - if (status != 0) { + if (status) { spin_lock(&srpc_data.rpc_glock); srpc_data.rpc_counters.rpcs_dropped++; spin_unlock(&srpc_data.rpc_glock); } - if (rpc->srpc_done != NULL) + if (rpc->srpc_done) (*rpc->srpc_done) (rpc); - LASSERT(rpc->srpc_bulk == NULL); + LASSERT(!rpc->srpc_bulk); spin_lock(&scd->scd_lock); - if (rpc->srpc_reqstbuf != NULL) { - /* NB might drop sv_lock in srpc_service_recycle_buffer, but - * sv won't go away for scd_rpc_active must not be empty */ + if (rpc->srpc_reqstbuf) { + /* + * NB might drop sv_lock in srpc_service_recycle_buffer, but + * sv won't go away for scd_rpc_active must not be empty + */ srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf); rpc->srpc_reqstbuf = NULL; } @@ -934,7 +948,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) { buffer = list_entry(scd->scd_buf_blocked.next, - srpc_buffer_t, buf_list); + srpc_buffer_t, buf_list); list_del(&buffer->buf_list); srpc_init_server_rpc(rpc, scd, buffer); @@ -945,7 +959,6 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status) } spin_unlock(&scd->scd_lock); - return; } /* handles an incoming RPC */ @@ -965,7 +978,7 @@ srpc_handle_rpc(swi_workitem_t *wi) if (sv->sv_shuttingdown || rpc->srpc_aborted) { spin_unlock(&scd->scd_lock); - if (rpc->srpc_bulk != NULL) + if (rpc->srpc_bulk) LNetMDUnlink(rpc->srpc_bulk->bk_mdh); LNetMDUnlink(rpc->srpc_replymdh); @@ -988,7 +1001,7 @@ srpc_handle_rpc(swi_workitem_t *wi) msg = &rpc->srpc_reqstbuf->buf_msg; reply = &rpc->srpc_replymsg.msg_body.reply; - if (msg->msg_magic == 0) { + if (!msg->msg_magic) { /* moaned already in srpc_lnet_ev_handler */ srpc_server_rpc_done(rpc, EBADMSG); return 1; @@ -1004,8 +1017,8 @@ srpc_handle_rpc(swi_workitem_t *wi) } else { reply->status = 0; rc = (*sv->sv_handler)(rpc); - LASSERT(reply->status == 0 || !rpc->srpc_bulk); - if (rc != 0) { + LASSERT(!reply->status || !rpc->srpc_bulk); + if (rc) { srpc_server_rpc_done(rpc, rc); return 1; } @@ -1013,9 +1026,9 @@ srpc_handle_rpc(swi_workitem_t *wi) wi->swi_state = SWI_STATE_BULK_STARTED; - if (rpc->srpc_bulk != NULL) { + if (rpc->srpc_bulk) { rc = srpc_do_bulk(rpc); - if (rc == 0) + if (!rc) return 0; /* wait for bulk */ LASSERT(ev->ev_fired); @@ -1023,15 +1036,15 @@ srpc_handle_rpc(swi_workitem_t *wi) } } case SWI_STATE_BULK_STARTED: - LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired); + LASSERT(!rpc->srpc_bulk || ev->ev_fired); - if (rpc->srpc_bulk != NULL) { + if (rpc->srpc_bulk) { rc = ev->ev_status; - if (sv->sv_bulk_ready != NULL) + if (sv->sv_bulk_ready) rc = (*sv->sv_bulk_ready) (rpc, rc); - if (rc != 0) { + if (rc) { srpc_server_rpc_done(rpc, rc); return 1; } @@ -1039,7 +1052,7 @@ srpc_handle_rpc(swi_workitem_t *wi) wi->swi_state = SWI_STATE_REPLY_SUBMITTED; rc = srpc_send_reply(rpc); - if (rc == 0) + if (!rc) return 0; /* wait for reply */ srpc_server_rpc_done(rpc, rc); return 1; @@ -1067,8 +1080,8 @@ srpc_client_rpc_expired(void *data) srpc_client_rpc_t *rpc = data; CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - rpc->crpc_timeout); + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + rpc->crpc_timeout); spin_lock(&rpc->crpc_lock); @@ -1082,32 +1095,32 @@ srpc_client_rpc_expired(void *data) spin_unlock(&srpc_data.rpc_glock); } -inline void +static 
void srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc) { - stt_timer_t *timer = &rpc->crpc_timer; + struct stt_timer *timer = &rpc->crpc_timer; - if (rpc->crpc_timeout == 0) + if (!rpc->crpc_timeout) return; INIT_LIST_HEAD(&timer->stt_list); - timer->stt_data = rpc; - timer->stt_func = srpc_client_rpc_expired; + timer->stt_data = rpc; + timer->stt_func = srpc_client_rpc_expired; timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout; stt_add_timer(timer); - return; } /* * Called with rpc->crpc_lock held. * * Upon exit the RPC expiry timer is not queued and the handler is not - * running on any CPU. */ + * running on any CPU. + */ static void srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) { /* timer not planted or already exploded */ - if (rpc->crpc_timeout == 0) + if (!rpc->crpc_timeout) return; /* timer successfully defused */ @@ -1115,7 +1128,7 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc) return; /* timer detonated, wait for it to explode */ - while (rpc->crpc_timeout != 0) { + while (rpc->crpc_timeout) { spin_unlock(&rpc->crpc_lock); schedule(); @@ -1129,20 +1142,20 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) { swi_workitem_t *wi = &rpc->crpc_wi; - LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE); + LASSERT(status || wi->swi_state == SWI_STATE_DONE); spin_lock(&rpc->crpc_lock); rpc->crpc_closed = 1; - if (rpc->crpc_status == 0) + if (!rpc->crpc_status) rpc->crpc_status = status; srpc_del_client_rpc_timer(rpc); - CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR, - "Client RPC done: service %d, peer %s, status %s:%d:%d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(wi->swi_state), rpc->crpc_aborted, status); + CDEBUG_LIMIT(!status ? D_NET : D_NETERROR, + "Client RPC done: service %d, peer %s, status %s:%d:%d\n", + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + swi_state2str(wi->swi_state), rpc->crpc_aborted, status); /* * No one can schedule me now since: @@ -1158,7 +1171,6 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) spin_unlock(&rpc->crpc_lock); (*rpc->crpc_done)(rpc); - return; } /* sends an outgoing RPC */ @@ -1170,11 +1182,11 @@ srpc_send_rpc(swi_workitem_t *wi) srpc_msg_t *reply; int do_bulk; - LASSERT(wi != NULL); + LASSERT(wi); rpc = wi->swi_workitem.wi_data; - LASSERT(rpc != NULL); + LASSERT(rpc); LASSERT(wi == &rpc->crpc_wi); reply = &rpc->crpc_replymsg; @@ -1196,13 +1208,13 @@ srpc_send_rpc(swi_workitem_t *wi) LASSERT(!srpc_event_pending(rpc)); rc = srpc_prepare_reply(rpc); - if (rc != 0) { + if (rc) { srpc_client_rpc_done(rpc, rc); return 1; } rc = srpc_prepare_bulk(rpc); - if (rc != 0) + if (rc) break; wi->swi_state = SWI_STATE_REQUEST_SUBMITTED; @@ -1210,14 +1222,16 @@ srpc_send_rpc(swi_workitem_t *wi) break; case SWI_STATE_REQUEST_SUBMITTED: - /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any + /* + * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any * order; however, they're processed in a strict order: - * rqt, rpy, and bulk. */ + * rqt, rpy, and bulk. 
+ */ if (!rpc->crpc_reqstev.ev_fired) break; rc = rpc->crpc_reqstev.ev_status; - if (rc != 0) + if (rc) break; wi->swi_state = SWI_STATE_REQUEST_SENT; @@ -1229,7 +1243,7 @@ srpc_send_rpc(swi_workitem_t *wi) break; rc = rpc->crpc_replyev.ev_status; - if (rc != 0) + if (rc) break; srpc_unpack_msg_hdr(reply); @@ -1244,7 +1258,7 @@ srpc_send_rpc(swi_workitem_t *wi) break; } - if (do_bulk && reply->msg_body.reply.status != 0) { + if (do_bulk && reply->msg_body.reply.status) { CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n", reply->msg_body.reply.status, libcfs_id2str(rpc->crpc_dest)); @@ -1259,12 +1273,14 @@ srpc_send_rpc(swi_workitem_t *wi) rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0; - /* Bulk buffer was unlinked due to remote error. Clear error + /* + * Bulk buffer was unlinked due to remote error. Clear error * since reply buffer still contains valid data. * NB rpc->crpc_done shouldn't look into bulk data in case of - * remote error. */ + * remote error. + */ if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK && - rpc->crpc_status == 0 && reply->msg_body.reply.status != 0) + !rpc->crpc_status && reply->msg_body.reply.status) rc = 0; wi->swi_state = SWI_STATE_DONE; @@ -1272,7 +1288,7 @@ srpc_send_rpc(swi_workitem_t *wi) return 1; } - if (rc != 0) { + if (rc) { spin_lock(&rpc->crpc_lock); srpc_abort_rpc(rpc, rc); spin_unlock(&rpc->crpc_lock); @@ -1294,15 +1310,15 @@ abort: srpc_client_rpc_t * srpc_create_client_rpc(lnet_process_id_t peer, int service, - int nbulkiov, int bulklen, - void (*rpc_done)(srpc_client_rpc_t *), - void (*rpc_fini)(srpc_client_rpc_t *), void *priv) + int nbulkiov, int bulklen, + void (*rpc_done)(srpc_client_rpc_t *), + void (*rpc_fini)(srpc_client_rpc_t *), void *priv) { srpc_client_rpc_t *rpc; LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[nbulkiov])); - if (rpc == NULL) + if (!rpc) return NULL; srpc_init_client_rpc(rpc, peer, service, nbulkiov, @@ -1314,21 +1330,19 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service, void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why) { - LASSERT(why != 0); + LASSERT(why); if (rpc->crpc_aborted || /* already aborted */ - rpc->crpc_closed) /* callback imminent */ + rpc->crpc_closed) /* callback imminent */ return; - CDEBUG(D_NET, - "Aborting RPC: service %d, peer %s, state %s, why %d\n", - rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), - swi_state2str(rpc->crpc_wi.swi_state), why); + CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n", + rpc->crpc_service, libcfs_id2str(rpc->crpc_dest), + swi_state2str(rpc->crpc_wi.swi_state), why); rpc->crpc_aborted = 1; - rpc->crpc_status = why; + rpc->crpc_status = why; swi_schedule_workitem(&rpc->crpc_wi); - return; } /* called with rpc->crpc_lock held */ @@ -1339,12 +1353,11 @@ srpc_post_rpc(srpc_client_rpc_t *rpc) LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING); CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n", - libcfs_id2str(rpc->crpc_dest), rpc->crpc_service, - rpc->crpc_timeout); + libcfs_id2str(rpc->crpc_dest), rpc->crpc_service, + rpc->crpc_timeout); srpc_add_client_rpc_timer(rpc); swi_schedule_workitem(&rpc->crpc_wi); - return; } int @@ -1358,15 +1371,17 @@ srpc_send_reply(struct srpc_server_rpc *rpc) __u64 rpyid; int rc; - LASSERT(buffer != NULL); + LASSERT(buffer); rpyid = buffer->buf_msg.msg_body.reqst.rpyid; spin_lock(&scd->scd_lock); if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) { - /* Repost buffer before replying since test client - * might send me 
another RPC once it gets the reply */ - if (srpc_service_post_buffer(scd, buffer) != 0) + /* + * Repost buffer before replying since test client + * might send me another RPC once it gets the reply + */ + if (srpc_service_post_buffer(scd, buffer)) CWARN("Failed to repost %s buffer\n", sv->sv_name); rpc->srpc_reqstbuf = NULL; } @@ -1374,18 +1389,18 @@ srpc_send_reply(struct srpc_server_rpc *rpc) spin_unlock(&scd->scd_lock); ev->ev_fired = 0; - ev->ev_data = rpc; - ev->ev_type = SRPC_REPLY_SENT; + ev->ev_data = rpc; + ev->ev_type = SRPC_REPLY_SENT; - msg->msg_magic = SRPC_MSG_MAGIC; + msg->msg_magic = SRPC_MSG_MAGIC; msg->msg_version = SRPC_MSG_VERSION; - msg->msg_type = srpc_service2reply(sv->sv_id); + msg->msg_type = srpc_service2reply(sv->sv_id); rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg, sizeof(*msg), LNET_MD_OP_PUT, rpc->srpc_peer, rpc->srpc_self, &rpc->srpc_replymdh, ev); - if (rc != 0) + if (rc) ev->ev_fired = 1; /* no more event expected */ return rc; } @@ -1405,10 +1420,17 @@ srpc_lnet_ev_handler(lnet_event_t *ev) LASSERT(!in_interrupt()); - if (ev->status != 0) { + if (ev->status) { + __u32 errors; + spin_lock(&srpc_data.rpc_glock); - srpc_data.rpc_counters.errors++; + if (ev->status != -ECANCELED) /* cancellation is not error */ + srpc_data.rpc_counters.errors++; + errors = srpc_data.rpc_counters.errors; spin_unlock(&srpc_data.rpc_glock); + + CNETERR("LNet event status %d type %d, RPC errors %u\n", + ev->status, ev->type, errors); } rpcev->ev_lnet = ev->type; @@ -1419,7 +1441,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev) rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet); LBUG(); case SRPC_REQUEST_SENT: - if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) { + if (!ev->status && ev->type != LNET_EVENT_UNLINK) { spin_lock(&srpc_data.rpc_glock); srpc_data.rpc_counters.rpcs_sent++; spin_unlock(&srpc_data.rpc_glock); @@ -1441,8 +1463,8 @@ srpc_lnet_ev_handler(lnet_event_t *ev) spin_lock(&crpc->crpc_lock); - LASSERT(rpcev->ev_fired == 0); - rpcev->ev_fired = 1; + LASSERT(!rpcev->ev_fired); + rpcev->ev_fired = 1; rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? -EINTR : ev->status; swi_schedule_workitem(&crpc->crpc_wi); @@ -1460,9 +1482,9 @@ srpc_lnet_ev_handler(lnet_event_t *ev) LASSERT(ev->unlinked); LASSERT(ev->type == LNET_EVENT_PUT || - ev->type == LNET_EVENT_UNLINK); + ev->type == LNET_EVENT_UNLINK); LASSERT(ev->type != LNET_EVENT_UNLINK || - sv->sv_shuttingdown); + sv->sv_shuttingdown); buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg); buffer->buf_peer = ev->initiator; @@ -1472,21 +1494,23 @@ srpc_lnet_ev_handler(lnet_event_t *ev) scd->scd_buf_nposted--; if (sv->sv_shuttingdown) { - /* Leave buffer on scd->scd_buf_nposted since - * srpc_finish_service needs to traverse it. */ + /* + * Leave buffer on scd->scd_buf_nposted since + * srpc_finish_service needs to traverse it. 
+ */ spin_unlock(&scd->scd_lock); break; } - if (scd->scd_buf_err_stamp != 0 && + if (scd->scd_buf_err_stamp && scd->scd_buf_err_stamp < ktime_get_real_seconds()) { /* re-enable adding buffer */ scd->scd_buf_err_stamp = 0; scd->scd_buf_err = 0; } - if (scd->scd_buf_err == 0 && /* adding buffer is enabled */ - scd->scd_buf_adjust == 0 && + if (!scd->scd_buf_err && /* adding buffer is enabled */ + !scd->scd_buf_adjust && scd->scd_buf_nposted < scd->scd_buf_low) { scd->scd_buf_adjust = max(scd->scd_buf_total / 2, SFW_TEST_WI_MIN); @@ -1497,7 +1521,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev) msg = &buffer->buf_msg; type = srpc_service2request(sv->sv_id); - if (ev->status != 0 || ev->mlength != sizeof(*msg) || + if (ev->status || ev->mlength != sizeof(*msg) || (msg->msg_type != type && msg->msg_type != __swab32(type)) || (msg->msg_magic != SRPC_MSG_MAGIC && @@ -1507,25 +1531,27 @@ srpc_lnet_ev_handler(lnet_event_t *ev) ev->status, ev->mlength, msg->msg_type, msg->msg_magic); - /* NB can't call srpc_service_recycle_buffer here since + /* + * NB can't call srpc_service_recycle_buffer here since * it may call LNetM[DE]Attach. The invalid magic tells - * srpc_handle_rpc to drop this RPC */ + * srpc_handle_rpc to drop this RPC + */ msg->msg_magic = 0; } if (!list_empty(&scd->scd_rpc_free)) { srpc = list_entry(scd->scd_rpc_free.next, - struct srpc_server_rpc, - srpc_list); + struct srpc_server_rpc, + srpc_list); list_del(&srpc->srpc_list); srpc_init_server_rpc(srpc, scd, buffer); list_add_tail(&srpc->srpc_list, - &scd->scd_rpc_active); + &scd->scd_rpc_active); swi_schedule_workitem(&srpc->srpc_wi); } else { list_add_tail(&buffer->buf_list, - &scd->scd_buf_blocked); + &scd->scd_buf_blocked); } spin_unlock(&scd->scd_lock); @@ -1537,14 +1563,14 @@ srpc_lnet_ev_handler(lnet_event_t *ev) case SRPC_BULK_GET_RPLD: LASSERT(ev->type == LNET_EVENT_SEND || - ev->type == LNET_EVENT_REPLY || - ev->type == LNET_EVENT_UNLINK); + ev->type == LNET_EVENT_REPLY || + ev->type == LNET_EVENT_UNLINK); if (!ev->unlinked) break; /* wait for final event */ case SRPC_BULK_PUT_SENT: - if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) { + if (!ev->status && ev->type != LNET_EVENT_UNLINK) { spin_lock(&srpc_data.rpc_glock); if (rpcev->ev_type == SRPC_BULK_GET_RPLD) @@ -1556,13 +1582,13 @@ srpc_lnet_ev_handler(lnet_event_t *ev) } case SRPC_REPLY_SENT: srpc = rpcev->ev_data; - scd = srpc->srpc_scd; + scd = srpc->srpc_scd; LASSERT(rpcev == &srpc->srpc_ev); spin_lock(&scd->scd_lock); - rpcev->ev_fired = 1; + rpcev->ev_fired = 1; rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ? 
-EINTR : ev->status; swi_schedule_workitem(&srpc->srpc_wi); @@ -1587,7 +1613,7 @@ srpc_startup(void) srpc_data.rpc_state = SRPC_STATE_NONE; - rc = LNetNIInit(LUSTRE_SRV_LNET_PID); + rc = LNetNIInit(LNET_PID_LUSTRE); if (rc < 0) { CERROR("LNetNIInit() has failed: %d\n", rc); return rc; @@ -1597,22 +1623,22 @@ srpc_startup(void) LNetInvalidateHandle(&srpc_data.rpc_lnet_eq); rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq); - if (rc != 0) { + if (rc) { CERROR("LNetEQAlloc() has failed: %d\n", rc); goto bail; } rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL); - LASSERT(rc == 0); + LASSERT(!rc); rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL); - LASSERT(rc == 0); + LASSERT(!rc); srpc_data.rpc_state = SRPC_STATE_EQ_INIT; rc = stt_startup(); bail: - if (rc != 0) + if (rc) srpc_shutdown(); else srpc_data.rpc_state = SRPC_STATE_RUNNING; @@ -1639,9 +1665,8 @@ srpc_shutdown(void) for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) { srpc_service_t *sv = srpc_data.rpc_services[i]; - LASSERTF(sv == NULL, - "service not empty: id %d, name %s\n", - i, sv->sv_name); + LASSERTF(!sv, "service not empty: id %d, name %s\n", + i, sv->sv_name); } spin_unlock(&srpc_data.rpc_glock); @@ -1651,13 +1676,11 @@ srpc_shutdown(void) case SRPC_STATE_EQ_INIT: rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL); rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL); - LASSERT(rc == 0); + LASSERT(!rc); rc = LNetEQFree(srpc_data.rpc_lnet_eq); - LASSERT(rc == 0); /* the EQ should have no user by now */ + LASSERT(!rc); /* the EQ should have no user by now */ case SRPC_STATE_NI_INIT: LNetNIFini(); } - - return; } diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h index 6b4a32a90..a79c315f2 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.h +++ b/drivers/staging/lustre/lnet/selftest/rpc.h @@ -45,24 +45,24 @@ * XXX: *REPLY == *REQST + 1 */ typedef enum { - SRPC_MSG_MKSN_REQST = 0, - SRPC_MSG_MKSN_REPLY = 1, - SRPC_MSG_RMSN_REQST = 2, - SRPC_MSG_RMSN_REPLY = 3, - SRPC_MSG_BATCH_REQST = 4, - SRPC_MSG_BATCH_REPLY = 5, - SRPC_MSG_STAT_REQST = 6, - SRPC_MSG_STAT_REPLY = 7, - SRPC_MSG_TEST_REQST = 8, - SRPC_MSG_TEST_REPLY = 9, - SRPC_MSG_DEBUG_REQST = 10, - SRPC_MSG_DEBUG_REPLY = 11, - SRPC_MSG_BRW_REQST = 12, - SRPC_MSG_BRW_REPLY = 13, - SRPC_MSG_PING_REQST = 14, - SRPC_MSG_PING_REPLY = 15, - SRPC_MSG_JOIN_REQST = 16, - SRPC_MSG_JOIN_REPLY = 17, + SRPC_MSG_MKSN_REQST = 0, + SRPC_MSG_MKSN_REPLY = 1, + SRPC_MSG_RMSN_REQST = 2, + SRPC_MSG_RMSN_REPLY = 3, + SRPC_MSG_BATCH_REQST = 4, + SRPC_MSG_BATCH_REPLY = 5, + SRPC_MSG_STAT_REQST = 6, + SRPC_MSG_STAT_REPLY = 7, + SRPC_MSG_TEST_REQST = 8, + SRPC_MSG_TEST_REPLY = 9, + SRPC_MSG_DEBUG_REQST = 10, + SRPC_MSG_DEBUG_REPLY = 11, + SRPC_MSG_BRW_REQST = 12, + SRPC_MSG_BRW_REPLY = 13, + SRPC_MSG_PING_REQST = 14, + SRPC_MSG_PING_REPLY = 15, + SRPC_MSG_JOIN_REQST = 16, + SRPC_MSG_JOIN_REPLY = 17, } srpc_msg_type_t; /* CAVEAT EMPTOR: @@ -78,127 +78,127 @@ typedef struct { } WIRE_ATTR srpc_generic_reqst_t; typedef struct { - __u32 status; - lst_sid_t sid; + __u32 status; + lst_sid_t sid; } WIRE_ATTR srpc_generic_reply_t; /* FRAMEWORK RPCs */ typedef struct { - __u64 mksn_rpyid; /* reply buffer matchbits */ - lst_sid_t mksn_sid; /* session id */ - __u32 mksn_force; /* use brute force */ + __u64 mksn_rpyid; /* reply buffer matchbits */ + lst_sid_t mksn_sid; /* session id */ + __u32 mksn_force; /* use brute force */ char mksn_name[LST_NAME_SIZE]; } WIRE_ATTR srpc_mksn_reqst_t; /* make session request */ typedef struct { - __u32 mksn_status; /* 
session status */
- lst_sid_t mksn_sid; /* session id */
- __u32 mksn_timeout; /* session timeout */
- char mksn_name[LST_NAME_SIZE];
+ __u32 mksn_status; /* session status */
+ lst_sid_t mksn_sid; /* session id */
+ __u32 mksn_timeout; /* session timeout */
+ char mksn_name[LST_NAME_SIZE];
 } WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
 
 typedef struct {
- __u64 rmsn_rpyid; /* reply buffer matchbits */
- lst_sid_t rmsn_sid; /* session id */
+ __u64 rmsn_rpyid; /* reply buffer matchbits */
+ lst_sid_t rmsn_sid; /* session id */
 } WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
 
 typedef struct {
- __u32 rmsn_status;
- lst_sid_t rmsn_sid; /* session id */
+ __u32 rmsn_status;
+ lst_sid_t rmsn_sid; /* session id */
 } WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
 
 typedef struct {
- __u64 join_rpyid; /* reply buffer matchbits */
- lst_sid_t join_sid; /* session id to join */
- char join_group[LST_NAME_SIZE]; /* group name */
+ __u64 join_rpyid; /* reply buffer matchbits */
+ lst_sid_t join_sid; /* session id to join */
+ char join_group[LST_NAME_SIZE]; /* group name */
 } WIRE_ATTR srpc_join_reqst_t;
 
 typedef struct {
- __u32 join_status; /* returned status */
- lst_sid_t join_sid; /* session id */
- __u32 join_timeout; /* # seconds' inactivity to
+ __u32 join_status; /* returned status */
+ lst_sid_t join_sid; /* session id */
+ __u32 join_timeout; /* # seconds' inactivity to
  * expire */
- char join_session[LST_NAME_SIZE]; /* session name */
+ char join_session[LST_NAME_SIZE]; /* session name */
 } WIRE_ATTR srpc_join_reply_t;
 
 typedef struct {
- __u64 dbg_rpyid; /* reply buffer matchbits */
- lst_sid_t dbg_sid; /* session id */
- __u32 dbg_flags; /* bitmap of debug */
+ __u64 dbg_rpyid; /* reply buffer matchbits */
+ lst_sid_t dbg_sid; /* session id */
+ __u32 dbg_flags; /* bitmap of debug */
 } WIRE_ATTR srpc_debug_reqst_t;
 
 typedef struct {
- __u32 dbg_status; /* returned code */
- lst_sid_t dbg_sid; /* session id */
- __u32 dbg_timeout; /* session timeout */
- __u32 dbg_nbatch; /* # of batches in the node */
- char dbg_name[LST_NAME_SIZE]; /* session name */
+ __u32 dbg_status; /* returned code */
+ lst_sid_t dbg_sid; /* session id */
+ __u32 dbg_timeout; /* session timeout */
+ __u32 dbg_nbatch; /* # of batches in the node */
+ char dbg_name[LST_NAME_SIZE]; /* session name */
 } WIRE_ATTR srpc_debug_reply_t;
 
-#define SRPC_BATCH_OPC_RUN 1
-#define SRPC_BATCH_OPC_STOP 2
-#define SRPC_BATCH_OPC_QUERY 3
+#define SRPC_BATCH_OPC_RUN 1
+#define SRPC_BATCH_OPC_STOP 2
+#define SRPC_BATCH_OPC_QUERY 3
 
 typedef struct {
- __u64 bar_rpyid; /* reply buffer matchbits */
- lst_sid_t bar_sid; /* session id */
- lst_bid_t bar_bid; /* batch id */
- __u32 bar_opc; /* create/start/stop batch */
- __u32 bar_testidx; /* index of test */
- __u32 bar_arg; /* parameters */
+ __u64 bar_rpyid; /* reply buffer matchbits */
+ lst_sid_t bar_sid; /* session id */
+ lst_bid_t bar_bid; /* batch id */
+ __u32 bar_opc; /* create/start/stop batch */
+ __u32 bar_testidx; /* index of test */
+ __u32 bar_arg; /* parameters */
 } WIRE_ATTR srpc_batch_reqst_t;
 
 typedef struct {
- __u32 bar_status; /* status of request */
- lst_sid_t bar_sid; /* session id */
- __u32 bar_active; /* # of active tests in batch/test */
- __u32 bar_time; /* remained time */
+ __u32 bar_status; /* status of request */
+ lst_sid_t bar_sid; /* session id */
+ __u32 bar_active; /* # of active tests in batch/test */
+ __u32 bar_time; /* remained time */
 } WIRE_ATTR srpc_batch_reply_t;
 
 typedef struct {
- __u64 str_rpyid; /* reply buffer matchbits */
- lst_sid_t str_sid; /* session id */
- __u32 str_type; /* type of stat */
+ __u64 str_rpyid; /* reply buffer matchbits */
+ lst_sid_t str_sid; /* session id */
+ __u32 str_type; /* type of stat */
 } WIRE_ATTR srpc_stat_reqst_t;
 
 typedef struct {
- __u32 str_status;
- lst_sid_t str_sid;
- sfw_counters_t str_fw;
+ __u32 str_status;
+ lst_sid_t str_sid;
+ sfw_counters_t str_fw;
 srpc_counters_t str_rpc;
 lnet_counters_t str_lnet;
 } WIRE_ATTR srpc_stat_reply_t;
 
 typedef struct {
- __u32 blk_opc; /* bulk operation code */
- __u32 blk_npg; /* # of pages */
- __u32 blk_flags; /* reserved flags */
+ __u32 blk_opc; /* bulk operation code */
+ __u32 blk_npg; /* # of pages */
+ __u32 blk_flags; /* reserved flags */
 } WIRE_ATTR test_bulk_req_t;
 
 typedef struct {
- __u16 blk_opc; /* bulk operation code */
- __u16 blk_flags; /* data check flags */
- __u32 blk_len; /* data length */
- __u32 blk_offset; /* reserved: offset */
+ __u16 blk_opc; /* bulk operation code */
+ __u16 blk_flags; /* data check flags */
+ __u32 blk_len; /* data length */
+ __u32 blk_offset; /* reserved: offset */
 } WIRE_ATTR test_bulk_req_v1_t;
 
 typedef struct {
- __u32 png_size; /* size of ping message */
- __u32 png_flags; /* reserved flags */
+ __u32 png_size; /* size of ping message */
+ __u32 png_flags; /* reserved flags */
 } WIRE_ATTR test_ping_req_t;
 
 typedef struct {
- __u64 tsr_rpyid; /* reply buffer matchbits */
- __u64 tsr_bulkid; /* bulk buffer matchbits */
+ __u64 tsr_rpyid; /* reply buffer matchbits */
+ __u64 tsr_bulkid; /* bulk buffer matchbits */
 lst_sid_t tsr_sid; /* session id */
 lst_bid_t tsr_bid; /* batch id */
- __u32 tsr_service; /* test type: bulk|ping|... */
- __u32 tsr_loop; /* test client loop count or
+ __u32 tsr_service; /* test type: bulk|ping|... */
+ __u32 tsr_loop; /* test client loop count or
  * # server buffers needed */
- __u32 tsr_concur; /* concurrency of test */
- __u8 tsr_is_client; /* is test client or not */
+ __u32 tsr_concur; /* concurrency of test */
+ __u8 tsr_is_client; /* is test client or not */
 __u8 tsr_stop_onerr; /* stop on error */
- __u32 tsr_ndest; /* # of dest nodes */
+ __u32 tsr_ndest; /* # of dest nodes */
 
 union {
 test_ping_req_t ping;
@@ -208,7 +208,7 @@ typedef struct {
 } WIRE_ATTR srpc_test_reqst_t;
 
 typedef struct {
- __u32 tsr_status; /* returned code */
+ __u32 tsr_status; /* returned code */
 lst_sid_t tsr_sid;
 } WIRE_ATTR srpc_test_reply_t;
 
@@ -228,19 +228,19 @@ typedef struct {
 } WIRE_ATTR srpc_ping_reply_t;
 
 typedef struct {
- __u64 brw_rpyid; /* reply buffer matchbits */
- __u64 brw_bulkid; /* bulk buffer matchbits */
- __u32 brw_rw; /* read or write */
- __u32 brw_len; /* bulk data len */
- __u32 brw_flags; /* bulk data patterns */
+ __u64 brw_rpyid; /* reply buffer matchbits */
+ __u64 brw_bulkid; /* bulk buffer matchbits */
+ __u32 brw_rw; /* read or write */
+ __u32 brw_len; /* bulk data len */
+ __u32 brw_flags; /* bulk data patterns */
 } WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
 
 typedef struct {
 __u32 brw_status;
 } WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
 
-#define SRPC_MSG_MAGIC 0xeeb0f00d
-#define SRPC_MSG_VERSION 1
+#define SRPC_MSG_MAGIC 0xeeb0f00d
+#define SRPC_MSG_VERSION 1
 
 typedef struct srpc_msg {
 __u32 msg_magic; /* magic number */
@@ -281,8 +281,10 @@ srpc_unpack_msg_hdr(srpc_msg_t *msg)
 if (msg->msg_magic == SRPC_MSG_MAGIC)
 return; /* no flipping needed */
 
- /* We do not swap the magic number here as it is needed to
-    determine whether the body needs to be swapped. */
+ /*
+ * We do not swap the magic number here as it is needed to
+ * determine whether the body needs to be swapped.
+ */
 /* __swab32s(&msg->msg_magic); */
 __swab32s(&msg->msg_type);
 __swab32s(&msg->msg_version);
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 870498339..e689ca184 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -56,14 +56,14 @@
 #define MADE_WITHOUT_COMPROMISE
 #endif
 
-#define SWI_STATE_NEWBORN 0
-#define SWI_STATE_REPLY_SUBMITTED 1
-#define SWI_STATE_REPLY_SENT 2
-#define SWI_STATE_REQUEST_SUBMITTED 3
-#define SWI_STATE_REQUEST_SENT 4
-#define SWI_STATE_REPLY_RECEIVED 5
-#define SWI_STATE_BULK_STARTED 6
-#define SWI_STATE_DONE 10
+#define SWI_STATE_NEWBORN 0
+#define SWI_STATE_REPLY_SUBMITTED 1
+#define SWI_STATE_REPLY_SENT 2
+#define SWI_STATE_REQUEST_SUBMITTED 3
+#define SWI_STATE_REQUEST_SENT 4
+#define SWI_STATE_REPLY_RECEIVED 5
+#define SWI_STATE_BULK_STARTED 6
+#define SWI_STATE_DONE 10
 
 /* forward refs */
 struct srpc_service;
@@ -74,31 +74,31 @@ struct sfw_test_instance;
 
 /* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
 * services, e.g. create/modify session.
 */
-#define SRPC_SERVICE_DEBUG 0
-#define SRPC_SERVICE_MAKE_SESSION 1
-#define SRPC_SERVICE_REMOVE_SESSION 2
-#define SRPC_SERVICE_BATCH 3
-#define SRPC_SERVICE_TEST 4
-#define SRPC_SERVICE_QUERY_STAT 5
-#define SRPC_SERVICE_JOIN 6
-#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
+#define SRPC_SERVICE_DEBUG 0
+#define SRPC_SERVICE_MAKE_SESSION 1
+#define SRPC_SERVICE_REMOVE_SESSION 2
+#define SRPC_SERVICE_BATCH 3
+#define SRPC_SERVICE_TEST 4
+#define SRPC_SERVICE_QUERY_STAT 5
+#define SRPC_SERVICE_JOIN 6
+#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
 /* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
-#define SRPC_SERVICE_BRW 11
-#define SRPC_SERVICE_PING 12
-#define SRPC_SERVICE_MAX_ID 12
+#define SRPC_SERVICE_BRW 11
+#define SRPC_SERVICE_PING 12
+#define SRPC_SERVICE_MAX_ID 12
 
-#define SRPC_REQUEST_PORTAL 50
+#define SRPC_REQUEST_PORTAL 50
 /* a lazy portal for framework RPC requests */
-#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
+#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
 /* all reply/bulk RDMAs go to this portal */
-#define SRPC_RDMA_PORTAL 52
+#define SRPC_RDMA_PORTAL 52
 
 static inline srpc_msg_type_t
-srpc_service2request (int service)
+srpc_service2request(int service)
 {
 switch (service) {
 default:
- LBUG ();
+ LBUG();
 
 case SRPC_SERVICE_DEBUG:
 return SRPC_MSG_DEBUG_REQST;
@@ -129,7 +129,7 @@ srpc_service2request (int service)
 }
 
 static inline srpc_msg_type_t
-srpc_service2reply (int service)
+srpc_service2reply(int service)
 {
 return srpc_service2request(service) + 1;
 }
@@ -149,25 +149,25 @@ typedef enum {
 
 typedef struct {
 srpc_event_type_t ev_type; /* what's up */
 lnet_event_kind_t ev_lnet; /* LNet event type */
- int ev_fired; /* LNet event fired? */
- int ev_status; /* LNet event status */
- void *ev_data; /* owning server/client RPC */
+ int ev_fired; /* LNet event fired? */
+ int ev_status; /* LNet event status */
+ void *ev_data; /* owning server/client RPC */
 } srpc_event_t;
 
 typedef struct {
- int bk_len; /* len of bulk data */
+ int bk_len; /* len of bulk data */
 lnet_handle_md_t bk_mdh;
- int bk_sink; /* sink/source */
- int bk_niov; /* # iov in bk_iovs */
- lnet_kiov_t bk_iovs[0];
+ int bk_sink; /* sink/source */
+ int bk_niov; /* # iov in bk_iovs */
+ lnet_kiov_t bk_iovs[0];
 } srpc_bulk_t; /* bulk descriptor */
 
 /* message buffer descriptor */
 typedef struct srpc_buffer {
 struct list_head buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
+ srpc_msg_t buf_msg;
 lnet_handle_md_t buf_mdh;
- lnet_nid_t buf_self;
+ lnet_nid_t buf_self;
 lnet_process_id_t buf_peer;
 } srpc_buffer_t;
 
@@ -176,9 +176,9 @@ typedef int (*swi_action_t) (struct swi_workitem *);
 
 typedef struct swi_workitem {
 struct cfs_wi_sched *swi_sched;
- cfs_workitem_t swi_workitem;
- swi_action_t swi_action;
- int swi_state;
+ cfs_workitem_t swi_workitem;
+ swi_action_t swi_action;
+ int swi_state;
 } swi_workitem_t;
 
 /* server-side state of a RPC */
@@ -186,78 +186,78 @@ struct srpc_server_rpc {
 /* chain on srpc_service::*_rpcq */
 struct list_head srpc_list;
 struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
- lnet_nid_t srpc_self;
+ swi_workitem_t srpc_wi;
+ srpc_event_t srpc_ev; /* bulk/reply event */
+ lnet_nid_t srpc_self;
 lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
+ srpc_msg_t srpc_replymsg;
 lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
+ srpc_buffer_t *srpc_reqstbuf;
+ srpc_bulk_t *srpc_bulk;
 
- unsigned int srpc_aborted; /* being given up */
- int srpc_status;
- void (*srpc_done)(struct srpc_server_rpc *);
+ unsigned int srpc_aborted; /* being given up */
+ int srpc_status;
+ void (*srpc_done)(struct srpc_server_rpc *);
 };
 
 /* client-side state of a RPC */
 typedef struct srpc_client_rpc {
- struct list_head crpc_list; /* chain on user's lists */
- spinlock_t crpc_lock; /* serialize */
- int crpc_service;
- atomic_t crpc_refcount;
- int crpc_timeout; /* # seconds to wait for reply */
- stt_timer_t crpc_timer;
- swi_workitem_t crpc_wi;
+ struct list_head crpc_list; /* chain on user's lists */
+ spinlock_t crpc_lock; /* serialize */
+ int crpc_service;
+ atomic_t crpc_refcount;
+ int crpc_timeout; /* # seconds to wait for reply */
+ struct stt_timer crpc_timer;
+ swi_workitem_t crpc_wi;
 lnet_process_id_t crpc_dest;
 
- void (*crpc_done)(struct srpc_client_rpc *);
- void (*crpc_fini)(struct srpc_client_rpc *);
- int crpc_status; /* completion status */
- void *crpc_priv; /* caller data */
+ void (*crpc_done)(struct srpc_client_rpc *);
+ void (*crpc_fini)(struct srpc_client_rpc *);
+ int crpc_status; /* completion status */
+ void *crpc_priv; /* caller data */
 
 /* state flags */
- unsigned int crpc_aborted:1; /* being given up */
- unsigned int crpc_closed:1; /* completed */
+ unsigned int crpc_aborted:1; /* being given up */
+ unsigned int crpc_closed:1; /* completed */
 
 /* RPC events */
- srpc_event_t crpc_bulkev; /* bulk event */
- srpc_event_t crpc_reqstev; /* request event */
- srpc_event_t crpc_replyev; /* reply event */
+ srpc_event_t crpc_bulkev; /* bulk event */
+ srpc_event_t crpc_reqstev; /* request event */
+ srpc_event_t crpc_replyev; /* reply event */
 
 /* bulk, request(reqst), and reply exchanged on wire */
- srpc_msg_t crpc_reqstmsg;
- srpc_msg_t crpc_replymsg;
+ srpc_msg_t crpc_reqstmsg;
+ srpc_msg_t crpc_replymsg;
 lnet_handle_md_t crpc_reqstmdh;
 lnet_handle_md_t crpc_replymdh;
- srpc_bulk_t crpc_bulk;
+ srpc_bulk_t crpc_bulk;
 } srpc_client_rpc_t;
 
-#define srpc_client_rpc_size(rpc) \
+#define srpc_client_rpc_size(rpc) \
 offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
 
-#define srpc_client_rpc_addref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- atomic_inc(&(rpc)->crpc_refcount); \
+#define srpc_client_rpc_addref(rpc) \
+do { \
+ CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
+ (rpc), libcfs_id2str((rpc)->crpc_dest), \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ atomic_inc(&(rpc)->crpc_refcount); \
 } while (0)
 
-#define srpc_client_rpc_decref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
- srpc_destroy_client_rpc(rpc); \
+#define srpc_client_rpc_decref(rpc) \
+do { \
+ CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
+ (rpc), libcfs_id2str((rpc)->crpc_dest), \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
+ srpc_destroy_client_rpc(rpc); \
 } while (0)
 
-#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \
- (rpc)->crpc_reqstev.ev_fired == 0 || \
- (rpc)->crpc_replyev.ev_fired == 0)
+#define srpc_event_pending(rpc) (!(rpc)->crpc_bulkev.ev_fired || \
+ !(rpc)->crpc_reqstev.ev_fired || \
+ !(rpc)->crpc_replyev.ev_fired)
 
 /* CPU partition data of srpc service */
 struct srpc_service_cd {
@@ -268,9 +268,9 @@ struct srpc_service_cd {
 /** event buffer */
 srpc_event_t scd_ev;
 /** free RPC descriptors */
- struct list_head scd_rpc_free;
+ struct list_head scd_rpc_free;
 /** in-flight RPCs */
- struct list_head scd_rpc_active;
+ struct list_head scd_rpc_active;
 /** workitem for posting buffer */
 swi_workitem_t scd_buf_wi;
 /** CPT id */
@@ -278,7 +278,7 @@ struct srpc_service_cd {
 /** error code for scd_buf_wi */
 int scd_buf_err;
 /** timestamp for scd_buf_err */
- time64_t scd_buf_err_stamp;
+ time64_t scd_buf_err_stamp;
 /** total # request buffers */
 int scd_buf_total;
 /** # posted request buffers */
@@ -290,16 +290,16 @@ struct srpc_service_cd {
 /** increase/decrease some buffers */
 int scd_buf_adjust;
 /** posted message buffers */
- struct list_head scd_buf_posted;
+ struct list_head scd_buf_posted;
 /** blocked for RPC descriptor */
- struct list_head scd_buf_blocked;
+ struct list_head scd_buf_blocked;
 };
 
 /* number of server workitems (mini-thread) for testing service */
 #define SFW_TEST_WI_MIN 256
 #define SFW_TEST_WI_MAX 2048
 
 /* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions */
+ * of peers between partitions
+ */
 #define SFW_TEST_WI_EXTRA 64
 
 /* number of server workitems (mini-thread) for framework service */
@@ -324,29 +324,29 @@ typedef struct srpc_service {
 
 typedef struct {
 struct list_head sn_list; /* chain on fw_zombie_sessions */
- lst_sid_t sn_id; /* unique identifier */
- unsigned int sn_timeout; /* # seconds' inactivity to expire */
- int sn_timer_active;
- unsigned int sn_features;
- stt_timer_t sn_timer;
+ lst_sid_t sn_id; /* unique identifier */
+ unsigned int sn_timeout; /* # seconds' inactivity to expire */
+ int sn_timer_active;
+ unsigned int sn_features;
+ struct stt_timer sn_timer;
 struct list_head sn_batches; /* list of batches */
- char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
- unsigned long sn_started;
+ char sn_name[LST_NAME_SIZE];
+ atomic_t sn_refcount;
+ atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
+ unsigned long sn_started;
 } sfw_session_t;
 
 #define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
 (sid0).ses_stamp == (sid1).ses_stamp)
 
 typedef struct {
- struct list_head bat_list; /* chain on sn_batches */
- lst_bid_t bat_id; /* batch id */
- int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- struct list_head bat_tests; /* test instances */
+ struct list_head bat_list; /* chain on sn_batches */
+ lst_bid_t bat_id; /* batch id */
+ int bat_error; /* error code of batch */
+ sfw_session_t *bat_session; /* batch's session */
+ atomic_t bat_nactive; /* # of active tests */
+ struct list_head bat_tests; /* test instances */
 } sfw_batch_t;
 
 typedef struct {
@@ -356,32 +356,32 @@ typedef struct {
 * client */
 int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
 lnet_process_id_t dest,
- srpc_client_rpc_t **rpc); /* prep a tests rpc */
+ srpc_client_rpc_t **rpc); /* prep a tests rpc */
 void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- srpc_client_rpc_t *rpc); /* done a test rpc */
+ srpc_client_rpc_t *rpc); /* done a test rpc */
 } sfw_test_client_ops_t;
 
 typedef struct sfw_test_instance {
- struct list_head tsi_list; /* chain on batch */
- int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operation
+ struct list_head tsi_list; /* chain on batch */
+ int tsi_service; /* test type */
+ sfw_batch_t *tsi_batch; /* batch */
+ sfw_test_client_ops_t *tsi_ops; /* test client operation
 */
 
 /* public parameter for all test units */
- unsigned int tsi_is_client:1; /* is test client */
- unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
- int tsi_concur; /* concurrency */
- int tsi_loop; /* loop count */
+ unsigned int tsi_is_client:1; /* is test client */
+ unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
+ int tsi_concur; /* concurrency */
+ int tsi_loop; /* loop count */
 
 /* status of test instance */
- spinlock_t tsi_lock; /* serialize */
- unsigned int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test
+ spinlock_t tsi_lock; /* serialize */
+ unsigned int tsi_stopping:1; /* test is stopping */
+ atomic_t tsi_nactive; /* # of active test
 * unit */
- struct list_head tsi_units; /* test units */
- struct list_head tsi_free_rpcs; /* free rpcs */
- struct list_head tsi_active_rpcs; /* active rpcs */
+ struct list_head tsi_units; /* test units */
+ struct list_head tsi_free_rpcs; /* free rpcs */
+ struct list_head tsi_active_rpcs; /* active rpcs */
 
 union {
 test_ping_req_t ping; /* ping parameter */
@@ -390,32 +390,32 @@ typedef struct sfw_test_instance {
 } tsi_u;
 } sfw_test_instance_t;
 
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
-#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
-#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
+#define SFW_MAX_CONCUR LST_MAX_CONCUR
+#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
 
 #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
 
 typedef struct sfw_test_unit {
- struct list_head tsu_list; /* chain on lst_test_instance */
- lnet_process_id_t tsu_dest; /* id of dest node */
- int tsu_loop; /* loop count of the test */
+ struct list_head tsu_list; /* chain on lst_test_instance */
+ lnet_process_id_t tsu_dest; /* id of dest node */
+ int tsu_loop; /* loop count of the test */
 sfw_test_instance_t *tsu_instance; /* pointer to test instance */
- void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
+ void *tsu_private; /* private data */
+ swi_workitem_t tsu_worker; /* workitem of the test unit */
 } sfw_test_unit_t;
 
 typedef struct sfw_test_case {
- struct list_head tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
+ struct list_head tsc_list; /* chain on fw_tests */
+ srpc_service_t *tsc_srv_service; /* test service */
+ sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
 } sfw_test_case_t;
 
 srpc_client_rpc_t *
 sfw_create_rpc(lnet_process_id_t peer, int service,
 unsigned features, int nbulkiov, int bulklen,
- void (*done) (srpc_client_rpc_t *), void *priv);
+ void (*done)(srpc_client_rpc_t *), void *priv);
 int sfw_create_test_rpc(sfw_test_unit_t *tsu,
 lnet_process_id_t peer, unsigned features,
 int nblk, int blklen, srpc_client_rpc_t **rpc);
@@ -427,7 +427,7 @@ void sfw_free_pages(struct srpc_server_rpc *rpc);
 void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
 int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
 int sink);
-int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
 
 srpc_client_rpc_t *
 srpc_create_client_rpc(lnet_process_id_t peer, int service,
@@ -472,9 +472,9 @@ static inline void
 swi_init_workitem(swi_workitem_t *swi, void *data,
 swi_action_t action, struct cfs_wi_sched *sched)
 {
- swi->swi_sched = sched;
+ swi->swi_sched = sched;
 swi->swi_action = action;
- swi->swi_state = SWI_STATE_NEWBORN;
+ swi->swi_state = SWI_STATE_NEWBORN;
 cfs_wi_init(&swi->swi_workitem, data, swi_wi_action);
 }
 
@@ -502,26 +502,23 @@ void sfw_shutdown(void);
 void srpc_shutdown(void);
 
 static inline void
-srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
 {
- LASSERT(rpc != NULL);
+ LASSERT(rpc);
 LASSERT(!srpc_event_pending(rpc));
- LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT(!atomic_read(&rpc->crpc_refcount));
 
- if (rpc->crpc_fini == NULL) {
+ if (!rpc->crpc_fini)
 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- } else {
- (*rpc->crpc_fini) (rpc);
- }
-
- return;
+ else
+ (*rpc->crpc_fini)(rpc);
 }
 
 static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
- int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+ int service, int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
 {
 LASSERT(nbulkiov <= LNET_MAX_IOV);
 
@@ -534,30 +531,29 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
 spin_lock_init(&rpc->crpc_lock);
 atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
 
- rpc->crpc_dest = peer;
- rpc->crpc_priv = priv;
- rpc->crpc_service = service;
- rpc->crpc_bulk.bk_len = bulklen;
+ rpc->crpc_dest = peer;
+ rpc->crpc_priv = priv;
+ rpc->crpc_service = service;
+ rpc->crpc_bulk.bk_len = bulklen;
 rpc->crpc_bulk.bk_niov = nbulkiov;
- rpc->crpc_done = rpc_done;
- rpc->crpc_fini = rpc_fini;
+ rpc->crpc_done = rpc_done;
+ rpc->crpc_fini = rpc_fini;
 LNetInvalidateHandle(&rpc->crpc_reqstmdh);
 LNetInvalidateHandle(&rpc->crpc_replymdh);
 LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
 
 /* no event is expected at this point */
- rpc->crpc_bulkev.ev_fired =
- rpc->crpc_reqstev.ev_fired =
+ rpc->crpc_bulkev.ev_fired = 1;
+ rpc->crpc_reqstev.ev_fired = 1;
 rpc->crpc_replyev.ev_fired = 1;
 
- rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
+ rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
 rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
- rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
- return;
+ rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
 }
 
 static inline const char *
-swi_state2str (int state)
+swi_state2str(int state)
 {
 #define STATE2STR(x) case x: return #x
 switch (state) {
@@ -602,11 +598,11 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
 
 LASSERT(sv->sv_shuttingdown);
 
- while (srpc_finish_service(sv) == 0) {
+ while (!srpc_finish_service(sv)) {
 i++;
- CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
- "Waiting for %s service to shutdown...\n",
- sv->sv_name);
+ CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
+ "Waiting for %s service to shutdown...\n",
+ sv->sv_name);
 selftest_wait_events();
 }
 }
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index b98c08a10..8be52526a 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -57,17 +57,17 @@
 (STTIMER_NSLOTS - 1))])
 
 static struct st_timer_data {
- spinlock_t stt_lock;
- unsigned long stt_prev_slot; /* start time of the slot processed
+ spinlock_t stt_lock;
+ unsigned long stt_prev_slot; /* start time of the slot processed
 * previously */
 struct list_head stt_hash[STTIMER_NSLOTS];
- int stt_shuttingdown;
+ int stt_shuttingdown;
 wait_queue_head_t stt_waitq;
- int stt_nthreads;
+ int stt_nthreads;
 } stt_data;
 
 void
-stt_add_timer(stt_timer_t *timer)
+stt_add_timer(struct stt_timer *timer)
 {
 struct list_head *pos;
 
@@ -75,13 +75,14 @@ stt_add_timer(stt_timer_t *timer)
 LASSERT(stt_data.stt_nthreads > 0);
 LASSERT(!stt_data.stt_shuttingdown);
 
- LASSERT(timer->stt_func != NULL);
+ LASSERT(timer->stt_func);
 LASSERT(list_empty(&timer->stt_list));
 LASSERT(timer->stt_expires > ktime_get_real_seconds());
 
 /* a simple insertion sort */
 list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
- stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
+ struct stt_timer *old = list_entry(pos, struct stt_timer,
+ stt_list);
 
 if (timer->stt_expires >= old->stt_expires)
 break;
@@ -101,7 +102,7 @@
 * another CPU.
 */
 int
-stt_del_timer(stt_timer_t *timer)
+stt_del_timer(struct stt_timer *timer)
 {
 int ret = 0;
 
@@ -124,10 +125,10 @@ static int
 stt_expire_list(struct list_head *slot, time64_t now)
 {
 int expired = 0;
- stt_timer_t *timer;
+ struct stt_timer *timer;
 
 while (!list_empty(slot)) {
- timer = list_entry(slot->next, stt_timer_t, stt_list);
+ timer = list_entry(slot->next, struct stt_timer, stt_list);
 
 if (timer->stt_expires > now)
 break;
@@ -218,7 +219,7 @@ stt_startup(void)
 stt_data.stt_nthreads = 0;
 init_waitqueue_head(&stt_data.stt_waitq);
 rc = stt_start_timer_thread();
- if (rc != 0)
+ if (rc)
 CERROR("Can't spawn timer thread: %d\n", rc);
 
 return rc;
@@ -237,7 +238,7 @@ stt_shutdown(void)
 stt_data.stt_shuttingdown = 1;
 
 wake_up(&stt_data.stt_waitq);
- lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+ lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock,
 "waiting for %d threads to terminate\n",
 stt_data.stt_nthreads);
 
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index 03e2ee294..f1fbebd8a 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -38,15 +38,15 @@
 #ifndef __SELFTEST_TIMER_H__
 #define __SELFTEST_TIMER_H__
 
-typedef struct {
+struct stt_timer {
 struct list_head stt_list;
- time64_t stt_expires;
- void (*stt_func) (void *);
- void *stt_data;
-} stt_timer_t;
+ time64_t stt_expires;
+ void (*stt_func)(void *);
+ void *stt_data;
+};
 
-void stt_add_timer(stt_timer_t *timer);
-int stt_del_timer(stt_timer_t *timer);
+void stt_add_timer(struct stt_timer *timer);
+int stt_del_timer(struct stt_timer *timer);
 int stt_startup(void);
 void stt_shutdown(void);
 
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 62c7bba75..8ac7cd4d6 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -1,7 +1,7 @@
 config LUSTRE_FS
 tristate "Lustre file system client support"
- depends on INET && m && !MIPS && !XTENSA && !SUPERH
- select LNET
+ depends on m && !MIPS && !XTENSA && !SUPERH
+ depends on LNET
 select CRYPTO
 select CRYPTO_CRC32
 select CRYPTO_CRC32_PCLMUL if X86
diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile
index 35d8b0b2d..331e4fcdd 100644
--- a/drivers/staging/lustre/lustre/Makefile
+++ b/drivers/staging/lustre/lustre/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_LUSTRE_FS) += libcfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \
+obj-$(CONFIG_LUSTRE_FS) += obdclass/ ptlrpc/ fld/ osc/ mgc/ \
 fid/ lov/ mdc/ lmv/ llite/ obdecho/
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index ff8f38dc1..39269c3c5 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -68,7 +68,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
 
 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
 LUSTRE_MDS_VERSION, SEQ_QUERY);
- if (req == NULL)
+ if (!req)
 return -ENOMEM;
 
 /* Init operation code */
@@ -95,7 +95,8 @@ static int seq_client_rpc(struct lu_client_seq *seq,
 * precreating objects on this OST), and it will send the
 * request to MDT0 here, so we can not keep resending the
 * request here, otherwise if MDT0 is failed(umounted),
- * it can not release the export of MDT0 */
+ * it can not release the export of MDT0
+ */
 if (seq->lcs_type == LUSTRE_SEQ_DATA)
 req->rq_no_delay = req->rq_no_resend = 1;
 debug_mask = D_CONSOLE;
@@ -152,7 +153,8 @@ static int seq_client_alloc_meta(const struct lu_env *env,
 /* If meta server return -EINPROGRESS or EAGAIN,
 * it means meta server might not be ready to
 * allocate super sequence from sequence controller
- * (MDT0)yet */
+ * (MDT0)yet
+ */
 rc = seq_client_rpc(seq, &seq->lcs_space,
 SEQ_ALLOC_META, "meta");
 } while (rc == -EINPROGRESS || rc == -EAGAIN);
 
@@ -226,8 +228,8 @@ int seq_client_alloc_fid(const struct lu_env *env,
 wait_queue_t link;
 int rc;
 
- LASSERT(seq != NULL);
- LASSERT(fid != NULL);
+ LASSERT(seq);
+ LASSERT(fid);
 
 init_waitqueue_entry(&link, current);
 mutex_lock(&seq->lcs_mutex);
@@ -292,7 +294,7 @@ void seq_client_flush(struct lu_client_seq *seq)
 {
 wait_queue_t link;
 
- LASSERT(seq != NULL);
+ LASSERT(seq);
 init_waitqueue_entry(&link, current);
 
 mutex_lock(&seq->lcs_mutex);
@@ -375,8 +377,8 @@ static int seq_client_init(struct lu_client_seq *seq,
 {
 int rc;
 
- LASSERT(seq != NULL);
- LASSERT(prefix != NULL);
+ LASSERT(seq);
+ LASSERT(prefix);
 
 seq->lcs_type = type;
 
@@ -438,7 +440,7 @@ int client_fid_fini(struct obd_device *obd)
 {
 struct client_obd *cli = &obd->u.cli;
 
- if (cli->cl_seq != NULL) {
+ if (cli->cl_seq) {
 seq_client_fini(cli->cl_seq);
 kfree(cli->cl_seq);
 cli->cl_seq = NULL;
@@ -448,7 +450,7 @@ int client_fid_fini(struct obd_device *obd)
 }
 EXPORT_SYMBOL(client_fid_fini);
 
-static int __init fid_mod_init(void)
+static int __init fid_init(void)
 {
 seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME,
 debugfs_lustre_root,
@@ -456,16 +458,16 @@ static int __init fid_mod_init(void)
 return PTR_ERR_OR_ZERO(seq_debugfs_dir);
 }
 
-static void __exit fid_mod_exit(void)
+static void __exit fid_exit(void)
 {
 if (!IS_ERR_OR_NULL(seq_debugfs_dir))
 ldebugfs_remove(&seq_debugfs_dir);
 }
 
 MODULE_AUTHOR("OpenSFS, Inc. ");
-MODULE_DESCRIPTION("Lustre FID Module");
+MODULE_DESCRIPTION("Lustre File IDentifier");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1.0");
 
-module_init(fid_mod_init);
-module_exit(fid_mod_exit);
+module_init(fid_init);
+module_exit(fid_exit);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 39f2aa32e..1f0e78686 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -66,7 +66,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
 int rc;
 char kernbuf[MAX_FID_RANGE_STRLEN];
 
- LASSERT(range != NULL);
+ LASSERT(range);
 
 if (count >= sizeof(kernbuf))
 return -EINVAL;
@@ -85,6 +85,8 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
 rc = sscanf(kernbuf, "[%llx - %llx]\n",
 (unsigned long long *)&tmp.lsr_start,
 (unsigned long long *)&tmp.lsr_end);
+ if (rc != 2)
+ return -EINVAL;
 if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
 tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
 return -EINVAL;
@@ -102,7 +104,6 @@ ldebugfs_fid_space_seq_write(struct file *file,
 int rc;
 
 seq = ((struct seq_file *)file->private_data)->private;
- LASSERT(seq != NULL);
 
 mutex_lock(&seq->lcs_mutex);
 rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space);
@@ -122,8 +123,6 @@ ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused)
 {
 struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 
- LASSERT(seq != NULL);
-
 mutex_lock(&seq->lcs_mutex);
 seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space));
 mutex_unlock(&seq->lcs_mutex);
@@ -141,7 +140,6 @@ ldebugfs_fid_width_seq_write(struct file *file,
 int rc, val;
 
 seq = ((struct seq_file *)file->private_data)->private;
- LASSERT(seq != NULL);
 
 rc = lprocfs_write_helper(buffer, count, &val);
 if (rc)
@@ -170,8 +168,6 @@ ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused)
 {
 struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 
- LASSERT(seq != NULL);
-
 mutex_lock(&seq->lcs_mutex);
 seq_printf(m, "%llu\n", seq->lcs_width);
 mutex_unlock(&seq->lcs_mutex);
@@ -184,8 +180,6 @@ ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused)
 {
 struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 
- LASSERT(seq != NULL);
-
 mutex_lock(&seq->lcs_mutex);
 seq_printf(m, DFID "\n", PFID(&seq->lcs_fid));
 mutex_unlock(&seq->lcs_mutex);
@@ -199,9 +193,7 @@ ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused)
 struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
 struct client_obd *cli;
 
- LASSERT(seq != NULL);
-
- if (seq->lcs_exp != NULL) {
+ if (seq->lcs_exp) {
 cli = &seq->lcs_exp->exp_obd->u.cli;
 seq_printf(m, "%s\n", cli->cl_target_uuid.uuid);
 }
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index d9459e58e..062f388cf 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -65,7 +65,7 @@ struct fld_cache *fld_cache_init(const char *name,
 {
 struct fld_cache *cache;
 
- LASSERT(name != NULL);
+ LASSERT(name);
 LASSERT(cache_threshold < cache_size);
 
 cache = kzalloc(sizeof(*cache), GFP_NOFS);
@@ -100,7 +100,7 @@ void fld_cache_fini(struct fld_cache *cache)
 {
 __u64 pct;
 
- LASSERT(cache != NULL);
+ LASSERT(cache);
 fld_cache_flush(cache);
 
 if (cache->fci_stat.fst_count > 0) {
@@ -183,7 +183,8 @@ restart_fixup:
 }
 
 /* we could have overlap over next
- * range too. better restart. */
+ * range too. better restart.
+ */
 goto restart_fixup;
 }
 
@@ -218,8 +219,6 @@ static int fld_cache_shrink(struct fld_cache *cache)
 struct list_head *curr;
 int num = 0;
 
- LASSERT(cache != NULL);
-
 if (cache->fci_cache_count < cache->fci_cache_size)
 return 0;
 
@@ -234,7 +233,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
 }
 
 CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
- cache->fci_name, num);
+ cache->fci_name, num);
 
 return 0;
 }
@@ -295,8 +294,8 @@ static void fld_cache_punch_hole(struct fld_cache *cache,
 * handle range overlap in fld cache.
 */
 static void fld_cache_overlap_handle(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
 {
 const struct lu_seq_range *range = &f_new->fce_range;
 const u64 new_start = range->lsr_start;
@@ -304,7 +303,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
 const u32 mdt = range->lsr_index;
 
 /* this is overlap case, these case are checking overlapping with
- * prev range only. fixup will handle overlapping with next range. */
+ * prev range only. fixup will handle overlapping with next range.
+ */
 
 if (f_curr->fce_range.lsr_index == mdt) {
 f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
@@ -319,7 +319,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
 } else if (new_start <= f_curr->fce_range.lsr_start &&
 f_curr->fce_range.lsr_end <= new_end) {
 /* case 1: new range completely overshadowed existing range.
- * e.g. whole range migrated. update fld cache entry */
+ * e.g. whole range migrated. update fld cache entry
+ */
 f_curr->fce_range = *range;
 
 kfree(f_new);
@@ -401,8 +402,8 @@ static int fld_cache_insert_nolock(struct fld_cache *cache,
 list_for_each_entry_safe(f_curr, n, head, fce_list) {
 /* add list if next is end of list */
 if (new_end < f_curr->fce_range.lsr_start ||
- (new_end == f_curr->fce_range.lsr_start &&
- new_flags != f_curr->fce_range.lsr_flags))
+ (new_end == f_curr->fce_range.lsr_start &&
+ new_flags != f_curr->fce_range.lsr_flags))
 break;
 
 prev = &f_curr->fce_list;
@@ -414,7 +415,7 @@ static int fld_cache_insert_nolock(struct fld_cache *cache,
 }
 }
 
- if (prev == NULL)
+ if (!prev)
 prev = head;
 
 CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
@@ -459,8 +460,8 @@ struct fld_cache_entry
 head = &cache->fci_entries_head;
 
 list_for_each_entry(flde, head, fce_list) {
 if (range->lsr_start == flde->fce_range.lsr_start ||
- (range->lsr_end == flde->fce_range.lsr_end &&
- range->lsr_flags == flde->fce_range.lsr_flags)) {
+ (range->lsr_end == flde->fce_range.lsr_end &&
+ range->lsr_flags == flde->fce_range.lsr_flags)) {
 got = flde;
 break;
 }
@@ -499,7 +500,7 @@ int fld_cache_lookup(struct fld_cache *cache,
 cache->fci_stat.fst_count++;
 list_for_each_entry(flde, head, fce_list) {
 if (flde->fce_range.lsr_start > seq) {
- if (prev != NULL)
+ if (prev)
 *range = prev->fce_range;
 break;
 }
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 12eb1647b..e8a3caf20 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -58,22 +58,16 @@ struct fld_stats {
 __u64 fst_inflight;
 };
 
-typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64);
-
-typedef struct lu_fld_target *
-(*fld_scan_func_t) (struct lu_client_fld *, __u64);
-
 struct lu_fld_hash {
 const char *fh_name;
- fld_hash_func_t fh_hash_func;
- fld_scan_func_t fh_scan_func;
+ int (*fh_hash_func)(struct lu_client_fld *, __u64);
+ struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *, __u64);
 };
 
 struct fld_cache_entry {
 struct list_head fce_lru;
 struct list_head fce_list;
- /**
- * fld cache entries are sorted on range->lsr_start field. */
+ /** fld cache entries are sorted on range->lsr_start field. */
 struct lu_seq_range fce_range;
 };
 
@@ -84,32 +78,25 @@ struct fld_cache {
 */
 rwlock_t fci_lock;
 
- /**
- * Cache shrink threshold */
+ /** Cache shrink threshold */
 int fci_threshold;
 
- /**
- * Preferred number of cached entries */
+ /** Preferred number of cached entries */
 int fci_cache_size;
 
- /**
- * Current number of cached entries. Protected by \a fci_lock */
+ /** Current number of cached entries. Protected by \a fci_lock */
 int fci_cache_count;
 
- /**
- * LRU list fld entries. */
+ /** LRU list fld entries. */
 struct list_head fci_lru;
 
- /**
- * sorted fld entries. */
+ /** sorted fld entries. */
 struct list_head fci_entries_head;
 
- /**
- * Cache statistics. */
+ /** Cache statistics. */
 struct fld_stats fci_stat;
 
- /**
- * Cache name used for debug and messages. */
+ /** Cache name used for debug and messages. */
 char fci_name[LUSTRE_MDT_MAXNAMELEN];
 unsigned int fci_no_shrink:1;
 };
@@ -169,7 +156,7 @@ struct fld_cache_entry
 static inline const char *
 fld_target_name(struct lu_fld_target *tar)
 {
- if (tar->ft_srv != NULL)
+ if (tar->ft_srv)
 return tar->ft_srv->lsf_name;
 
 return (const char *)tar->ft_exp->exp_obd->obd_name;
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index d92c01b74..a3d122d85 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -58,7 +58,8 @@
 #include "fld_internal.h"
 
 /* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
- * It should be common thing. The same about mdc RPC lock */
+ * It should be common thing. The same about mdc RPC lock
+ */
 static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
 {
 int rc;
@@ -124,7 +125,8 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
 * it should go to index 0 directly, instead of calculating
 * hash again, and also if other MDTs is not being connected,
 * the fld lookup requests(for seq on MDT0) should not be
- * blocked because of other MDTs */
+ * blocked because of other MDTs
+ */
 if (fid_seq_is_norm(seq))
 hash = fld_rrb_hash(fld, seq);
 else
@@ -139,18 +141,19 @@ again:
 if (hash != 0) {
 /* It is possible the remote target(MDT) are not connected to
 * with client yet, so we will refer this to MDT0, which should
- * be connected during mount */
+ * be connected during mount
+ */
 hash = 0;
 goto again;
 }
 
 CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n",
- fld->lcf_name, hash, seq, fld->lcf_count);
+ fld->lcf_name, hash, seq, fld->lcf_count);
 
 list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
- const char *srv_name = target->ft_srv != NULL ?
+ const char *srv_name = target->ft_srv ?
 target->ft_srv->lsf_name : "";
- const char *exp_name = target->ft_exp != NULL ?
+ const char *exp_name = target->ft_exp ?
 (char *)target->ft_exp->exp_obd->obd_uuid.uuid : "";
 
@@ -183,13 +186,13 @@ fld_client_get_target(struct lu_client_fld *fld, u64 seq)
 {
 struct lu_fld_target *target;
 
- LASSERT(fld->lcf_hash != NULL);
+ LASSERT(fld->lcf_hash);
 
 spin_lock(&fld->lcf_lock);
 target = fld->lcf_hash->fh_scan_func(fld, seq);
 spin_unlock(&fld->lcf_lock);
 
- if (target != NULL) {
+ if (target) {
 CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n",
 fld->lcf_name, target->ft_idx, seq);
 }
@@ -207,18 +210,18 @@ int fld_client_add_target(struct lu_client_fld *fld,
 const char *name;
 struct lu_fld_target *target, *tmp;
 
- LASSERT(tar != NULL);
+ LASSERT(tar);
 name = fld_target_name(tar);
- LASSERT(name != NULL);
- LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
+ LASSERT(name);
+ LASSERT(tar->ft_srv || tar->ft_exp);
 
 if (fld->lcf_flags != LUSTRE_FLD_INIT) {
 CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
- fld->lcf_name, name, tar->ft_idx);
+ fld->lcf_name, name, tar->ft_idx);
 return 0;
 }
 
 CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
- fld->lcf_name, name, tar->ft_idx);
+ fld->lcf_name, name, tar->ft_idx);
 
 target = kzalloc(sizeof(*target), GFP_NOFS);
 if (!target)
@@ -236,13 +239,12 @@ int fld_client_add_target(struct lu_client_fld *fld,
 }
 
 target->ft_exp = tar->ft_exp;
- if (target->ft_exp != NULL)
+ if (target->ft_exp)
 class_export_get(target->ft_exp);
 target->ft_srv = tar->ft_srv;
 target->ft_idx = tar->ft_idx;
 
- list_add_tail(&target->ft_chain,
- &fld->lcf_targets);
+ list_add_tail(&target->ft_chain, &fld->lcf_targets);
 
 fld->lcf_count++;
 spin_unlock(&fld->lcf_lock);
@@ -257,14 +259,13 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
 struct lu_fld_target *target, *tmp;
 
 spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
 if (target->ft_idx == idx) {
 fld->lcf_count--;
 list_del(&target->ft_chain);
 spin_unlock(&fld->lcf_lock);
 
- if (target->ft_exp != NULL)
+ if (target->ft_exp)
 class_export_put(target->ft_exp);
 
 kfree(target);
@@ -326,8 +327,6 @@ int fld_client_init(struct lu_client_fld *fld,
 int cache_size, cache_threshold;
 int rc;
 
- LASSERT(fld != NULL);
-
 snprintf(fld->lcf_name, sizeof(fld->lcf_name),
 "cli-%s", prefix);
 
@@ -375,17 +374,16 @@ void fld_client_fini(struct lu_client_fld *fld)
 struct lu_fld_target *target, *tmp;
 
 spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
 fld->lcf_count--;
 list_del(&target->ft_chain);
- if (target->ft_exp != NULL)
+ if (target->ft_exp)
 class_export_put(target->ft_exp);
 kfree(target);
 }
 spin_unlock(&fld->lcf_lock);
 
- if (fld->lcf_cache != NULL) {
+ if (fld->lcf_cache) {
 if (!IS_ERR(fld->lcf_cache))
 fld_cache_fini(fld->lcf_cache);
 fld->lcf_cache = NULL;
@@ -402,12 +400,12 @@ int fld_client_rpc(struct obd_export *exp,
 int rc;
 struct obd_import *imp;
 
- LASSERT(exp != NULL);
+ LASSERT(exp);
 
 imp = class_exp2cliimp(exp);
 req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
 FLD_QUERY);
- if (req == NULL)
+ if (!req)
 return -ENOMEM;
 
 op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
@@ -436,7 +434,7 @@ int fld_client_rpc(struct obd_export *exp,
 goto out_req;
 
 prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (prange == NULL) {
+ if (!prange) {
 rc = -EFAULT;
 goto out_req;
 }
@@ -463,10 +461,10 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
 
 /* Can not find it in the cache */
 target = fld_client_get_target(fld, seq);
- LASSERT(target != NULL);
+ LASSERT(target);
 
 CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
- fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
+ fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
 
 res.lsr_start = seq;
 fld_range_set_type(&res, flags);
@@ -487,7 +485,7 @@ void fld_client_flush(struct lu_client_fld *fld)
 }
 EXPORT_SYMBOL(fld_client_flush);
 
-static int __init fld_mod_init(void)
+static int __init fld_init(void)
 {
 fld_debugfs_dir = ldebugfs_register(LUSTRE_FLD_NAME,
 debugfs_lustre_root,
@@ -495,15 +493,16 @@ static int __init fld_mod_init(void)
 return PTR_ERR_OR_ZERO(fld_debugfs_dir);
 }
 
-static void __exit fld_mod_exit(void)
+static void __exit fld_exit(void)
 {
 if (!IS_ERR_OR_NULL(fld_debugfs_dir))
 ldebugfs_remove(&fld_debugfs_dir);
 }
 
 MODULE_AUTHOR("OpenSFS, Inc. ");
-MODULE_DESCRIPTION("Lustre FLD");
+MODULE_DESCRIPTION("Lustre FID Location Database");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
 MODULE_LICENSE("GPL");
 
-module_init(fld_mod_init)
-module_exit(fld_mod_exit)
+module_init(fld_init)
+module_exit(fld_exit)
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 41ceaa819..ca898befe 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -60,11 +60,8 @@ fld_debugfs_targets_seq_show(struct seq_file *m, void *unused)
 struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
 struct lu_fld_target *target;
 
- LASSERT(fld != NULL);
-
 spin_lock(&fld->lcf_lock);
- list_for_each_entry(target,
- &fld->lcf_targets, ft_chain)
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain)
 seq_printf(m, "%s\n", fld_target_name(target));
 spin_unlock(&fld->lcf_lock);
 
@@ -76,8 +73,6 @@ fld_debugfs_hash_seq_show(struct seq_file *m, void *unused)
 {
 struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
 
- LASSERT(fld != NULL);
-
 spin_lock(&fld->lcf_lock);
 seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
 spin_unlock(&fld->lcf_lock);
@@ -102,9 +97,8 @@ fld_debugfs_hash_seq_write(struct file *file,
 return -EFAULT;
 
 fld = ((struct seq_file *)file->private_data)->private;
- LASSERT(fld != NULL);
 
- for (i = 0; fld_hash[i].fh_name != NULL; i++) {
+ for (i = 0; fld_hash[i].fh_name; i++) {
 if (count != strlen(fld_hash[i].fh_name))
 continue;
 
@@ -114,7 +108,7 @@ fld_debugfs_hash_seq_write(struct file *file,
 }
 }
 
- if (hash != NULL) {
+ if (hash) {
 spin_lock(&fld->lcf_lock);
 fld->lcf_hash = hash;
 spin_unlock(&fld->lcf_lock);
@@ -132,8 +126,6 @@ fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer,
 {
 struct lu_client_fld *fld = file->private_data;
 
- LASSERT(fld != NULL);
-
 fld_cache_flush(fld->lcf_cache);
 
 CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index bd7acc2a1..fb971ded5 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -157,7 +157,8 @@ struct cl_device {
 };
 
 /** \addtogroup cl_object cl_object
- * @{ */
+ * @{
+ */
 /**
 * "Data attributes" of cl_object. Data attributes can be updated
 * independently for a sub-object, and top-object's attributes are calculated
@@ -288,13 +289,14 @@ struct cl_object_conf {
 enum {
 /** configure layout, set up a new stripe, must be called while
- * holding layout lock. */
+ * holding layout lock.
+ */
 OBJECT_CONF_SET = 0,
 /** invalidate the current stripe configuration due to losing
- * layout lock. */
+ * layout lock.
+ */
 OBJECT_CONF_INVALIDATE = 1,
- /** wait for old layout to go away so that new layout can be
- * set up. */
+ /** wait for old layout to go away so that new layout can be set up. */
 OBJECT_CONF_WAIT = 2
 };
 
@@ -320,7 +322,7 @@ struct cl_object_operations {
 * to be used instead of newly created.
 */
 int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, struct page *vmpage);
 /**
 * Initialize lock slice for this layer. Called top-to-bottom through
 * every object layer when a new cl_lock is instantiated. Layer
@@ -393,7 +395,8 @@ struct cl_object_operations {
 */
 struct cl_object_header {
 /** Standard lu_object_header. cl_object::co_lu::lo_header points
- * here. */
+ * here.
+ */
 struct lu_object_header coh_lu;
 /** \name locks
 * \todo XXX move locks below to the separate cache-lines, they are
@@ -464,7 +467,8 @@ struct cl_object_header {
 #define CL_PAGE_EOF ((pgoff_t)~0ull)
 
 /** \addtogroup cl_page cl_page
- * @{ */
+ * @{
+ */
 
 /** \struct cl_page
 * Layered client page.
@@ -687,12 +691,14 @@ enum cl_page_state {
 
 enum cl_page_type {
 /** Host page, the page is from the host inode which the cl_page
- * belongs to. */
+ * belongs to.
+ */
 CPT_CACHEABLE = 1,
 
 /** Transient page, the transient cl_page is used to bind a cl_page
 * to vmpage which is not belonging to the same object of cl_page.
- * it is used in DirectIO, lockless IO and liblustre. */
+ * it is used in DirectIO and lockless IO.
+ */
 CPT_TRANSIENT,
 };
 
@@ -728,7 +734,8 @@ struct cl_page {
 /** Parent page, NULL for top-level page. Immutable after creation. */
 struct cl_page *cp_parent;
 /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation. */
+ * creation.
+ */
 struct cl_page *cp_child;
 /**
 * Page state. This field is const to avoid accidental update, it is
@@ -842,7 +849,7 @@ struct cl_page_operations {
 * \return the underlying VM page. Optional.
 */
 struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
+ const struct cl_page_slice *slice);
 /**
 * Called when \a io acquires this page into the exclusive
 * ownership. When this method returns, it is guaranteed that the is
@@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
 /** @} cl_page */
 
 /** \addtogroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
 
 /** \struct cl_lock
 *
 * Extent locking on the client.
@@ -1641,7 +1649,8 @@ struct cl_lock {
 struct cl_lock_slice {
 struct cl_lock *cls_lock;
 /** Object slice corresponding to this lock slice. Immutable after
- * creation. */
+ * creation.
+ */
 struct cl_object *cls_obj;
 const struct cl_lock_operations *cls_ops;
 /** Linkage into cl_lock::cll_layers. Immutable after creation. */
@@ -1885,7 +1894,8 @@ struct cl_2queue {
 /** @} cl_page_list */
 
 /** \addtogroup cl_io cl_io
- * @{ */
+ * @{
+ */
 
 /** \struct cl_io
 * I/O
 *
@@ -2041,8 +2051,8 @@ struct cl_io_operations {
 *
 * \see cl_io_operations::cio_iter_fini()
 */
- int (*cio_iter_init) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ int (*cio_iter_init)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
 /**
 * Finalize io iteration.
 *
@@ -2052,8 +2062,8 @@ struct cl_io_operations {
 *
 * \see cl_io_operations::cio_iter_init()
 */
- void (*cio_iter_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_iter_fini)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
 /**
 * Collect locks for the current iteration of io.
 *
@@ -2063,8 +2073,8 @@ struct cl_io_operations {
 * cl_io_lock_add(). Once all locks are collected, they are
 * sorted and enqueued in the proper order.
 */
- int (*cio_lock) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ int (*cio_lock)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
 /**
 * Finalize unlocking.
 *
@@ -2089,8 +2099,8 @@ struct cl_io_operations {
 * Called top-to-bottom at the end of io loop. Here layer
 * might wait for an unfinished asynchronous io.
 */
- void (*cio_end) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_end)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
 /**
 * Called bottom-to-top to notify layers that read/write IO
 * iteration finished, with \a nob bytes transferred.
@@ -2101,8 +2111,8 @@ struct cl_io_operations {
 /**
 * Called once per io, bottom-to-top to release io resources.
 */
- void (*cio_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_fini)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
 } op[CIT_OP_NR];
 
 struct {
 /**
@@ -2222,7 +2232,7 @@ struct cl_io_lock_link {
 struct cl_lock *cill_lock;
 /** optional destructor */
 void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ struct cl_io_lock_link *link);
 };
 
 /**
@@ -2272,7 +2282,7 @@ enum cl_io_lock_dmd {
 CILR_MANDATORY = 0,
 /** Layers are free to decide between local and global locking. */
 CILR_MAYBE,
- /** Never lock: there is no cache (e.g., liblustre). */
+ /** Never lock: there is no cache (e.g., lockless IO). */
 CILR_NEVER
 };
 
@@ -2284,7 +2294,8 @@ enum cl_fsync_mode {
 /** discard all of dirty pages in a specific file range */
 CL_FSYNC_DISCARD = 2,
 /** start writeback and make sure they have reached storage before
- * return. OST_SYNC RPC must be issued and finished */
+ * return. OST_SYNC RPC must be issued and finished
+ */
 CL_FSYNC_ALL = 3
 };
 
@@ -2403,7 +2414,8 @@ struct cl_io {
 /** @} cl_io */
 
 /** \addtogroup cl_req cl_req
- * @{ */
+ * @{
+ */
 
 /** \struct cl_req
 * Transfer.
 *
@@ -2582,7 +2594,8 @@ enum cache_stats_item {
 /** how many entities are in the cache right now */
 CS_total,
 /** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
+ * evicted) right now
+ */
 CS_busy,
 /** how many entities were created at all */
 CS_create,
@@ -2600,7 +2613,7 @@ struct cache_stats {
 };
 
 /** These are not exported so far */
-void cache_stats_init (struct cache_stats *cs, const char *name);
+void cache_stats_init(struct cache_stats *cs, const char *name);
 
 /**
 * Client-side site. This represents particular client stack. "Global"
@@ -2613,7 +2626,7 @@ struct cl_site {
 * Statistical counters. Atomics do not scale, something better like
 * per-cpu counters is needed.
 *
- * These are exported as /proc/fs/lustre/llite/.../site
+ * These are exported as /sys/kernel/debug/lustre/llite/.../site
 *
 * When interpreting keep in mind that both sub-locks (and sub-pages)
 * and top-locks (and top-pages) are accounted here.
@@ -2624,8 +2637,8 @@ struct cl_site {
 atomic_t cs_locks_state[CLS_NR];
 };
 
-int cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
+int cl_site_init(struct cl_site *s, struct cl_device *top);
+void cl_site_fini(struct cl_site *s);
 void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
 
 /**
@@ -2653,7 +2666,7 @@ static inline int lu_device_is_cl(const struct lu_device *d)
 
 static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
 {
- LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+ LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d));
 return container_of0(d, struct cl_device, cd_lu_dev);
 }
 
@@ -2664,7 +2677,7 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d)
 
 static inline struct cl_object *lu2cl(const struct lu_object *o)
 {
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+ LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
 return container_of0(o, struct cl_object, co_lu);
 }
 
@@ -2681,7 +2694,7 @@ static inline struct cl_object *cl_object_next(const struct cl_object *obj)
 
 static inline struct cl_device *cl_object_device(const struct cl_object *o)
 {
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
+ LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
 return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
 }
 
@@ -2725,27 +2738,28 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
 /** @} helpers */
 
 /** \defgroup cl_object cl_object
- * @{ */
-struct cl_object *cl_object_top (struct cl_object *o);
+ * @{
+ */
+struct cl_object *cl_object_top(struct cl_object *o);
 struct cl_object *cl_object_find(const struct lu_env *env,
 struct cl_device *cd, const struct lu_fid *fid,
 const struct cl_object_conf *c);
 int cl_object_header_init(struct cl_object_header *h);
-void cl_object_put (const struct lu_env *env, struct cl_object *o);
-void cl_object_get (struct cl_object *o);
-void cl_object_attr_lock (struct cl_object *o);
+void cl_object_put(const struct lu_env *env, struct cl_object *o);
+void cl_object_get(struct cl_object *o);
+void cl_object_attr_lock(struct cl_object *o);
 void cl_object_attr_unlock(struct cl_object *o);
-int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
-int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb);
-int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
-void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
-void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr);
+int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
+int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
+ struct ost_lvb *lvb);
+int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf);
+void cl_object_prune(const struct lu_env *env, struct cl_object *obj);
+void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
 
 /**
 * Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
 /** @} cl_object */
 
 /** \defgroup cl_page cl_page
- * @{ */
+ * @{
+ */
 enum {
 CLP_GANG_OKAY = 0,
 CLP_GANG_RESCHED,
@@ -2781,34 +2796,26 @@ enum {
 
 /* callback of cl_page_gang_lookup() */
 typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
 struct cl_page *, void *);
-int cl_page_gang_lookup (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
- pgoff_t index);
-struct cl_page *cl_page_find (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type);
-struct cl_page *cl_page_find_sub (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
+int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata);
+struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type);
+struct cl_page *cl_page_find_sub(const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
 struct cl_page *parent);
-void cl_page_get (struct cl_page *page);
-void cl_page_put (const struct lu_env *env,
- struct cl_page *page);
-void cl_page_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-struct page *cl_page_vmpage (const struct lu_env *env,
- struct cl_page *page);
-struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top (struct cl_page *page);
+void cl_page_get(struct cl_page *page);
+void cl_page_put(const struct lu_env *env, struct cl_page *page);
+void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
+ const struct cl_page *pg);
+void cl_page_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_page *pg);
+struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
+struct cl_page *cl_page_top(struct cl_page *page);
 
 const struct cl_page_slice *cl_page_at(const struct cl_page *page,
 const struct lu_device_type *dtype);
@@ -2820,17 +2827,17 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page,
 */
 /** @{ */
 
-int cl_page_own (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_own_try (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_assume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_unassume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-void cl_page_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
+int cl_page_own(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_own_try(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_unassume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg);
+void cl_page_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
 
 /** @} ownership */
 
@@ -2841,19 +2848,19 @@ int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
 * tracking transfer state.
 */
 /** @{ */
-int cl_page_prep (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_completion (const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret);
-int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt);
-int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
- int from, int to);
-int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
-int cl_page_flush (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
+int cl_page_prep(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_completion(const struct lu_env *env,
+ struct cl_page *pg, enum cl_req_type crt, int ioret);
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
+ enum cl_req_type crt);
+int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
+ int from, int to);
+int cl_page_cancel(const struct lu_env *env, struct cl_page *page);
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
 
 /** @} transfer */
 
@@ -2862,24 +2869,22 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io,
 * Functions to discard, delete and export a cl_page.
*/ /** @{ */ -void cl_page_discard (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -void cl_page_delete (const struct lu_env *env, struct cl_page *pg); -int cl_page_unmap (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -int cl_page_is_vmlocked (const struct lu_env *env, - const struct cl_page *pg); -void cl_page_export (const struct lu_env *env, - struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -loff_t cl_offset (const struct cl_object *obj, pgoff_t idx); -pgoff_t cl_index (const struct cl_object *obj, loff_t offset); -int cl_page_size (const struct cl_object *obj); -int cl_pages_prune (const struct lu_env *env, struct cl_object *obj); - -void cl_lock_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock); +void cl_page_discard(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +void cl_page_delete(const struct lu_env *env, struct cl_page *pg); +int cl_page_unmap(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); +void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); +int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); +pgoff_t cl_index(const struct cl_object *obj, loff_t offset); +int cl_page_size(const struct cl_object *obj); +int cl_pages_prune(const struct lu_env *env, struct cl_object *obj); + +void cl_lock_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_lock *lock); void cl_lock_descr_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_lock_descr *descr); @@ -2888,7 +2893,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie, /** @} cl_page */ /** \defgroup cl_lock cl_lock - * @{ */ + * @{ + */ struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, const struct cl_lock_descr *need, @@ -2917,19 +2923,19 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype); -void cl_lock_get (struct cl_lock *lock); -void cl_lock_get_trust (struct cl_lock *lock); -void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); +void cl_lock_get(struct cl_lock *lock); +void cl_lock_get_trust(struct cl_lock *lock); +void cl_lock_put(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); -void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); +void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); +void cl_lock_release(const struct lu_env *env, struct cl_lock *lock, + const char 
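cl_offset() and cl_index() above convert between page indexes and byte offsets for an object. A sketch of the contract their types imply, assuming the two are inverses for page-aligned offsets:

static loff_t demo_round_trip(const struct cl_object *obj, pgoff_t idx)
{
	loff_t off = cl_offset(obj, idx);	/* page index -> byte offset */

	LASSERT(cl_index(obj, off) == idx);	/* inverse for aligned offsets */
	return off;
}
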
*scope, const void *source); +void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock); int cl_lock_is_intransit(struct cl_lock *lock); @@ -2966,52 +2972,53 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, * * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD * - * @{ */ + * @{ + */ -int cl_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_unuse (const struct lu_env *env, struct cl_lock *lock); -int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, - struct cl_io *io, __u32 flags); -int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); -int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); -int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); +int cl_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_unuse(const struct lu_env *env, struct cl_lock *lock); +int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, + struct cl_io *io, __u32 flags); +int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock); +int cl_wait_try(const struct lu_env *env, struct cl_lock *lock); +int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic); /** @} statemachine */ -void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, - enum cl_lock_state state); -int cl_queue_match (const struct list_head *queue, - const struct cl_lock_descr *need); - -void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_is_mutexed (struct cl_lock *lock); -int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); -int cl_lock_ext_match (const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_descr_match(const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need); -int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock, - const struct cl_lock_descr *desc); - -void cl_lock_closure_init (const struct lu_env *env, - struct cl_lock_closure *closure, - struct cl_lock *origin, int wait); -void cl_lock_closure_fini (struct cl_lock_closure *closure); -int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); -void cl_lock_disclosure (const struct lu_env *env, - struct cl_lock_closure *closure); -int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); +void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, + enum cl_lock_state state); +int cl_queue_match(const struct list_head *queue, + const struct cl_lock_descr *need); + +void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_is_mutexed(struct cl_lock *lock); +int cl_lock_nr_mutexed(const struct lu_env *env); +int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_ext_match(const struct cl_lock_descr *has, + const 
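The mutex helpers just reflowed come with their own assertion, so a caller can document the pairing explicitly; a minimal sketch using only the declarations above:

static void demo_under_lock_mutex(const struct lu_env *env,
				  struct cl_lock *lock)
{
	cl_lock_mutex_get(env, lock);
	LASSERT(cl_lock_is_mutexed(lock));
	/* ... lock state is inspected or changed under its mutex ... */
	cl_lock_mutex_put(env, lock);
}
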
struct cl_lock_descr *need); +int cl_lock_descr_match(const struct cl_lock_descr *has, + const struct cl_lock_descr *need); +int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need); +int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, + const struct cl_lock_descr *desc); + +void cl_lock_closure_init(const struct lu_env *env, + struct cl_lock_closure *closure, + struct cl_lock *origin, int wait); +void cl_lock_closure_fini(struct cl_lock_closure *closure); +int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); +void cl_lock_disclosure(const struct lu_env *env, + struct cl_lock_closure *closure); +int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); -void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); +void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); @@ -3019,39 +3026,40 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); /** @} cl_lock */ /** \defgroup cl_io cl_io - * @{ */ - -int cl_io_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_sub_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_rw_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, loff_t pos, size_t count); -int cl_io_loop (const struct lu_env *env, struct cl_io *io); - -void cl_io_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_iter_init (const struct lu_env *env, struct cl_io *io); -void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_lock (const struct lu_env *env, struct cl_io *io); -void cl_io_unlock (const struct lu_env *env, struct cl_io *io); -int cl_io_start (const struct lu_env *env, struct cl_io *io); -void cl_io_end (const struct lu_env *env, struct cl_io *io); -int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, - struct cl_io_lock_link *link); -int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue); -int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - long timeout); -int cl_io_is_going (const struct lu_env *env); + * @{ + */ + +int cl_io_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, loff_t pos, size_t count); +int cl_io_loop(const struct lu_env *env, struct cl_io *io); + +void cl_io_fini(const struct lu_env *env, struct cl_io 
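The cl_io entry points above imply an init/loop/fini lifecycle. A hedged sketch, assuming cl_io_init() returns 0 when the layers accept the io and that CIT_READ is a valid enum cl_io_type value (neither detail is visible in this hunk):

static int demo_run_io(const struct lu_env *env, struct cl_io *io,
		       struct cl_object *obj)
{
	int rc;

	rc = cl_io_init(env, io, CIT_READ, obj);	/* CIT_READ: assumed */
	if (rc == 0)
		rc = cl_io_loop(env, io);	/* drives iter/lock/start/end */
	cl_io_fini(env, io);			/* always paired with init */
	return rc;
}
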
*io); +int cl_io_iter_init(const struct lu_env *env, struct cl_io *io); +void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io); +int cl_io_lock(const struct lu_env *env, struct cl_io *io); +void cl_io_unlock(const struct lu_env *env, struct cl_io *io); +int cl_io_start(const struct lu_env *env, struct cl_io *io); +void cl_io_end(const struct lu_env *env, struct cl_io *io); +int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, + struct cl_io_lock_link *link); +int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, + struct cl_lock_descr *descr); +int cl_io_read_page(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue); +int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue, + long timeout); +int cl_io_is_going(const struct lu_env *env); /** * True, iff \a io is an O_APPEND write(2). @@ -3094,7 +3102,8 @@ do { \ /** @} cl_io */ /** \defgroup cl_page_list cl_page_list - * @{ */ + * @{ + */ /** * Last page in the page list. @@ -3117,40 +3126,41 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) #define cl_page_list_for_each_safe(page, temp, list) \ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) -void cl_page_list_init (struct cl_page_list *plist); -void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); -void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page); -void cl_page_list_splice (struct cl_page_list *list, - struct cl_page_list *head); -void cl_page_list_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); - -void cl_2queue_init (struct cl_2queue *queue); -void cl_2queue_disown (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_discard (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue); +void cl_page_list_init(struct cl_page_list *plist); +void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page); +void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, + struct cl_page *page); +void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head); +void cl_page_list_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page_list *plist); + +void cl_2queue_init(struct cl_2queue *queue); +void cl_2queue_disown(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_discard(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue); void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ /** \defgroup cl_req cl_req - * @{ */ + * @{ + */ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, enum cl_req_type crt, int nr_objects); -void cl_req_page_add (const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done (const struct lu_env *env, struct cl_page *page); -int cl_req_prep (const struct lu_env 
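The list helpers above combine naturally with the cl_page_list_for_each_safe() macro shown in the same hunk; for example, draining one list into another (a sketch, not from the patch):

static void demo_move_all(struct cl_page_list *dst, struct cl_page_list *src)
{
	struct cl_page *page;
	struct cl_page *temp;

	/* the _safe variant tolerates unlinking during the walk */
	cl_page_list_for_each_safe(page, temp, src)
		cl_page_list_move(dst, src, page);
}
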
*env, struct cl_req *req); -void cl_req_attr_set (const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags); +void cl_req_page_add(const struct lu_env *env, struct cl_req *req, + struct cl_page *page); +void cl_req_page_done(const struct lu_env *env, struct cl_page *page); +int cl_req_prep(const struct lu_env *env, struct cl_req *req); +void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, + struct cl_req_attr *attr, u64 flags); void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); /** \defgroup cl_sync_io cl_sync_io - * @{ */ + * @{ + */ /** * Anchor for synchronous transfer. This is allocated on a stack by thread @@ -3214,22 +3224,23 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); * - cl_env_reexit(cl_env_reenter had to be called priorly) * * \see lu_env, lu_context, lu_context_key - * @{ */ + * @{ + */ struct cl_env_nest { int cen_refcheck; void *cen_cookie; }; -struct lu_env *cl_env_get (int *refcheck); -struct lu_env *cl_env_alloc (int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get (struct cl_env_nest *nest); -void cl_env_put (struct lu_env *env, int *refcheck); -void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter (void); -void cl_env_reexit (void *cookie); -void cl_env_implant (struct lu_env *env, int *refcheck); -void cl_env_unplant (struct lu_env *env, int *refcheck); +struct lu_env *cl_env_get(int *refcheck); +struct lu_env *cl_env_alloc(int *refcheck, __u32 tags); +struct lu_env *cl_env_nested_get(struct cl_env_nest *nest); +void cl_env_put(struct lu_env *env, int *refcheck); +void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env); +void *cl_env_reenter(void); +void cl_env_reexit(void *cookie); +void cl_env_implant(struct lu_env *env, int *refcheck); +void cl_env_unplant(struct lu_env *env, int *refcheck); /** @} cl_env */ diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h index 36e7a6767..5d839a9f7 100644 --- a/drivers/staging/lustre/lustre/include/lclient.h +++ b/drivers/staging/lustre/lustre/include/lclient.h @@ -127,7 +127,7 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env) struct ccc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &ccc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -156,7 +156,7 @@ static inline struct ccc_session *ccc_env_session(const struct lu_env *env) struct ccc_session *ses; ses = lu_context_key_get(env->le_ses, &ccc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -383,7 +383,8 @@ void cl_put_grouplock(struct ccc_grouplock *cg); * * NB: If you find you have to use these interfaces for your new code, please * think about it again. These interfaces may be removed in the future for - * better layering. */ + * better layering. 
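The cl_env_get()/cl_env_put() pair above threads a refcheck cookie through both calls. A usage sketch, assuming the function reports failure with an ERR_PTR() value (this hunk does not show the body):

static void demo_with_env(void)
{
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return;				/* assumed failure convention */
	/* ... use env ... */
	cl_env_put(env, &refcheck);		/* cookie must match the get */
}
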
+ */ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); int lov_read_and_clear_async_rc(struct cl_object *clob); diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h index 33e0b99e1..c6c7f5463 100644 --- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h +++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h @@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) return; if (PagePrivate(page)) - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); cancel_dirty_page(page); ClearPageMappedToDisk(page); diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h index 468bc28be..3907bf4ce 100644 --- a/drivers/staging/lustre/lustre/include/linux/obd.h +++ b/drivers/staging/lustre/lustre/include/linux/obd.h @@ -57,23 +57,23 @@ struct ll_iattr { #define CLIENT_OBD_LIST_LOCK_DEBUG 1 -typedef struct { +struct client_obd_lock { spinlock_t lock; unsigned long time; struct task_struct *task; const char *func; int line; -} client_obd_lock_t; +}; -static inline void __client_obd_list_lock(client_obd_lock_t *lock, +static inline void __client_obd_list_lock(struct client_obd_lock *lock, const char *func, int line) { unsigned long cur = jiffies; while (1) { if (spin_trylock(&lock->lock)) { - LASSERT(lock->task == NULL); + LASSERT(!lock->task); lock->task = current; lock->func = func; lock->line = line; @@ -85,7 +85,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, time_before(lock->time + 5 * HZ, jiffies)) { struct task_struct *task = lock->task; - if (task == NULL) + if (!task) continue; LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n", @@ -106,20 +106,20 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, #define client_obd_list_lock(lock) \ __client_obd_list_lock(lock, __func__, __LINE__) -static inline void client_obd_list_unlock(client_obd_lock_t *lock) +static inline void client_obd_list_unlock(struct client_obd_lock *lock) { - LASSERT(lock->task != NULL); + LASSERT(lock->task); lock->task = NULL; lock->time = jiffies; spin_unlock(&lock->lock); } -static inline void client_obd_list_lock_init(client_obd_lock_t *lock) +static inline void client_obd_list_lock_init(struct client_obd_lock *lock) { spin_lock_init(&lock->lock); } -static inline void client_obd_list_lock_done(client_obd_lock_t *lock) +static inline void client_obd_list_lock_done(struct client_obd_lock *lock) {} #endif /* __LINUX_OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h index 0ac8e0edc..4146c9c39 100644 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h @@ -54,7 +54,7 @@ struct lprocfs_vars { struct file_operations *fops; void *data; /** - * /proc file mode. + * sysfs file mode. 
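The struct client_obd_lock helpers above are fully visible, so their intended use is clear: the wrapper records the holder's task, function, and line so that a spinning waiter can name the culprit. A sketch with a hypothetical instance:

static struct client_obd_lock demo_lock;	/* hypothetical instance */

static void demo_critical_section(void)
{
	client_obd_list_lock_init(&demo_lock);
	client_obd_list_lock(&demo_lock);	/* records __func__/__LINE__ */
	/* ... demo_lock.task now names this thread ... */
	client_obd_list_unlock(&demo_lock);
	client_obd_list_lock_done(&demo_lock);
}
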
*/ umode_t proc_mode; }; @@ -175,7 +175,8 @@ struct lprocfs_percpu { enum lprocfs_stats_flags { LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu - * area and need locking */ + * area and need locking + */ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */ }; @@ -196,7 +197,8 @@ struct lprocfs_stats { unsigned short ls_biggest_alloc_num; enum lprocfs_stats_flags ls_flags; /* Lock used when there are no percpu stats areas; For percpu stats, - * it is used to protect ls_biggest_alloc_num change */ + * it is used to protect ls_biggest_alloc_num change + */ spinlock_t ls_lock; /* has ls_num of counter headers */ @@ -274,20 +276,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(OST)); } else if (opc < FLD_LAST_OPC) { /* FLD opcode */ - return (opc - FLD_FIRST_OPC + - OPC_RANGE(SEC) + - OPC_RANGE(SEQ) + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < UPDATE_LAST_OPC) { - /* update opcode */ - return (opc - UPDATE_FIRST_OPC + - OPC_RANGE(FLD) + + return (opc - FLD_FIRST_OPC + OPC_RANGE(SEC) + OPC_RANGE(SEQ) + OPC_RANGE(QUOTA) + @@ -312,8 +301,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(SEC) + \ OPC_RANGE(SEQ) + \ OPC_RANGE(SEC) + \ - OPC_RANGE(FLD) + \ - OPC_RANGE(UPDATE)) + OPC_RANGE(FLD)) #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \ OPC_RANGE(EXTRA)) @@ -407,7 +395,7 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc, } else { unsigned int cpuid = get_cpu(); - if (unlikely(stats->ls_percpu[cpuid] == NULL)) { + if (unlikely(!stats->ls_percpu[cpuid])) { rc = lprocfs_stats_alloc_one(stats, cpuid); if (rc < 0) { put_cpu(); @@ -438,12 +426,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_SMP_ID: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } else { put_cpu(); } @@ -451,12 +437,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_NUM_CPU: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } return; } @@ -521,11 +505,11 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, unsigned long flags = 0; __u64 ret = 0; - LASSERT(stats != NULL); + LASSERT(stats); num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; ret += lprocfs_read_helper( lprocfs_stats_counter_get(stats, i, idx), @@ -608,7 +592,7 @@ int lprocfs_write_helper(const char __user *buffer, unsigned long count, int *val); int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, __u64 *val); -int lprocfs_write_frac_u64_helper(const char *buffer, +int lprocfs_write_frac_u64_helper(const char __user *buffer, unsigned long count, __u64 *val, int mult); char *lprocfs_find_named_value(const char *buffer, const 
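The unlock paths above mirror an acquisition that is only partially visible in this hunk; a hedged reconstruction of the lock side, using the same flags and the get_cpu() call shown in lprocfs_stats_lock():

static void demo_stats_lock(struct lprocfs_stats *stats, unsigned long *flags)
{
	if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
		/* single shared area: serialize on ls_lock */
		if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
			spin_lock_irqsave(&stats->ls_lock, *flags);
		else
			spin_lock(&stats->ls_lock);
	} else {
		get_cpu();	/* per-cpu areas: pinning the CPU suffices */
	}
}
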
char *name, @@ -625,9 +609,10 @@ int lprocfs_single_release(struct inode *, struct file *); int lprocfs_seq_release(struct inode *, struct file *); /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only - proc entries; otherwise, you will define name##_seq_write function also for - a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, - call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */ + * proc entries; otherwise, you will define name##_seq_write function also for + * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, + * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); + */ #define __LPROC_SEQ_FOPS(name, custom_seq_write) \ static int name##_single_open(struct inode *inode, struct file *file) \ { \ diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h index 1d79341a4..242bb1ef6 100644 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ b/drivers/staging/lustre/lustre/include/lu_object.h @@ -164,11 +164,12 @@ struct lu_device_operations { /** * For lu_object_conf flags */ -typedef enum { +enum loc_flags { /* This is a new object to be allocated, or the file - * corresponding to the object does not exists. */ + * corresponding to the object does not exists. + */ LOC_F_NEW = 0x00000001, -} loc_flags_t; +}; /** * Object configuration, describing particulars of object being created. On @@ -179,7 +180,7 @@ struct lu_object_conf { /** * Some hints for obj find and alloc. */ - loc_flags_t loc_flags; + enum loc_flags loc_flags; }; /** @@ -392,7 +393,7 @@ struct lu_device_type_operations { static inline int lu_device_is_md(const struct lu_device *d) { - return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD); + return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD); } /** @@ -488,7 +489,7 @@ enum lu_object_header_flags { /** * Mark this object has already been taken out of cache. */ - LU_OBJECT_UNHASHED = 1 + LU_OBJECT_UNHASHED = 1, }; enum lu_object_header_attr { @@ -756,7 +757,7 @@ static inline const struct lu_fid *lu_object_fid(const struct lu_object *o) /** * return device operations vector for this object */ -static const inline struct lu_device_operations * +static inline const struct lu_device_operations * lu_object_ops(const struct lu_object *o) { return o->lo_dev->ld_ops; @@ -895,7 +896,8 @@ enum lu_xattr_flags { /** @} helpers */ /** \name lu_context - * @{ */ + * @{ + */ /** For lu_context health-checks */ enum lu_context_state { @@ -1116,10 +1118,10 @@ struct lu_context_key { { \ type *value; \ \ - CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ + CLASSERT(PAGE_SIZE >= sizeof (*value)); \ \ value = kzalloc(sizeof(*value), GFP_NOFS); \ - if (value == NULL) \ + if (!value) \ value = ERR_PTR(-ENOMEM); \ \ return value; \ @@ -1174,7 +1176,7 @@ void lu_context_key_revive (struct lu_context_key *key); do { \ LU_CONTEXT_KEY_INIT(key); \ key = va_arg(args, struct lu_context_key *); \ - } while (key != NULL); \ + } while (key); \ va_end(args); \ } diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h index 97cd157dd..f7dfd8395 100644 --- a/drivers/staging/lustre/lustre/include/lu_ref.h +++ b/drivers/staging/lustre/lustre/include/lu_ref.h @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
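One real fix hides in the lu_object.h noise: "static const inline" becomes "static inline const". Both orders parse, but the corrected form keeps the function specifiers together and leaves const qualifying the returned pointee, which is also what checkpatch.pl expects. A hypothetical reduction:

struct demo_ops { int dummy; };

static inline const struct demo_ops *demo_get_ops(const struct demo_ops *o)
{
	return o;	/* callers receive a read-only view */
}
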
* - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * */ #ifndef __LUSTRE_LU_REF_H diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h index 09088f40b..07d45de69 100644 --- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h +++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h @@ -47,9 +47,11 @@ struct ll_fiemap_extent { __u64 fe_logical; /* logical offset in bytes for the start of - * the extent from the beginning of the file */ + * the extent from the beginning of the file + */ __u64 fe_physical; /* physical offset in bytes for the start - * of the extent from the beginning of the disk */ + * of the extent from the beginning of the disk + */ __u64 fe_length; /* length in bytes for this extent */ __u64 fe_reserved64[2]; __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ @@ -59,9 +61,11 @@ struct ll_fiemap_extent { struct ll_user_fiemap { __u64 fm_start; /* logical offset (inclusive) at - * which to start mapping (in) */ + * which to start mapping (in) + */ __u64 fm_length; /* logical length of mapping which - * userspace wants (in) */ + * userspace wants (in) + */ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ __u32 fm_extent_count; /* size of fm_extents array (in) */ @@ -71,28 +75,38 @@ struct ll_user_fiemap { #define FIEMAP_MAX_OFFSET (~0ULL) -#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ -#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ - -#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ -#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ -#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. - * Sets EXTENT_UNKNOWN. */ -#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read - * while fs is unmounted */ -#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. - * Sets EXTENT_NO_DIRECT. */ +#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before + * map + */ +#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute + * tree + */ +#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ +#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ +#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. + * Sets EXTENT_UNKNOWN. + */ +#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read + * while fs is unmounted + */ +#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. + * Sets EXTENT_NO_DIRECT. + */ #define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be - * block aligned. */ + * block aligned. + */ #define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. - * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but - * no data (i.e. zero). */ -#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively +#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. + * Sets EXTENT_NOT_ALIGNED. + */ +#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but + * no data (i.e. zero). + */ +#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively * support extents. 
Result - * merged for efficiency. */ + * merged for efficiency. + */ static inline size_t fiemap_count_to_size(size_t extent_count) { @@ -114,7 +128,8 @@ static inline unsigned fiemap_size_to_count(size_t array_size) /* Lustre specific flags - use a high bit, don't conflict with upstream flag */ #define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */ -#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. - * Sets NO_DIRECT flag */ +#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. + * Sets NO_DIRECT flag + */ #endif /* _LUSTRE_FIEMAP_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h deleted file mode 100644 index 93a3d7db3..000000000 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h +++ /dev/null @@ -1,2 +0,0 @@ -#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0" -#define LUSTRE_RELEASE 3.9.0_g6e62c21 diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index b064b5821..5aae1d06a 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -113,25 +113,25 @@ #define CONNMGR_REQUEST_PORTAL 1 #define CONNMGR_REPLY_PORTAL 2 -//#define OSC_REQUEST_PORTAL 3 +/*#define OSC_REQUEST_PORTAL 3 */ #define OSC_REPLY_PORTAL 4 -//#define OSC_BULK_PORTAL 5 +/*#define OSC_BULK_PORTAL 5 */ #define OST_IO_PORTAL 6 #define OST_CREATE_PORTAL 7 #define OST_BULK_PORTAL 8 -//#define MDC_REQUEST_PORTAL 9 +/*#define MDC_REQUEST_PORTAL 9 */ #define MDC_REPLY_PORTAL 10 -//#define MDC_BULK_PORTAL 11 +/*#define MDC_BULK_PORTAL 11 */ #define MDS_REQUEST_PORTAL 12 -//#define MDS_REPLY_PORTAL 13 +/*#define MDS_REPLY_PORTAL 13 */ #define MDS_BULK_PORTAL 14 #define LDLM_CB_REQUEST_PORTAL 15 #define LDLM_CB_REPLY_PORTAL 16 #define LDLM_CANCEL_REQUEST_PORTAL 17 #define LDLM_CANCEL_REPLY_PORTAL 18 -//#define PTLBD_REQUEST_PORTAL 19 -//#define PTLBD_REPLY_PORTAL 20 -//#define PTLBD_BULK_PORTAL 21 +/*#define PTLBD_REQUEST_PORTAL 19 */ +/*#define PTLBD_REPLY_PORTAL 20 */ +/*#define PTLBD_BULK_PORTAL 21 */ #define MDS_SETATTR_PORTAL 22 #define MDS_READPAGE_PORTAL 23 #define OUT_PORTAL 24 @@ -146,7 +146,9 @@ #define SEQ_CONTROLLER_PORTAL 32 #define MGS_BULK_PORTAL 33 -/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */ +/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, + * n8851@cray.com + */ /* packet types */ #define PTL_RPC_MSG_REQUEST 4711 @@ -295,7 +297,8 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, fld_range_is_mdt(range) ? "mdt" : "ost" /** \defgroup lu_fid lu_fid - * @{ */ + * @{ + */ /** * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. @@ -307,7 +310,8 @@ enum lma_compat { LMAC_SOM = 0x00000002, LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is - * under /O//d. */ + * under /O//d. 
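fiemap_count_to_size() and fiemap_size_to_count() appear above only as hunk context, but given the two structures earlier in this header the arithmetic is presumably the plain header-plus-array computation; a hypothetical reconstruction (the demo_ prefix marks it as not the patched source):

static inline size_t demo_fiemap_count_to_size(size_t extent_count)
{
	/* fixed request header followed by an array of extents */
	return sizeof(struct ll_user_fiemap) +
	       extent_count * sizeof(struct ll_fiemap_extent);
}

static inline unsigned demo_fiemap_size_to_count(size_t array_size)
{
	return (array_size - sizeof(struct ll_user_fiemap)) /
	       sizeof(struct ll_fiemap_extent);
}
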
+ */ }; /** @@ -319,7 +323,8 @@ enum lma_incompat { LMAI_RELEASED = 0x00000001, /* file is released */ LMAI_AGENT = 0x00000002, /* agent inode */ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object - is on the remote MDT */ + * is on the remote MDT + */ }; #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) @@ -395,12 +400,14 @@ enum fid_seq { FID_SEQ_LOCAL_FILE = 0x200000001ULL, FID_SEQ_DOT_LUSTRE = 0x200000002ULL, /* sequence is used for local named objects FIDs generated - * by local_object_storage library */ + * by local_object_storage library + */ FID_SEQ_LOCAL_NAME = 0x200000003ULL, /* Because current FLD will only cache the fid sequence, instead * of oid on the client side, if the FID needs to be exposed to * clients sides, it needs to make sure all of fids under one - * sequence will be located in one MDT. */ + * sequence will be located in one MDT. + */ FID_SEQ_SPECIAL = 0x200000004ULL, FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, @@ -601,7 +608,8 @@ static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) oi->oi_fid.f_seq = seq; /* Note: if f_oid + f_ver is zero, we need init it * to be 1, otherwise, ostid_seq will treat this - * as old ostid (oi_seq == 0) */ + * as old ostid (oi_seq == 0) + */ if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0) oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; } @@ -630,15 +638,13 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { if (fid_seq_is_mdt0(ostid_seq(oi))) { if (oid >= IDIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; } else { if (oid > OBIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi_fid.f_oid = oid; @@ -689,11 +695,12 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. */ + * of 1M objects/s/OST for 9 years, or combinations thereof. + */ if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); /* truncate to 32 bits by assignment */ @@ -704,10 +711,11 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, /* This is either an IDIF object, which identifies objects across * all OSTs, or a regular FID. The IDIF namespace maps legacy * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. */ + * pass the FID through, no conversion needed. 
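A caller of ostid_to_fid() sees the bounds checks above as a single -EBADF; a usage sketch built from the declarations and macros in this hunk (the demo_ wrapper is hypothetical):

static int demo_map_ostid(struct lu_fid *fid, struct ost_id *oi, __u32 ost_idx)
{
	int rc = ostid_to_fid(fid, oi, ost_idx);

	if (rc)	/* -EBADF: id out of IDIF range or has a version set */
		CERROR("cannot map " DOSTID " on ost %u\n", POSTID(oi), ost_idx);
	return rc;
}
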
+ */ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; @@ -807,7 +815,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) static inline int fid_is_sane(const struct lu_fid *fid) { - return fid != NULL && + return fid && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || fid_is_igif(fid) || fid_is_idif(fid) || fid_seq_is_rsvd(fid_seq(fid))); @@ -868,7 +876,8 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi, /** @} lu_fid */ /** \defgroup lu_dir lu_dir - * @{ */ + * @{ + */ /** * Enumeration of possible directory entry attributes. @@ -880,24 +889,8 @@ enum lu_dirent_attrs { LUDA_FID = 0x0001, LUDA_TYPE = 0x0002, LUDA_64BITHASH = 0x0004, - - /* The following attrs are used for MDT internal only, - * not visible to client */ - - /* Verify the dirent consistency */ - LUDA_VERIFY = 0x8000, - /* Only check but not repair the dirent inconsistency */ - LUDA_VERIFY_DRYRUN = 0x4000, - /* The dirent has been repaired, or to be repaired (dryrun). */ - LUDA_REPAIR = 0x2000, - /* The system is upgraded, has beed or to be repaired (dryrun). */ - LUDA_UPGRADE = 0x1000, - /* Ignore this record, go to next directly. */ - LUDA_IGNORE = 0x0800, }; -#define LU_DIRENT_ATTRS_MASK 0xf800 - /** * Layout of readdir pages, as transmitted on wire. */ @@ -1029,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent) * MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than PAGE_CACHE_SIZE because the client needs to + * It's different than PAGE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. + * lu_dirpage header is if client and server PAGE_SIZE differ. */ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ @@ -1128,7 +1121,8 @@ struct ptlrpc_body_v2 { __u32 pb_conn_cnt; __u32 pb_timeout; /* for req, the deadline, for rep, the service est */ __u32 pb_service_time; /* for rep, actual service time, also used for - net_latency of req */ + * net_latency of req + */ __u32 pb_limit; __u64 pb_slv; /* VBR: pre-versions */ @@ -1174,7 +1168,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* #define MSG_AT_SUPPORT 0x0008 * This was used in early prototypes of adaptive timeouts, and while there * shouldn't be any users of that code there also isn't a need for using this - * bits. Defer usage until at least 1.10 to avoid potential conflict. */ + * bits. Defer usage until at least 1.10 to avoid potential conflict. 
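The LU_PAGE_* constants above fix the wire directory page at 4 KiB regardless of the kernel page size; a worked example, assuming a hypothetical 64 KiB PAGE_SIZE:

/* with PAGE_SHIFT == 16 (64 KiB pages, e.g. some ppc64 configs): */
#define DEMO_PAGE_SHIFT		16
#define DEMO_LU_PAGE_COUNT	(1 << (DEMO_PAGE_SHIFT - LU_PAGE_SHIFT))
/* DEMO_LU_PAGE_COUNT == 1 << (16 - 12) == 16 wire pages per kernel page */
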
+ */ #define MSG_DELAY_REPLAY 0x0010 #define MSG_VERSION_REPLAY 0x0020 #define MSG_REQ_REPLAY_DONE 0x0040 @@ -1187,7 +1182,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_RECOVERING 0x00000001 #define MSG_CONNECT_RECONNECT 0x00000002 #define MSG_CONNECT_REPLAYABLE 0x00000004 -//#define MSG_CONNECT_PEER 0x8 +/*#define MSG_CONNECT_PEER 0x8 */ #define MSG_CONNECT_LIBCLIENT 0x00000010 #define MSG_CONNECT_INITIAL 0x00000020 #define MSG_CONNECT_ASYNC 0x00000040 @@ -1195,60 +1190,65 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ /* Connect flags */ -#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ -#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ -#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ -#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ -#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ -#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ -#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ +#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ +#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ +#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ +#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ +#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ +#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ +#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ +#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ +#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/ -#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ -#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ -#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ +#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ +#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ +#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated. *We do not support JOIN FILE *anymore, reserve this flags *just for preventing such bit - *to be reused.*/ -#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ -#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ -#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ + *to be reused. + */ +#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ +#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ +#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ -#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ -#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. 
*/ -#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ -#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ +#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ +#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ +#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ +#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ +#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ +#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */ -#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ +#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ -#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ -#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ +#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ +#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ +#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */ #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */ #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */ #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits - * directory hash */ + * directory hash + */ #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */ #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */ #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */ #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */ #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS - * RPC error properly */ + * RPC error properly + */ #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for - * finer space reservation */ + * finer space reservation + */ #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8 - * policy and 2.x server */ + * policy and 2.x server + */ #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */ #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */ #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ @@ -1264,61 +1264,19 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * submit a small patch against EVERY branch that ONLY adds the new flag, * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the * flag to check_obd_connect_data(), and updates wiretests accordingly, so it - * can be approved and landed easily to reserve the flag for future use. */ + * can be approved and landed easily to reserve the flag for future use. + */ /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS * connection. 
It is a temporary bug fix for Imperative Recovery interop * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for - * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */ + * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. + */ #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS #define OCD_HAS_FLAG(ocd, flg) \ (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg)) -#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE - -#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \ - OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ - OBD_CONNECT_IBITS | \ - OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \ - OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \ - OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ - OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ - OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ - OBD_CONNECT_FLOCK_DEAD | \ - OBD_CONNECT_DISP_STRIPE) - -#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ - OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ - OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ - OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ - OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \ - OBD_CONNECT_MAX_EASIZE | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ - OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) -#define ECHO_CONNECT_SUPPORTED (0) -#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ - OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ - OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS) - /* Features required for this version of the client to work with server */ #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) @@ -1334,7 +1292,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* This structure is used for both request and reply. * * If we eventually have separate connect data for different types, which we - * almost certainly will, then perhaps we stick a union in here. */ + * almost certainly will, then perhaps we stick a union in here. 
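With the *_CONNECT_SUPPORTED masks removed from this header, OCD_HAS_FLAG() is the surviving way to test a negotiated feature; a usage sketch (the demo_ function is hypothetical):

static int demo_peer_supports_acl(const struct obd_connect_data *ocd)
{
	return OCD_HAS_FLAG(ocd, ACL);	/* !!(flags & OBD_CONNECT_ACL) */
}
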
+ */ struct obd_connect_data_v1 { __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ __u32 ocd_version; /* lustre release version number */ @@ -1364,7 +1323,7 @@ struct obd_connect_data { __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ + __u32 ocd_unused; /* also fix lustre_swab_connect */ __u64 ocd_transno; /* first transno from client to be replayed */ __u32 ocd_group; /* MDS group on OST */ __u32 ocd_cksum_types; /* supported checksum algorithms */ @@ -1374,7 +1333,8 @@ struct obd_connect_data { /* Fields after ocd_maxbytes are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. + */ __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ @@ -1398,7 +1358,8 @@ struct obd_connect_data { * with senior engineers before starting to use a new field. Then, submit * a small patch against EVERY branch that ONLY adds the new field along with * the matching OBD_CONNECT flag, so that can be approved and landed easily to - * reserve the flag for future use. */ + * reserve the flag for future use. + */ void lustre_swab_connect(struct obd_connect_data *ocd); @@ -1408,18 +1369,18 @@ void lustre_swab_connect(struct obd_connect_data *ocd); * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new * algorithm and also the OBD_FL_CKSUM* flags. */ -typedef enum { +enum cksum_type { OBD_CKSUM_CRC32 = 0x00000001, OBD_CKSUM_ADLER = 0x00000002, OBD_CKSUM_CRC32C = 0x00000004, -} cksum_type_t; +}; /* * OST requests: OBDO & OBD request records */ /* opcodes */ -typedef enum { +enum ost_cmd { OST_REPLY = 0, /* reply ? */ OST_GETATTR = 1, OST_SETATTR = 2, @@ -1440,14 +1401,14 @@ typedef enum { OST_QUOTACTL = 19, OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC -} ost_cmd_t; +}; #define OST_FIRST_OPC OST_REPLY enum obdo_flags { OBD_FL_INLINEDATA = 0x00000001, OBD_FL_OBDMDEXISTS = 0x00000002, OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ - OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ + OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */ OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ @@ -1461,14 +1422,16 @@ enum obdo_flags { OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ - OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. + OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. * XXX: obsoleted - reserved for old - * clients prior than 2.2 */ + * clients prior than 2.2 + */ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ /* Note that while these checksum values are currently separate bits, - * in 2.x we can actually allow all values from 1-31 if we wanted. 
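The enum conversions here all follow one pattern: drop "typedef enum { ... } foo_t;" in favour of a plain named enum, in line with the kernel coding style's dislike of typedefs that merely hide a tag. A hypothetical reduction of the pattern:

/* before: only the typedef name is usable */
/* typedef enum { DEMO_FIRST = 1 } demo_cmd_t; */

/* after: callers write 'enum demo_cmd' explicitly */
enum demo_cmd {
	DEMO_FIRST = 1,
};
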
*/ + * in 2.x we can actually allow all values from 1-31 if we wanted. + */ OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER | OBD_FL_CKSUM_CRC32C, @@ -1657,7 +1620,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) } } -#define OBD_MD_FLID (0x00000001ULL) /* object ID */ +#define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */ #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */ @@ -1683,22 +1646,23 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ - /* ->mds if epoch opens or closes */ + /* ->mds if epoch opens or closes + */ #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */ #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */ #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */ -#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ +#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ -#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ +#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */ -#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ +#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */ #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */ #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */ @@ -1707,7 +1671,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes * under lock; for xattr * requests means the - * client holds the lock */ + * client holds the lock + */ #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -1727,7 +1692,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) /* don't forget obdo_fid which is way down at the bottom so it can - * come after the definition of llog_cookie */ + * come after the definition of llog_cookie + */ enum hss_valid { HSS_SETMASK = 0x01, @@ -1749,19 +1715,20 @@ void lustre_swab_obd_statfs(struct obd_statfs *os); /* ost_body.data values for OST_BRW */ -#define OBD_BRW_READ 0x01 -#define OBD_BRW_WRITE 0x02 -#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous +#define OBD_BRW_READ 0x01 +#define OBD_BRW_WRITE 0x02 +#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) +#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous * transfer and is not accounted in - * the grant. */ -#define OBD_BRW_CHECK 0x10 + * the grant. 
+ */ +#define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ -#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ -#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ -#define OBD_BRW_NOQUOTA 0x100 -#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ -#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ +#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ +#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ +#define OBD_BRW_NOQUOTA 0x100 +#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ +#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ @@ -1775,7 +1742,8 @@ struct obd_ioobj { struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, * now (PTLRPC_BULK_OPS_COUNT - 1) in - * high 16 bits in 2.4 and later */ + * high 16 bits in 2.4 and later + */ __u32 ioo_bufcnt; /* number of niobufs for this object */ }; @@ -1799,7 +1767,8 @@ void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ /* OST_LVB_ERR_INIT is needed because the return code in rc is - * negative, i.e. because ((MASK + rc) & MASK) != MASK. */ + * negative, i.e. because ((MASK + rc) & MASK) != MASK. + */ #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL #define OST_LVB_IS_ERR(blocks) \ @@ -1836,23 +1805,12 @@ void lustre_swab_ost_lvb(struct ost_lvb *lvb); * lquota data structures */ -#ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 -#endif - -#ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) -#endif - -#ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) -#endif - /* The lquota_id structure is an union of all the possible identifier types that * can be used with quota, this includes: * - 64-bit user ID * - 64-bit group ID - * - a FID which can be used for per-directory quota in the future */ + * - a FID which can be used for per-directory quota in the future + */ union lquota_id { struct lu_fid qid_fid; /* FID for per-directory quota */ __u64 qid_uid; /* user identifier */ @@ -1889,89 +1847,6 @@ do { \ Q_COPY(out, in, qc_dqblk); \ } while (0) -/* Body of quota request used for quota acquire/release RPCs between quota - * master (aka QMT) and slaves (ak QSD). */ -struct quota_body { - struct lu_fid qb_fid; /* FID of global index packing the pool ID - * and type (data or metadata) as well as - * the quota type (user or group). */ - union lquota_id qb_id; /* uid or gid or directory FID */ - __u32 qb_flags; /* see below */ - __u32 qb_padding; - __u64 qb_count; /* acquire/release count (kbytes/inodes) */ - __u64 qb_usage; /* current slave usage (kbytes/inodes) */ - __u64 qb_slv_ver; /* slave index file version */ - struct lustre_handle qb_lockh; /* per-ID lock handle */ - struct lustre_handle qb_glb_lockh; /* global lock handle */ - __u64 qb_padding1[4]; -}; - -/* When the quota_body is used in the reply of quota global intent - * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. 
*/ -#define qb_slv_fid qb_fid -/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in - * quota reply */ -#define qb_qunit qb_usage - -#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */ -#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */ -#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */ -#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */ - -void lustre_swab_quota_body(struct quota_body *b); - -/* Quota types currently supported */ -enum { - LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */ - LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */ - LQUOTA_TYPE_MAX -}; - -/* There are 2 different resource types on which a quota limit can be enforced: - * - inodes on the MDTs - * - blocks on the OSTs */ -enum { - LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */ - LQUOTA_RES_DT = 0x02, - LQUOTA_LAST_RES, - LQUOTA_FIRST_RES = LQUOTA_RES_MD -}; - -#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1) - -/* - * Space accounting support - * Format of an accounting record, providing disk usage information for a given - * user or group - */ -struct lquota_acct_rec { /* 16 bytes */ - __u64 bspace; /* current space in use */ - __u64 ispace; /* current # inodes in use */ -}; - -/* - * Global quota index support - * Format of a global record, providing global quota settings for a given quota - * identifier - */ -struct lquota_glb_rec { /* 32 bytes */ - __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */ - __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */ - __u64 qbr_time; /* grace time, in seconds */ - __u64 qbr_granted; /* how much is granted to slaves, in #inodes or - * kbytes */ -}; - -/* - * Slave index support - * Format of a slave record, recording how much space is granted to a given - * slave - */ -struct lquota_slv_rec { /* 8 bytes */ - __u64 qsr_granted; /* space granted to the slave for the key=ID, - * in #inodes or kbytes */ -}; - /* Data structures associated with the quota locks */ /* Glimpse descriptor used for the index & per-ID quota locks */ @@ -1985,9 +1860,6 @@ struct ldlm_gl_lquota_desc { __u64 gl_pad2; }; -#define gl_qunit gl_hardlimit /* current qunit value used when - * glimpsing per-ID quota locks */ - /* quota glimpse flags */ #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */ @@ -2002,15 +1874,12 @@ struct lquota_lvb { void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); -/* LVB used with global quota lock */ -#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */ - /* op codes */ -typedef enum { +enum quota_cmd { QUOTA_DQACQ = 601, QUOTA_DQREL = 602, QUOTA_LAST_OPC -} quota_cmd_t; +}; #define QUOTA_FIRST_OPC QUOTA_DQACQ /* @@ -2018,7 +1887,7 @@ typedef enum { */ /* opcodes */ -typedef enum { +enum mds_cmd { MDS_GETATTR = 33, MDS_GETATTR_NAME = 34, MDS_CLOSE = 35, @@ -2049,23 +1918,15 @@ typedef enum { MDS_HSM_CT_UNREGISTER = 60, MDS_SWAP_LAYOUTS = 61, MDS_LAST_OPC -} mds_cmd_t; +}; #define MDS_FIRST_OPC MDS_GETATTR -/* opcodes for object update */ -typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC -} update_cmd_t; - -#define UPDATE_FIRST_OPC UPDATE_OBJ - /* * Do not exceed 63 */ -typedef enum { +enum mdt_reint_cmd { REINT_SETATTR = 1, REINT_CREATE = 2, REINT_LINK = 3, @@ -2074,9 +1935,9 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, +/* REINT_WRITE = 9, */ REINT_MAX -} mds_reint_t, mdt_reint_t; +}; void lustre_swab_generic_32s(__u32 *val); @@ -2097,7 +1958,8 @@ void lustre_swab_generic_32s(__u32 *val); /* INODE LOCK 
PARTS */ #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also * was used to protect permission (mode, - * owner, group etc) before 2.4. */ + * owner, group etc) before 2.4. + */ #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ @@ -2110,7 +1972,8 @@ void lustre_swab_generic_32s(__u32 *val); * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. * For Remote directory, the master MDT, where the remote directory is, will * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, - * will grant LOOKUP_LOCK. */ + * will grant LOOKUP_LOCK. + */ #define MDS_INODELOCK_PERM 0x000010 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ @@ -2120,7 +1983,8 @@ void lustre_swab_generic_32s(__u32 *val); /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2], * but was moved into name[1] along with the OID to avoid consuming the - * name[2,3] fields that need to be used for the quota id (also a FID). */ + * name[2,3] fields that need to be used for the quota id (also a FID). + */ enum { LUSTRE_RES_ID_SEQ_OFF = 0, LUSTRE_RES_ID_VER_OID_OFF = 1, @@ -2156,7 +2020,8 @@ enum md_op_flags { #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1 /* these should be identical to their EXT4_*_FL counterparts, they are - * redefined here only to avoid dragging in fs/ext4/ext4.h */ + * redefined here only to avoid dragging in fs/ext4/ext4.h + */ #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ @@ -2168,15 +2033,14 @@ enum md_op_flags { * protocol equivalents of LDISKFS_*_FL values stored on disk, while * the S_* flags are kernel-internal values that change between kernel * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS. - * See b=16526 for a full history. */ + * See b=16526 for a full history. + */ static inline int ll_ext_to_inode_flags(int flags) { return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) | ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) | ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) | -#if defined(S_DIRSYNC) ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) | -#endif ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0)); } @@ -2185,9 +2049,7 @@ static inline int ll_inode_to_ext_flags(int iflags) return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) | ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) | ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) | -#if defined(S_DIRSYNC) ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) | -#endif ((iflags & S_IMMUTABLE) ? 
LUSTRE_IMMUTABLE_FL : 0)); } @@ -2207,9 +2069,10 @@ struct mdt_body { __s64 ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ __u64 ioepoch; - __u64 t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 */ + __u64 t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 + */ __u32 fsuid; __u32 fsgid; __u32 capability; @@ -2219,7 +2082,7 @@ struct mdt_body { __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ + __u32 unused2; /* was "generation" until 2.4.0 */ __u32 suppgid; __u32 eadatasize; __u32 aclsize; @@ -2256,7 +2119,8 @@ enum { }; /* inode access permission for remote user, the inode info are omitted, - * for client knows them. */ + * for client knows them. + */ struct mdt_remote_perm { __u32 rp_uid; __u32 rp_gid; @@ -2306,13 +2170,13 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * since the client and MDS may run different kernels (see bug 13828) * Therefore, we should only use MDS_ATTR_* attributes for sa_valid. */ -#define MDS_ATTR_MODE 0x1ULL /* = 1 */ -#define MDS_ATTR_UID 0x2ULL /* = 2 */ -#define MDS_ATTR_GID 0x4ULL /* = 4 */ -#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ -#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ -#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ -#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ +#define MDS_ATTR_MODE 0x1ULL /* = 1 */ +#define MDS_ATTR_UID 0x2ULL /* = 2 */ +#define MDS_ATTR_GID 0x4ULL /* = 4 */ +#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ +#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ +#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ +#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */ #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */ #define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */ @@ -2320,14 +2184,11 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */ #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */ #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */ -#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */ +#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, + * ie O_TRUNC + */ #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */ -#ifndef FMODE_READ -#define FMODE_READ 00000001 -#define FMODE_WRITE 00000002 -#endif - #define MDS_FMODE_CLOSED 00000000 #define MDS_FMODE_EXEC 00000004 /* IO Epoch is opened on a closed file. */ @@ -2354,9 +2215,10 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * We do not support JOIN FILE * anymore, reserve this flags * just for preventing such bit - * to be reused. */ + * to be reused. 
+ */ -#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ +#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */ #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */ #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */ @@ -2409,7 +2271,8 @@ struct mdt_rec_create { __u32 cr_bias; /* use of helpers set/get_mrc_cr_flags() is needed to access * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to - * extend cr_flags size without breaking 1.8 compat */ + * extend cr_flags size without breaking 1.8 compat + */ __u32 cr_flags_l; /* for use with open, low 32 bits */ __u32 cr_flags_h; /* for use with open, high 32 bits */ __u32 cr_umask; /* umask for create */ @@ -2630,7 +2493,8 @@ enum seq_op { #define LOV_MAX_UUID_BUFFER_SIZE 8192 /* The size of the buffer the lov/mdc reserves for the * array of UUIDs returned by the MDS. With the current - * protocol, this will limit the max number of OSTs per LOV */ + * protocol, this will limit the max number of OSTs per LOV + */ #define LOV_DESC_MAGIC 0xB0CCDE5C #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ @@ -2639,13 +2503,13 @@ enum seq_op { /* LOV settings descriptor (should only contain static info) */ struct lov_desc { __u32 ld_tgt_count; /* how many OBD's */ - __u32 ld_active_tgt_count; /* how many active */ - __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default PATTERN_RAID0 */ - __u64 ld_default_stripe_size; /* in bytes */ - __u64 ld_default_stripe_offset; /* in bytes */ + __u32 ld_active_tgt_count; /* how many active */ + __u32 ld_default_stripe_count; /* how many objects are used */ + __u32 ld_pattern; /* default PATTERN_RAID0 */ + __u64 ld_default_stripe_size; /* in bytes */ + __u64 ld_default_stripe_offset; /* in bytes */ __u32 ld_padding_0; /* unused */ - __u32 ld_qos_maxage; /* in second */ + __u32 ld_qos_maxage; /* in second */ __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */ struct obd_uuid ld_uuid; @@ -2659,7 +2523,7 @@ void lustre_swab_lov_desc(struct lov_desc *ld); * LDLM requests: */ /* opcodes -- MUST be distinct from OST/MDS opcodes */ -typedef enum { +enum ldlm_cmd { LDLM_ENQUEUE = 101, LDLM_CONVERT = 102, LDLM_CANCEL = 103, @@ -2668,7 +2532,7 @@ typedef enum { LDLM_GL_CALLBACK = 106, LDLM_SET_INFO = 107, LDLM_LAST_OPC -} ldlm_cmd_t; +}; #define LDLM_FIRST_OPC LDLM_ENQUEUE #define RES_NAME_SIZE 4 @@ -2687,7 +2551,7 @@ static inline int ldlm_res_eq(const struct ldlm_res_id *res0, } /* lock types */ -typedef enum { +enum ldlm_mode { LCK_MINMODE = 0, LCK_EX = 1, LCK_PW = 2, @@ -2698,17 +2562,17 @@ typedef enum { LCK_GROUP = 64, LCK_COS = 128, LCK_MAXMODE -} ldlm_mode_t; +}; #define LCK_MODE_NUM 8 -typedef enum { +enum ldlm_type { LDLM_PLAIN = 10, LDLM_EXTENT = 11, LDLM_FLOCK = 12, LDLM_IBITS = 13, LDLM_MAX_TYPE -} ldlm_type_t; +}; #define LDLM_MIN_TYPE LDLM_PLAIN @@ -2747,7 +2611,8 @@ struct ldlm_flock_wire { * the first fields of the ldlm_flock structure because there is only * one ldlm_swab routine to process the ldlm_policy_data_t union. if * this ever changes we will need to swab the union differently based - * on the resource type. */ + * on the resource type. 
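The comment in mdt_rec_create above refers to set/get_mrc_cr_flags() without showing them; this is a sketch of what that split has to look like, with the struct trimmed to the two flag halves for illustration. Keeping the 64-bit open flags as two __u32 fields means a 1.8 peer still parses the record layout it knows:

#include <assert.h>
#include <stdint.h>

struct mdt_rec_create_mini {
	uint32_t cr_flags_l;	/* for use with open, low 32 bits */
	uint32_t cr_flags_h;	/* for use with open, high 32 bits */
};

static void set_mrc_cr_flags(struct mdt_rec_create_mini *mrc, uint64_t flags)
{
	mrc->cr_flags_l = (uint32_t)(flags & 0xFFFFFFFF);
	mrc->cr_flags_h = (uint32_t)(flags >> 32);
}

static uint64_t get_mrc_cr_flags(const struct mdt_rec_create_mini *mrc)
{
	return (uint64_t)mrc->cr_flags_l |
	       ((uint64_t)mrc->cr_flags_h << 32);
}

int main(void)
{
	struct mdt_rec_create_mini mrc;
	uint64_t open_flags = 0x400000001ULL;	/* one bit in each half */

	set_mrc_cr_flags(&mrc, open_flags);
	assert(get_mrc_cr_flags(&mrc) == open_flags);
	return 0;
}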
+ */ typedef union { struct ldlm_extent l_extent; @@ -2768,15 +2633,15 @@ struct ldlm_intent { void lustre_swab_ldlm_intent(struct ldlm_intent *i); struct ldlm_resource_desc { - ldlm_type_t lr_type; + enum ldlm_type lr_type; __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ struct ldlm_res_id lr_name; }; struct ldlm_lock_desc { struct ldlm_resource_desc l_resource; - ldlm_mode_t l_req_mode; - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_req_mode; + enum ldlm_mode l_granted_mode; ldlm_wire_policy_data_t l_policy_data; }; @@ -2793,7 +2658,8 @@ struct ldlm_request { void lustre_swab_ldlm_request(struct ldlm_request *rq); /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. - * Otherwise, 2 are available. */ + * Otherwise, 2 are available. + */ #define ldlm_request_bufsize(count, type) \ ({ \ int _avail = LDLM_LOCKREQ_HANDLES; \ @@ -2820,7 +2686,7 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r); /* * Opcodes for mountconf (mgs and mgc) */ -typedef enum { +enum mgs_cmd { MGS_CONNECT = 250, MGS_DISCONNECT, MGS_EXCEPTION, /* node died, etc. */ @@ -2829,7 +2695,7 @@ typedef enum { MGS_SET_INFO, MGS_CONFIG_READ, MGS_LAST_OPC -} mgs_cmd_t; +}; #define MGS_FIRST_OPC MGS_CONNECT #define MGS_PARAM_MAXLEN 1024 @@ -2918,13 +2784,13 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size); * Opcodes for multiple servers. */ -typedef enum { +enum obd_cmd { OBD_PING = 400, OBD_LOG_CANCEL, OBD_QC_CALLBACK, OBD_IDX_READ, OBD_LAST_OPC -} obd_cmd_t; +}; #define OBD_FIRST_OPC OBD_PING /* catalog of log objects */ @@ -2933,7 +2799,7 @@ typedef enum { struct llog_logid { struct ost_id lgl_oi; __u32 lgl_ogen; -} __attribute__((packed)); +} __packed; /** Records written to the CATALOGS list */ #define CATLIST "CATALOGS" @@ -2942,7 +2808,7 @@ struct llog_catid { __u32 lci_padding1; __u32 lci_padding2; __u32 lci_padding3; -} __attribute__((packed)); +} __packed; /* Log data record types - there is no specific reason that these need to * be related to the RPC opcodes, but no reason not to (may be handy later?) 
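The hunks below only swap __attribute__((packed)) for the kernel's __packed shorthand, but the annotation itself is load-bearing for every wire struct in this file: without it the compiler pads mixed 32/64-bit records and the on-wire size changes. A standalone illustration (the struct here is a stand-in, not one from this header):

#include <stdint.h>
#include <stdio.h>

#define __packed __attribute__((packed))	/* the kernel shorthand */

struct rec_padded {
	uint32_t r_gen;
	uint64_t r_id;		/* compiler aligns this to 8 bytes */
};

struct rec_packed {
	uint32_t r_gen;
	uint64_t r_id;
} __packed;

int main(void)
{
	printf("padded=%zu packed=%zu\n",
	       sizeof(struct rec_padded),	/* typically 16 */
	       sizeof(struct rec_packed));	/* always 12 */
	return 0;
}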
@@ -2950,7 +2816,7 @@ struct llog_catid { #define LLOG_OP_MAGIC 0x10600000 #define LLOG_OP_MASK 0xfff00000 -typedef enum { +enum llog_op_type { LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000, OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00, /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */ @@ -2970,7 +2836,7 @@ typedef enum { HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, -} llog_op_type; +}; #define LLOG_REC_HDR_NEEDS_SWABBING(r) \ (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC)) @@ -3006,7 +2872,7 @@ struct llog_logid_rec { __u64 lid_padding2; __u64 lid_padding3; struct llog_rec_tail lid_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink_rec { struct llog_rec_hdr lur_hdr; @@ -3014,7 +2880,7 @@ struct llog_unlink_rec { __u32 lur_oseq; __u32 lur_count; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink64_rec { struct llog_rec_hdr lur_hdr; @@ -3024,7 +2890,7 @@ struct llog_unlink64_rec { __u64 lur_padding2; __u64 lur_padding3; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_setattr64_rec { struct llog_rec_hdr lsr_hdr; @@ -3035,7 +2901,7 @@ struct llog_setattr64_rec { __u32 lsr_gid_h; __u64 lsr_padding; struct llog_rec_tail lsr_tail; -} __attribute__((packed)); +} __packed; struct llog_size_change_rec { struct llog_rec_hdr lsc_hdr; @@ -3045,16 +2911,7 @@ struct llog_size_change_rec { __u64 lsc_padding2; __u64 lsc_padding3; struct llog_rec_tail lsc_tail; -} __attribute__((packed)); - -#define CHANGELOG_MAGIC 0xca103000 - -/** \a changelog_rec_type's that can't be masked */ -#define CHANGELOG_MINMASK (1 << CL_MARK) -/** bits covering all \a changelog_rec_type's */ -#define CHANGELOG_ALLMASK 0XFFFFFFFF -/** default \a changelog_rec_type mask */ -#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE) +} __packed; /* changelog llog name, needed by client replicators */ #define CHANGELOG_CATALOG "changelog_catalog" @@ -3062,22 +2919,20 @@ struct llog_size_change_rec { struct changelog_setinfo { __u64 cs_recno; __u32 cs_id; -} __attribute__((packed)); +} __packed; /** changelog record */ struct llog_changelog_rec { struct llog_rec_hdr cr_hdr; struct changelog_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); +} __packed; struct llog_changelog_ext_rec { struct llog_rec_hdr cr_hdr; struct changelog_ext_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -#define CHANGELOG_USER_PREFIX "cl" +} __packed; struct llog_changelog_user_rec { struct llog_rec_hdr cur_hdr; @@ -3085,7 +2940,7 @@ struct llog_changelog_user_rec { __u32 cur_padding; __u64 cur_endrec; struct llog_rec_tail cur_tail; -} __attribute__((packed)); +} __packed; enum agent_req_status { ARS_WAITING, @@ -3123,21 +2978,22 @@ struct llog_agent_req_rec { struct llog_rec_hdr arr_hdr; /**< record header */ __u32 arr_status; /**< status of the request */ /* must match enum - * agent_req_status */ + * agent_req_status + */ __u32 arr_archive_id; /**< backend archive number */ __u64 arr_flags; /**< req flags */ - __u64 arr_compound_id; /**< compound cookie */ + __u64 arr_compound_id;/**< compound cookie */ __u64 arr_req_create; /**< req. creation time */ __u64 arr_req_change; /**< req. status change time */ struct hsm_action_item arr_hai; /**< req. 
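LLOG_REC_HDR_NEEDS_SWABBING above works because every record type carries LLOG_OP_MAGIC in its top bits: a record written by an opposite-endian peer matches the byte-swapped magic under the byte-swapped mask. A userspace sketch, with GCC's __builtin_bswap32 standing in for the kernel's __swab32:

#include <assert.h>
#include <stdint.h>

#define LLOG_OP_MAGIC 0x10600000
#define LLOG_OP_MASK  0xfff00000

static int needs_swabbing(uint32_t lrh_type)
{
	return (lrh_type & __builtin_bswap32(LLOG_OP_MASK)) ==
	       __builtin_bswap32(LLOG_OP_MAGIC);
}

int main(void)
{
	uint32_t native = LLOG_OP_MAGIC | 0x45539;	/* LLOG_HDR_MAGIC */
	uint32_t foreign = __builtin_bswap32(native);	/* other-endian peer */

	assert(!needs_swabbing(native));
	assert(needs_swabbing(foreign));
	return 0;
}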
to the agent */ - struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ -} __attribute__((packed)); + struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ +} __packed; /* Old llog gen for compatibility */ struct llog_gen { __u64 mnt_cnt; __u64 conn_cnt; -} __attribute__((packed)); +} __packed; struct llog_gen_rec { struct llog_rec_hdr lgr_hdr; @@ -3175,19 +3031,21 @@ struct llog_log_hdr { __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23]; __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)]; struct llog_rec_tail llh_tail; -} __attribute__((packed)); +} __packed; #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ llh->llh_bitmap_offset - \ sizeof(llh->llh_tail)) * 8) -/** log cookies are used to reference a specific log file and a record therein */ +/** log cookies are used to reference a specific log file and a record + * therein + */ struct llog_cookie { struct llog_logid lgc_lgl; __u32 lgc_subsys; __u32 lgc_index; __u32 lgc_padding; -} __attribute__((packed)); +} __packed; /** llog protocol */ enum llogd_rpc_ops { @@ -3196,7 +3054,7 @@ enum llogd_rpc_ops { LLOG_ORIGIN_HANDLE_READ_HEADER = 503, LLOG_ORIGIN_HANDLE_WRITE_REC = 504, LLOG_ORIGIN_HANDLE_CLOSE = 505, - LLOG_ORIGIN_CONNECT = 506, + LLOG_ORIGIN_CONNECT = 506, LLOG_CATINFO = 507, /* deprecated */ LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508, LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/ @@ -3212,13 +3070,13 @@ struct llogd_body { __u32 lgd_saved_index; __u32 lgd_len; __u64 lgd_cur_offset; -} __attribute__((packed)); +} __packed; struct llogd_conn_body { struct llog_gen lgdc_gen; struct llog_logid lgdc_logid; __u32 lgdc_ctxt_idx; -} __attribute__((packed)); +} __packed; /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { @@ -3245,17 +3103,18 @@ struct obdo { __u64 o_ioepoch; /* epoch in ost writes */ __u32 o_stripe_idx; /* holds stripe idx */ __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong - * locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from - * MDS */ + struct lustre_handle o_handle; /* brw: lock handle to prolong locks + */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS + */ __u32 o_uid_h; __u32 o_gid_h; __u64 o_data_version; /* getattr: sum of iversion for * each stripe. 
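The llh_bitmap in llog_log_hdr above tracks which record indexes are live, one bit per record; LLOG_BITMAP_SIZE is just the bitmap area converted to bits. A plain-C stand-in for the bit test (the kernel uses its own bitmap helpers for this):

#include <assert.h>
#include <stdint.h>

static int llog_bit_is_set(const uint32_t *bitmap, unsigned int idx)
{
	/* record index N lives at word N/32, bit N%32 */
	return (bitmap[idx / 32] >> (idx % 32)) & 1;
}

int main(void)
{
	uint32_t bitmap[2] = { 0 };

	bitmap[1] |= 1u << 3;			/* mark record 35 as used */
	assert(llog_bit_is_set(bitmap, 35));
	assert(!llog_bit_is_set(bitmap, 34));
	return 0;
}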
* brw: grant space consumed on - * the client for the write */ + * the client for the write + */ __u64 o_padding_4; __u64 o_padding_5; __u64 o_padding_6; @@ -3273,13 +3132,14 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, { *wobdo = *lobdo; wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { /* Currently OBD_FL_OSTID will only be used when 2.4 echo - * client communicate with pre-2.4 server */ + * client communicate with pre-2.4 server + */ wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); } @@ -3292,7 +3152,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, __u32 local_flags = 0; if (lobdo->o_valid & OBD_MD_FLFLAGS) - local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; + local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; *lobdo = *wobdo; if (local_flags != 0) { @@ -3300,7 +3160,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; lobdo->o_flags |= local_flags; } - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && @@ -3349,100 +3209,14 @@ void dump_ioo(struct obd_ioobj *nb); void dump_ost_body(struct ost_body *ob); void dump_rcs(__u32 *rc); -#define IDX_INFO_MAGIC 0x3D37CC37 - -/* Index file transfer through the network. The server serializes the index into - * a byte stream which is sent to the client via a bulk transfer */ -struct idx_info { - __u32 ii_magic; - - /* reply: see idx_info_flags below */ - __u32 ii_flags; - - /* request & reply: number of lu_idxpage (to be) transferred */ - __u16 ii_count; - __u16 ii_pad0; - - /* request: requested attributes passed down to the iterator API */ - __u32 ii_attrs; - - /* request & reply: index file identifier (FID) */ - struct lu_fid ii_fid; - - /* reply: version of the index file before starting to walk the index. - * Please note that the version can be modified at any time during the - * transfer */ - __u64 ii_version; - - /* request: hash to start with: - * reply: hash of the first entry of the first lu_idxpage and hash - * of the entry to read next if any */ - __u64 ii_hash_start; - __u64 ii_hash_end; - - /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is - * set */ - __u16 ii_keysize; - - /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC - * is set */ - __u16 ii_recsize; - - __u32 ii_pad1; - __u64 ii_pad2; - __u64 ii_pad3; -}; - -void lustre_swab_idx_info(struct idx_info *ii); - -#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */ - -/* List of flags used in idx_info::ii_flags */ -enum idx_info_flags { - II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */ - II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ - II_FL_VARREC = 1 << 2, /* records can be of variable size */ - II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ -}; - -#define LIP_MAGIC 0x8A6D6B6C - -/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */ -struct lu_idxpage { - /* 16-byte header */ - __u32 lip_magic; - __u16 lip_flags; - __u16 lip_nr; /* number of entries in the container */ - __u64 lip_pad0; /* additional padding for future use */ - - /* key/record pairs are stored in the remaining 4080 bytes. 
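lustre_get_wire_obdo() above has to keep client-local flag bits alive across the full struct copy. Stripped to its save/copy/mask/restore core (types and the mask value below are stand-ins, and the real function only preserves the bits when OBD_MD_FLFLAGS is set):

#include <assert.h>
#include <stdint.h>

#define FL_LOCAL_MASK 0xf0000000u	/* stand-in for OBD_FL_LOCAL_MASK */

struct obdo_mini {
	uint32_t o_flags;
};

static void get_wire(struct obdo_mini *lobdo, const struct obdo_mini *wobdo)
{
	uint32_t local_flags = lobdo->o_flags & FL_LOCAL_MASK;	/* save */

	*lobdo = *wobdo;			/* wire copy clobbers all */
	lobdo->o_flags &= ~FL_LOCAL_MASK;	/* distrust wire-side bits */
	lobdo->o_flags |= local_flags;		/* restore private bits */
}

int main(void)
{
	struct obdo_mini l = { .o_flags = 0x10000003 };
	struct obdo_mini w = { .o_flags = 0x20000004 };

	get_wire(&l, &w);
	assert(l.o_flags == 0x10000004);  /* local bit kept, wire bits taken */
	return 0;
}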
- * depending upon the flags in idx_info::ii_flags, each key/record - * pair might be preceded by: - * - a hash value - * - the key size (II_FL_VARKEY is set) - * - the record size (II_FL_VARREC is set) - * - * For the time being, we only support fixed-size key & record. */ - char lip_entries[0]; -}; - -#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries)) - -/* Gather all possible type associated with a 4KB container */ -union lu_page { - struct lu_dirpage lp_dir; /* for MDS_READPAGE */ - struct lu_idxpage lp_idx; /* for OBD_IDX_READ */ - char lp_array[LU_PAGE_SIZE]; -}; - /* security opcodes */ -typedef enum { +enum sec_cmd { SEC_CTX_INIT = 801, SEC_CTX_INIT_CONT = 802, SEC_CTX_FINI = 803, SEC_LAST_OPC, SEC_FIRST_OPC = SEC_CTX_INIT -} sec_cmd_t; +}; /* * capa related definitions @@ -3451,7 +3225,8 @@ typedef enum { #define CAPA_HMAC_KEY_MAX_LEN 56 /* NB take care when changing the sequence of elements this struct, - * because the offset info is used in find_capa() */ + * because the offset info is used in find_capa() + */ struct lustre_capa { struct lu_fid lc_fid; /** fid */ __u64 lc_opc; /** operations allowed */ @@ -3463,7 +3238,7 @@ struct lustre_capa { /* FIXME: y2038 time_t overflow: */ __u32 lc_expiry; /** expiry time (sec) */ __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ -} __attribute__((packed)); +} __packed; void lustre_swab_lustre_capa(struct lustre_capa *c); @@ -3497,7 +3272,7 @@ struct lustre_capa_key { __u32 lk_keyid; /**< key# */ __u32 lk_padding; __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */ -} __attribute__((packed)); +} __packed; /** The link ea holds 1 \a link_ea_entry for each hardlink */ #define LINK_EA_MAGIC 0x11EAF1DFUL @@ -3518,7 +3293,7 @@ struct link_ea_entry { unsigned char lee_reclen[2]; unsigned char lee_parent_fid[sizeof(struct lu_fid)]; char lee_name[0]; -} __attribute__((packed)); +} __packed; /** fid2path request/reply structure */ struct getinfo_fid2path { @@ -3527,7 +3302,7 @@ struct getinfo_fid2path { __u32 gf_linkno; __u32 gf_pathlen; char gf_path[0]; -} __attribute__((packed)); +} __packed; void lustre_swab_fid2path (struct getinfo_fid2path *gf); @@ -3558,7 +3333,7 @@ void lustre_swab_layout_intent(struct layout_intent *li); */ struct hsm_progress_kernel { /* Field taken from struct hsm_progress */ - lustre_fid hpk_fid; + struct lu_fid hpk_fid; __u64 hpk_cookie; struct hsm_extent hpk_extent; __u16 hpk_flags; @@ -3567,7 +3342,7 @@ struct hsm_progress_kernel { /* Additional fields */ __u64 hpk_data_version; __u64 hpk_padding2; -} __attribute__((packed)); +} __packed; void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_current_action(struct hsm_current_action *action); @@ -3576,92 +3351,6 @@ void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_user_item(struct hsm_user_item *hui); void lustre_swab_hsm_request(struct hsm_request *hr); -/** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. - * - * During the cross-ref operation, the Master MDT, which the client send the - * request to, will disassembly the operation into object updates, then OSP - * will send these updates to the remote MDT to be executed. - * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. 
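Several structures in this file (lu_idxpage's lip_entries, link_ea_entry's lee_name, getinfo_fid2path's gf_path) end in a zero-length array: a fixed header followed by a variable payload, with offsetof() giving the header size the way LIP_HDR_SIZE does above. The idiom in miniature, with an illustrative struct and field names:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fid2path_mini {
	unsigned long long gf_recno;
	unsigned int gf_linkno;
	unsigned int gf_pathlen;
	char gf_path[0];		/* variable payload starts here */
};

#define F2P_HDR_SIZE offsetof(struct fid2path_mini, gf_path)

int main(void)
{
	const char *path = "dir/file";
	unsigned int pathlen = strlen(path) + 1;
	struct fid2path_mini *gf = malloc(F2P_HDR_SIZE + pathlen);

	if (!gf)
		return 1;
	gf->gf_pathlen = pathlen;
	memcpy(gf->gf_path, path, pathlen);
	printf("header %zu bytes + %u payload\n",
	       F2P_HDR_SIZE, gf->gf_pathlen);
	free(gf);
	return 0;
}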
- * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. - * - ******************************************************************* - * update reply format: - * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. - * - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] - * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff - */ - -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); - /** layout swap request structure * fid1 and fid2 are in mdt_body */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 2b4dd656d..276906e64 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -85,9 +85,8 @@ struct obd_statfs { __u32 os_namelen; __u64 os_maxbytes; __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */ - __u32 os_fprecreated; /* objs available now to the caller */ - /* used in QoS code to find preferred - * OSTs */ + __u32 os_fprecreated; /* objs available now to the caller */ + /* used in QoS code to find preferred OSTs */ __u32 os_spare2; __u32 os_spare3; __u32 os_spare4; @@ -135,8 +134,9 @@ struct filter_fid_old { /* Userspace should treat lu_fid as opaque, and only use the following methods * to print or parse them. Other functions (e.g. compare, swab) could be moved - * here from lustre_idl.h if needed. */ -typedef struct lu_fid lustre_fid; + * here from lustre_idl.h if needed. + */ +struct lu_fid; /** * Following struct for object attributes, that will be kept inode's EA. @@ -266,7 +266,8 @@ struct ost_id { /* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular * files, but are unlikely to be used in practice and are not harmful if * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character - * devices and are safe for use on new files (See LU-812, LU-4209). */ + * devices and are safe for use on new files (See LU-812, LU-4209). 
+ */ #define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC) #define LL_FILE_IGNORE_LOCK 0x00000001 @@ -302,7 +303,8 @@ struct ost_id { * The limit of 12 pages is somewhat arbitrary, but is a reasonably large * allocation that is sufficient for the current generation of systems. * - * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */ + * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) + */ #define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */ #define LOV_ALL_STRIPES 0xffff /* only valid for directories */ #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ @@ -323,9 +325,11 @@ struct lov_user_md_v1 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ } __attribute__((packed, __may_alias__)); @@ -338,9 +342,11 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */ struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ @@ -442,9 +448,13 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp) /* For printf's only, make sure uuid is terminated */ static inline char *obd_uuid2str(const struct obd_uuid *uuid) { + if (!uuid) + return NULL; + if (uuid->uuid[sizeof(*uuid) - 1] != '\0') { /* Obviously not safe, but for printfs, no real harm done... - we're always null-terminated, even in a race. */ + * we're always null-terminated, even in a race. + */ static char temp[sizeof(*uuid)]; memcpy(temp, uuid->uuid, sizeof(*uuid) - 1); @@ -455,8 +465,9 @@ static inline char *obd_uuid2str(const struct obd_uuid *uuid) } /* Extract fsname from uuid (or target name) of a target - e.g. (myfs-OST0007_UUID -> myfs) - see also deuuidify. */ + * e.g. (myfs-OST0007_UUID -> myfs) + * see also deuuidify. + */ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) { char *p; @@ -465,11 +476,12 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) buf[buflen - 1] = '\0'; p = strrchr(buf, '-'); if (p) - *p = '\0'; + *p = '\0'; } /* printf display format - e.g. printf("file FID is "DFID"\n", PFID(fid)); */ + * e.g. printf("file FID is "DFID"\n", PFID(fid)); + */ #define FID_NOBRACE_LEN 40 #define FID_LEN (FID_NOBRACE_LEN + 2) #define DFID_NOBRACE "%#llx:0x%x:0x%x" @@ -480,7 +492,8 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) (fid)->f_ver /* scanf input parse format -- strip '[' first. - e.g. sscanf(fidstr, SFID, RFID(&fid)); */ + * e.g. 
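The DFID/PFID and SFID/RFID pairs above give every caller one way to print and parse FIDs. A userspace round trip; struct lu_fid's layout is inferred from the f_seq/f_oid/f_ver fields that PFID expands to, and the bracketed DFID wrapper is assumed from FID_LEN = FID_NOBRACE_LEN + 2 plus the strip-'['-first note:

#include <assert.h>
#include <stdio.h>

struct lu_fid {
	unsigned long long f_seq;
	unsigned int f_oid;
	unsigned int f_ver;
};

#define DFID_NOBRACE "%#llx:0x%x:0x%x"
#define DFID "["DFID_NOBRACE"]"			/* assumed wrapper */
#define PFID(fid) (fid)->f_seq, (fid)->f_oid, (fid)->f_ver

#define SFID "0x%llx:0x%x:0x%x"
#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)

int main(void)
{
	struct lu_fid fid = { 0x200000401ULL, 0x6, 0x0 }, parsed;
	char buf[64];

	snprintf(buf, sizeof(buf), DFID, PFID(&fid));
	/* scanf input parse format -- strip '[' first, as noted above */
	assert(sscanf(buf + 1, SFID, RFID(&parsed)) == 3);
	assert(parsed.f_seq == fid.f_seq && parsed.f_oid == fid.f_oid);
	return 0;
}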
sscanf(fidstr, SFID, RFID(&fid)); + */ #define SFID "0x%llx:0x%x:0x%x" #define RFID(fid) \ &((fid)->f_seq), \ @@ -542,22 +555,6 @@ enum { RMT_RGETFACL = 4 }; -#ifdef NEED_QUOTA_DEFS -#ifndef QIF_BLIMITS -#define QIF_BLIMITS 1 -#define QIF_SPACE 2 -#define QIF_ILIMITS 4 -#define QIF_INODES 8 -#define QIF_BTIME 16 -#define QIF_ITIME 32 -#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) -#define QIF_USAGE (QIF_SPACE | QIF_INODES) -#define QIF_TIMES (QIF_BTIME | QIF_ITIME) -#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) -#endif - -#endif /* !__KERNEL__ */ - /* lustre volatile file support * file name header: .^L^S^T^R:volatile" */ @@ -566,9 +563,9 @@ enum { /* hdr + MDT index */ #define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:" -typedef enum lustre_quota_version { +enum lustre_quota_version { LUSTRE_QUOTA_V2 = 1 -} lustre_quota_version_t; +}; /* XXX: same as if_dqinfo struct in kernel */ struct obd_dqinfo { @@ -698,7 +695,8 @@ static inline const char *changelog_type2str(int type) #define CLF_HSM_LAST 15 /* Remove bits higher than _h, then extract the value - * between _h and _l by shifting lower weigth to bit 0. */ + * between _h and _l by shifting lower weigth to bit 0. + */ #define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \ >> (CLF_HSM_LAST - _h + _l)) @@ -761,10 +759,10 @@ struct changelog_rec { __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< parent fid */ + struct lu_fid cr_pfid; /**< parent fid */ char cr_name[0]; /**< last element */ } __packed; @@ -775,18 +773,19 @@ struct changelog_rec { struct changelog_ext_rec { __u16 cr_namelen; __u16 cr_flags; /**< (flags & CLF_FLAGMASK) | - CLF_EXT_VERSION */ + * CLF_EXT_VERSION + */ __u32 cr_type; /**< \a changelog_rec_type */ __u64 cr_index; /**< changelog record number */ __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< target parent fid */ - lustre_fid cr_sfid; /**< source fid, or zero */ - lustre_fid cr_spfid; /**< source parent fid, or zero */ + struct lu_fid cr_pfid; /**< target parent fid */ + struct lu_fid cr_sfid; /**< source fid, or zero */ + struct lu_fid cr_spfid; /**< source parent fid, or zero */ char cr_name[0]; /**< last element */ } __packed; @@ -835,7 +834,8 @@ struct ioc_data_version { }; #define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling - version. Dirty caches are left unchanged. */ + * version. Dirty caches are left unchanged. 
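CLF_GET_BITS() above extracts a field from the 16-bit flags word by shifting its high bit up to bit CLF_HSM_LAST, masking to 16 bits, then shifting back down so the field's low bit lands at bit 0. A quick check (macro and constant copied from this header):

#include <assert.h>

#define CLF_HSM_LAST 15
#define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \
				   >> (CLF_HSM_LAST - _h + _l))

int main(void)
{
	/* flags word with the value 5 stored in bits [6:4] */
	unsigned int flags = 5u << 4;

	assert(CLF_GET_BITS(flags, 6, 4) == 5);
	return 0;
}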
+ */ #ifndef offsetof # define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb))) @@ -976,8 +976,8 @@ struct hsm_request { }; struct hsm_user_item { - lustre_fid hui_fid; - struct hsm_extent hui_extent; + struct lu_fid hui_fid; + struct hsm_extent hui_extent; } __packed; struct hsm_user_request { @@ -1046,8 +1046,8 @@ static inline char *hsm_copytool_action2name(enum hsm_copytool_action a) struct hsm_action_item { __u32 hai_len; /* valid size of this struct */ __u32 hai_action; /* hsm_copytool_action, but use known size */ - lustre_fid hai_fid; /* Lustre FID to operated on */ - lustre_fid hai_dfid; /* fid used for data access */ + struct lu_fid hai_fid; /* Lustre FID to operated on */ + struct lu_fid hai_dfid; /* fid used for data access */ struct hsm_extent hai_extent; /* byte range to operate on */ __u64 hai_cookie; /* action cookie from coordinator */ __u64 hai_gid; /* grouplock id */ @@ -1095,7 +1095,8 @@ struct hsm_action_list { __u32 padding1; char hal_fsname[0]; /* null-terminated */ /* struct hsm_action_item[hal_count] follows, aligned on 8-byte - boundaries. See hai_zero */ + * boundaries. See hai_zero + */ } __packed; #ifndef HAVE_CFS_SIZE_ROUND @@ -1157,7 +1158,7 @@ struct hsm_user_import { #define HP_FLAG_RETRY 0x02 struct hsm_progress { - lustre_fid hp_fid; + struct lu_fid hp_fid; __u64 hp_cookie; struct hsm_extent hp_extent; __u16 hp_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h index eb6b292b7..bb16ae980 100644 --- a/drivers/staging/lustre/lustre/include/lustre_cfg.h +++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h @@ -50,12 +50,13 @@ #define LUSTRE_CFG_MAX_BUFCOUNT 8 #define LCFG_HDR_SIZE(count) \ - cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)])) + cfs_size_round(offsetof(struct lustre_cfg, lcfg_buflens[(count)])) /** If the LCFG_REQUIRED bit is set in a configuration command, * then the client is required to understand this parameter * in order to mount the filesystem. If it does not understand - * a REQUIRED command the client mount will fail. */ + * a REQUIRED command the client mount will fail. 
+ */ #define LCFG_REQUIRED 0x0001000 enum lcfg_command_type { @@ -87,9 +88,11 @@ enum lcfg_command_type { LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */ LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */ LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre - * cleanup cleanup */ + * cleanup cleanup + */ LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set - *a proc parameters */ + * a proc parameters + */ }; struct lustre_cfg_bufs { @@ -128,7 +131,7 @@ static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs, { if (index >= LUSTRE_CFG_MAX_BUFCOUNT) return; - if (bufs == NULL) + if (!bufs) return; if (bufs->lcfg_bufcount <= index) @@ -158,7 +161,6 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index) int offset; int bufcount; - LASSERT (lcfg != NULL); LASSERT (index >= 0); bufcount = lcfg->lcfg_bufcount; @@ -191,7 +193,7 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) return NULL; s = lustre_cfg_buf(lcfg, index); - if (s == NULL) + if (!s) return NULL; /* @@ -252,10 +254,6 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd, static inline void lustre_cfg_free(struct lustre_cfg *lcfg) { - int len; - - len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens); - kfree(lcfg); return; } diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h index 7c6933ffc..95fd36063 100644 --- a/drivers/staging/lustre/lustre/include/lustre_disk.h +++ b/drivers/staging/lustre/lustre/include/lustre_disk.h @@ -65,7 +65,8 @@ /****************** mount command *********************/ /* The lmd is only used internally by Lustre; mount simply passes - everything as string options */ + * everything as string options + */ #define LMD_MAGIC 0xbdacbd03 #define LMD_PARAMS_MAXLEN 4096 @@ -79,23 +80,26 @@ struct lustre_mount_data { int lmd_recovery_time_soft; int lmd_recovery_time_hard; char *lmd_dev; /* device name */ - char *lmd_profile; /* client only */ + char *lmd_profile; /* client only */ char *lmd_mgssec; /* sptlrpc flavor to mgs */ - char *lmd_opts; /* lustre mount options (as opposed to - _device_ mount options) */ + char *lmd_opts; /* lustre mount options (as opposed to + * _device_ mount options) + */ char *lmd_params; /* lustre params */ - __u32 *lmd_exclude; /* array of OSTs to ignore */ - char *lmd_mgs; /* MGS nid */ - char *lmd_osd_type; /* OSD type */ + __u32 *lmd_exclude; /* array of OSTs to ignore */ + char *lmd_mgs; /* MGS nid */ + char *lmd_osd_type; /* OSD type */ }; #define LMD_FLG_SERVER 0x0001 /* Mounting a server */ #define LMD_FLG_CLIENT 0x0002 /* Mounting a client */ #define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */ #define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers, - no other services */ -#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing - existing MGS services */ + * no other services + */ +#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, + * reusing existing MGS services + */ #define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */ #define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */ #define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */ @@ -116,231 +120,6 @@ struct lustre_mount_data { #define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */ #define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */ -#define LR_SERVER_SIZE 512 -#define LR_CLIENT_START 8192 -#define LR_CLIENT_SIZE 128 -#if LR_CLIENT_START < LR_SERVER_SIZE 
-#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE" -#endif - -/* - * This limit is arbitrary (131072 clients on x86), but it is convenient to use - * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation. - * If we need more than 131072 clients (order-2 allocation on x86) then this - * should become an array of single-page pointers that are allocated on demand. - */ -#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8) -#define LR_MAX_CLIENTS (128 * 1024UL) -#else -#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8) -#endif - -/** COMPAT_146: this is an OST (temporary) */ -#define OBD_COMPAT_OST 0x00000002 -/** COMPAT_146: this is an MDT (temporary) */ -#define OBD_COMPAT_MDT 0x00000004 -/** 2.0 server, interop flag to show server version is changed */ -#define OBD_COMPAT_20 0x00000008 - -/** MDS handles LOV_OBJID file */ -#define OBD_ROCOMPAT_LOVOBJID 0x00000001 - -/** OST handles group subdirs */ -#define OBD_INCOMPAT_GROUPS 0x00000001 -/** this is an OST */ -#define OBD_INCOMPAT_OST 0x00000002 -/** this is an MDT */ -#define OBD_INCOMPAT_MDT 0x00000004 -/** common last_rvcd format */ -#define OBD_INCOMPAT_COMMON_LR 0x00000008 -/** FID is enabled */ -#define OBD_INCOMPAT_FID 0x00000010 -/** Size-on-MDS is enabled */ -#define OBD_INCOMPAT_SOM 0x00000020 -/** filesystem using iam format to store directory entries */ -#define OBD_INCOMPAT_IAM_DIR 0x00000040 -/** LMA attribute contains per-inode incompatible flags */ -#define OBD_INCOMPAT_LMA 0x00000080 -/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16 - * bits are now used to store a generation. Once we start changing the layout - * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count - * will be confused by interpreting stripe_count | gen << 16 as the actual - * stripe count */ -#define OBD_INCOMPAT_LMM_VER 0x00000100 -/** multiple OI files for MDT */ -#define OBD_INCOMPAT_MULTI_OI 0x00000200 - -/* Data stored per server at the head of the last_rcvd file. In le32 order. - This should be common to filter_internal.h, lustre_mds.h */ -struct lr_server_data { - __u8 lsd_uuid[40]; /* server UUID */ - __u64 lsd_last_transno; /* last completed transaction ID */ - __u64 lsd_compat14; /* reserved - compat with old last_rcvd */ - __u64 lsd_mount_count; /* incarnation number */ - __u32 lsd_feature_compat; /* compatible feature flags */ - __u32 lsd_feature_rocompat;/* read-only compatible feature flags */ - __u32 lsd_feature_incompat;/* incompatible feature flags */ - __u32 lsd_server_size; /* size of server data area */ - __u32 lsd_client_start; /* start of per-client data area */ - __u16 lsd_client_size; /* size of per-client data area */ - __u16 lsd_subdir_count; /* number of subdirectories for objects */ - __u64 lsd_catalog_oid; /* recovery catalog object id */ - __u32 lsd_catalog_ogen; /* recovery catalog inode generation */ - __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */ - __u32 lsd_osd_index; /* index number of OST in LOV */ - __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */ - __u32 lsd_start_epoch; /* VBR: start epoch from last boot */ - /** transaction values since lsd_trans_table_time */ - __u64 lsd_trans_table[LR_EXPIRE_INTERVALS]; - /** start point of transno table below */ - __u32 lsd_trans_table_time; /* time of first slot in table above */ - __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */ - __u8 lsd_padding[LR_SERVER_SIZE - 288]; -}; - -/* Data stored per client in the last_rcvd file. In le32 order. 
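The (now removed) lr_server_data above pins its on-disk size with a computed trailing pad, lsd_padding[LR_SERVER_SIZE - 288], so adding a field without shrinking the pad is caught at compile time. The idiom in miniature, with a toy record and illustrative field sizes:

#include <stdint.h>
#include <stdio.h>

#define REC_SIZE 512

struct server_rec {
	uint8_t  rec_uuid[40];		/* server UUID */
	uint64_t rec_last_transno;	/* last completed transaction ID */
	uint32_t rec_feature_compat;	/* feature flags */
	uint8_t  rec_padding[REC_SIZE - 52];	/* 40 + 8 + 4 bytes used */
};

/* adding a field without shrinking the pad breaks the build here */
_Static_assert(sizeof(struct server_rec) == REC_SIZE,
	       "on-disk record must stay exactly REC_SIZE bytes");

int main(void)
{
	printf("record size: %zu\n", sizeof(struct server_rec));
	return 0;
}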
*/ -struct lsd_client_data { - __u8 lcd_uuid[40]; /* client UUID */ - __u64 lcd_last_transno; /* last completed transaction ID */ - __u64 lcd_last_xid; /* xid for the last transaction */ - __u32 lcd_last_result; /* result from last RPC */ - __u32 lcd_last_data; /* per-op data (disposition for open &c.) */ - /* for MDS_CLOSE requests */ - __u64 lcd_last_close_transno; /* last completed transaction ID */ - __u64 lcd_last_close_xid; /* xid for the last transaction */ - __u32 lcd_last_close_result; /* result from last RPC */ - __u32 lcd_last_close_data; /* per-op data */ - /* VBR: last versions */ - __u64 lcd_pre_versions[4]; - __u32 lcd_last_epoch; - /** orphans handling for delayed export rely on that */ - __u32 lcd_first_epoch; - __u8 lcd_padding[LR_CLIENT_SIZE - 128]; -}; - -/* bug20354: the lcd_uuid for export of clients may be wrong */ -static inline void check_lcd(char *obd_name, int index, - struct lsd_client_data *lcd) -{ - int length = sizeof(lcd->lcd_uuid); - - if (strnlen((char *)lcd->lcd_uuid, length) == length) { - lcd->lcd_uuid[length - 1] = '\0'; - - LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n", - lcd->lcd_uuid, obd_name, index); - } -} - -/* last_rcvd handling */ -static inline void lsd_le_to_cpu(struct lr_server_data *buf, - struct lr_server_data *lsd) -{ - int i; - - memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid)); - lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno); - lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14); - lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count); - lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat); - lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat); - lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat); - lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size); - lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start); - lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size); - lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count); - lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid); - lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen); - memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid)); - lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index); - lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1); - lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]); - lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time); - lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals); -} - -static inline void lsd_cpu_to_le(struct lr_server_data *lsd, - struct lr_server_data *buf) -{ - int i; - - memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid)); - buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno); - buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14); - buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count); - buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat); - buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat); - buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat); - buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size); - buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start); - buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size); - buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count); - buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid); - buf->lsd_catalog_ogen = 
cpu_to_le32(lsd->lsd_catalog_ogen); - memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid)); - buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index); - buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1); - buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]); - buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time); - buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals); -} - -static inline void lcd_le_to_cpu(struct lsd_client_data *buf, - struct lsd_client_data *lcd) -{ - memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid)); - lcd->lcd_last_transno = le64_to_cpu(buf->lcd_last_transno); - lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid); - lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result); - lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data); - lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno); - lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid); - lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result); - lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data); - lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]); - lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]); - lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]); - lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]); - lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch); - lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch); -} - -static inline void lcd_cpu_to_le(struct lsd_client_data *lcd, - struct lsd_client_data *buf) -{ - memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid)); - buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno); - buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid); - buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result); - buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data); - buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno); - buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid); - buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result); - buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data); - buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]); - buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]); - buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]); - buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]); - buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch); - buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch); -} - -static inline __u64 lcd_last_transno(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ? - lcd->lcd_last_transno : lcd->lcd_last_close_transno); -} - -static inline __u64 lcd_last_xid(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ? 
- lcd->lcd_last_xid : lcd->lcd_last_close_xid); -} - /****************** superblock additional info *********************/ struct ll_sb_info; @@ -360,7 +139,8 @@ struct lustre_sb_info { char lsi_osd_type[16]; char lsi_fstype[16]; struct backing_dev_info lsi_bdi; /* each client mountpoint needs - own backing_dev_info */ + * own backing_dev_info + */ }; #define LSI_UMOUNT_FAILOVER 0x00200000 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index 9b319f1df..8b0364f71 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -69,7 +69,7 @@ struct obd_device; /** * LDLM non-error return states */ -typedef enum { +enum ldlm_error { ELDLM_OK = 0, ELDLM_LOCK_CHANGED = 300, @@ -80,7 +80,7 @@ typedef enum { ELDLM_NAMESPACE_EXISTS = 400, ELDLM_BAD_NAMESPACE = 401 -} ldlm_error_t; +}; /** * LDLM namespace type. @@ -145,16 +145,17 @@ typedef enum { #define LCK_COMPAT_COS (LCK_COS) /** @} Lock Compatibility Matrix */ -extern ldlm_mode_t lck_compat_array[]; +extern enum ldlm_mode lck_compat_array[]; -static inline void lockmode_verify(ldlm_mode_t mode) +static inline void lockmode_verify(enum ldlm_mode mode) { - LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); + LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); } -static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode) +static inline int lockmode_compat(enum ldlm_mode exist_mode, + enum ldlm_mode new_mode) { - return (lck_compat_array[exist_mode] & new_mode); + return (lck_compat_array[exist_mode] & new_mode); } /* @@ -249,7 +250,8 @@ struct ldlm_pool { /** Current biggest client lock volume. Protected by pl_lock. */ __u64 pl_client_lock_volume; /** Lock volume factor. SLV on client is calculated as following: - * server_slv * lock_volume_factor. */ + * server_slv * lock_volume_factor. + */ atomic_t pl_lock_volume_factor; /** Time when last SLV from server was obtained. */ time64_t pl_recalc_time; @@ -295,10 +297,10 @@ struct ldlm_valblock_ops { * LDLM pools related, type of lock pool in the namespace. * Greedy means release cached locks aggressively */ -typedef enum { +enum ldlm_appetite { LDLM_NAMESPACE_GREEDY = 1 << 0, LDLM_NAMESPACE_MODEST = 1 << 1 -} ldlm_appetite_t; +}; struct ldlm_ns_bucket { /** back pointer to namespace */ @@ -317,7 +319,7 @@ enum { LDLM_NSS_LAST }; -typedef enum { +enum ldlm_ns_type { /** invalid type */ LDLM_NS_TYPE_UNKNOWN = 0, /** mdc namespace */ @@ -332,7 +334,7 @@ typedef enum { LDLM_NS_TYPE_MGC, /** mgs namespace */ LDLM_NS_TYPE_MGT, -} ldlm_ns_type_t; +}; /** * LDLM Namespace. @@ -373,7 +375,7 @@ struct ldlm_namespace { /** * Namespace connect flags supported by server (may be changed via - * /proc, LRU resize may be disabled/enabled). + * sysfs, LRU resize may be disabled/enabled). */ __u64 ns_connect_flags; @@ -439,7 +441,7 @@ struct ldlm_namespace { /** LDLM pool structure for this namespace */ struct ldlm_pool ns_pool; /** Definition of how eagerly unused locks will be released from LRU */ - ldlm_appetite_t ns_appetite; + enum ldlm_appetite ns_appetite; /** Limit of parallel AST RPC count. 
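lockmode_verify() and lockmode_compat() above lean on lock modes being one-hot bits (LCK_EX = 1, LCK_PW = 2, LCK_PR = 4, ...), so each lck_compat_array entry is just the OR of the modes that may coexist with the existing one. A toy three-mode table (the values below are illustrative, not Lustre's real matrix):

#include <assert.h>

enum ldlm_mode_mini {
	LCK_EX = 1,
	LCK_PW = 2,
	LCK_PR = 4,
};

static const int lck_compat_mini[] = {
	[LCK_EX] = 0,		/* among these three, EX excludes all */
	[LCK_PW] = 0,
	[LCK_PR] = LCK_PR,	/* readers coexist with readers */
};

static int lockmode_compat(enum ldlm_mode_mini existing,
			   enum ldlm_mode_mini requested)
{
	return lck_compat_mini[existing] & requested;
}

int main(void)
{
	assert(lockmode_compat(LCK_PR, LCK_PR));
	assert(!lockmode_compat(LCK_PR, LCK_PW));
	return 0;
}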
*/ unsigned ns_max_parallel_ast; @@ -465,7 +467,6 @@ struct ldlm_namespace { */ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET); } @@ -474,14 +475,12 @@ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) */ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE); } static inline void ns_register_cancel(struct ldlm_namespace *ns, ldlm_cancel_for_recovery arg) { - LASSERT(ns != NULL); ns->ns_cancel_for_recovery = arg; } @@ -503,7 +502,8 @@ struct ldlm_glimpse_work { struct list_head gl_list; /* linkage to other gl work structs */ __u32 gl_flags;/* see LDLM_GL_WORK_* below */ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in - * glimpse callback request */ + * glimpse callback request + */ }; /** The ldlm_glimpse_work is allocated on the stack and should not be freed. */ @@ -512,8 +512,9 @@ struct ldlm_glimpse_work { /** Interval node data for each LDLM_EXTENT lock. */ struct ldlm_interval { struct interval_node li_node; /* node for tree management */ - struct list_head li_group; /* the locks which have the same - * policy - group of the policy */ + struct list_head li_group; /* the locks which have the same + * policy - group of the policy + */ }; #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node) @@ -527,7 +528,7 @@ struct ldlm_interval { struct ldlm_interval_tree { /** Tree size. */ int lit_size; - ldlm_mode_t lit_mode; /* lock mode */ + enum ldlm_mode lit_mode; /* lock mode */ struct interval_node *lit_root; /* actual ldlm_interval */ }; @@ -535,12 +536,13 @@ struct ldlm_interval_tree { #define LUSTRE_TRACKS_LOCK_EXP_REFS (0) /** Cancel flags. */ -typedef enum { +enum ldlm_cancel_flags { LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */ LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */ LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST - * in the same RPC */ -} ldlm_cancel_flags_t; + * in the same RPC + */ +}; struct ldlm_flock { __u64 start; @@ -559,7 +561,7 @@ typedef union { struct ldlm_inodebits l_inodebits; } ldlm_policy_data_t; -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy); @@ -637,11 +639,11 @@ struct ldlm_lock { * Requested mode. * Protected by lr_lock. */ - ldlm_mode_t l_req_mode; + enum ldlm_mode l_req_mode; /** * Granted mode, also protected by lr_lock. */ - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_granted_mode; /** Lock completion handler pointer. Called when lock is granted. */ ldlm_completion_callback l_completion_ast; /** @@ -841,20 +843,19 @@ struct ldlm_resource { /** * protected by lr_lock - * @{ */ + * @{ + */ /** List of locks in granted state */ struct list_head lr_granted; /** * List of locks that could not be granted due to conflicts and - * that are waiting for conflicts to go away */ + * that are waiting for conflicts to go away + */ struct list_head lr_waiting; /** @} */ - /* XXX No longer needed? Remove ASAP */ - ldlm_mode_t lr_most_restr; - /** Type of locks this resource can hold. Only one type per resource. 
*/ - ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ + enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ /** Resource name */ struct ldlm_res_id lr_name; @@ -921,7 +922,7 @@ static inline int ldlm_lvbo_init(struct ldlm_resource *res) { struct ldlm_namespace *ns = ldlm_res_to_ns(res); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) return ns->ns_lvbo->lvbo_init(res); return 0; @@ -931,7 +932,7 @@ static inline int ldlm_lvbo_size(struct ldlm_lock *lock) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size) return ns->ns_lvbo->lvbo_size(lock); return 0; @@ -941,10 +942,9 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL) { - LASSERT(ns->ns_lvbo->lvbo_fill != NULL); + if (ns->ns_lvbo) return ns->ns_lvbo->lvbo_fill(lock, buf, len); - } + return 0; } @@ -1015,7 +1015,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, /** Non-rate-limited lock printing function for debugging purposes. */ #define LDLM_DEBUG(lock, fmt, a...) do { \ - if (likely(lock != NULL)) { \ + if (likely(lock)) { \ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \ ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \ "### " fmt, ##a); \ @@ -1025,7 +1025,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, } while (0) typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list); /** @@ -1042,7 +1042,8 @@ typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *); * * LDLM provides for a way to iterate through every lock on a resource or * namespace or every resource in a namespace. 
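The ldlm_lvbo_* wrappers above show the optional-ops pattern this cleanup keeps while dropping the explicit != NULL comparisons: a method table whose hooks are called only when present, with a benign default otherwise. A generic sketch of the pattern, with invented names:

	struct obj_ops {
		int (*init)(void *obj);		/* optional hook */
	};

	struct obj {
		struct obj_ops *ops;		/* may be NULL */
	};

	static int obj_init(struct obj *o)
	{
		/* Call the hook only when both the table and the method
		 * exist; otherwise fall back to a harmless default. */
		if (o->ops && o->ops->init)
			return o->ops->init(o);
		return 0;
	}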
- * @{ */ + * @{ + */ int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *, ldlm_iterator_t iter, void *data); /** @} ldlm_iterator */ @@ -1091,7 +1092,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags) struct ldlm_lock *lock; lock = __ldlm_handle2lock(h, flags); - if (lock != NULL) + if (lock) LDLM_LOCK_REF_DEL(lock); return lock; } @@ -1111,7 +1112,7 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res, return 0; } -int ldlm_error2errno(ldlm_error_t error); +int ldlm_error2errno(enum ldlm_error error); #if LUSTRE_TRACKS_LOCK_EXP_REFS void ldlm_dump_export_locks(struct obd_export *exp); @@ -1168,12 +1169,13 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode); void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); void ldlm_lock_allow_match(struct ldlm_lock *lock); void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *, ldlm_type_t type, - ldlm_policy_data_t *, ldlm_mode_t mode, - struct lustre_handle *, int unref); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits); +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *, + enum ldlm_type type, ldlm_policy_data_t *, + enum ldlm_mode mode, struct lustre_handle *, + int unref); +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits); void ldlm_lock_cancel(struct ldlm_lock *lock); void ldlm_lock_dump_handle(int level, struct lustre_handle *); void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); @@ -1181,8 +1183,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); /* resource.c */ struct ldlm_namespace * ldlm_namespace_new(struct obd_device *obd, char *name, - ldlm_side_t client, ldlm_appetite_t apt, - ldlm_ns_type_t ns_type); + ldlm_side_t client, enum ldlm_appetite apt, + enum ldlm_ns_type ns_type); int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); void ldlm_namespace_get(struct ldlm_namespace *ns); void ldlm_namespace_put(struct ldlm_namespace *ns); @@ -1193,7 +1195,7 @@ void ldlm_debugfs_cleanup(void); struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, const struct ldlm_res_id *, - ldlm_type_t type, int create); + enum ldlm_type type, int create); int ldlm_resource_putref(struct ldlm_resource *res); void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head, @@ -1219,7 +1221,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, * These AST handlers are typically used for server-side local locks and are * also used by client-side lock handlers to perform minimum level base * processing. - * @{ */ + * @{ + */ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data); int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** @} ldlm_local_ast */ @@ -1227,7 +1230,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users. * These are typically used by client and server (*_local versions) * to obtain and release locks. 
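With the typedefs gone, prototypes such as ldlm_lock_match() take enum ldlm_type and enum ldlm_mode directly; callers change only in the spelling of the types. A hypothetical call site, assuming the namespace, resource id and policy are set up elsewhere (a sketch, not code from this tree):

	static int probe_cached_lock(struct ldlm_namespace *ns,
				     const struct ldlm_res_id *res_id,
				     ldlm_policy_data_t *policy)
	{
		struct lustre_handle lockh;
		enum ldlm_mode mode;

		/* modes may be OR'd; a match returns the granted mode */
		mode = ldlm_lock_match(ns, 0, res_id, LDLM_EXTENT, policy,
				       LCK_PR | LCK_PW, &lockh, 0);
		if (mode) {
			/* found a granted, compatible lock; drop our ref */
			ldlm_lock_decref(&lockh, mode);
			return 1;
		}
		return 0;
	}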
- * @{ */ + * @{ + */ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, @@ -1244,29 +1248,32 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct list_head *cancels, int count); int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc); int ldlm_cli_update_pool(struct ptlrpc_request *req); int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *, - ldlm_cancel_flags_t flags, void *opaque); + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque); + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque); int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags); + enum ldlm_cancel_flags flags); int ldlm_cli_cancel_list(struct list_head *head, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags); + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags); /** @} ldlm_cli_api */ /* mds/handler.c */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h index 0d3ed87d3..7f2ba2ffe 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h @@ -57,7 +57,8 @@ /** * Server placed lock on granted list, or a recovering client wants the - * lock added to the granted list, no questions asked. */ + * lock added to the granted list, no questions asked. + */ #define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */ #define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1) #define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1) @@ -65,7 +66,8 @@ /** * Server placed lock on conv list, or a recovering client wants the lock - * added to the conv list, no questions asked. */ + * added to the conv list, no questions asked. + */ #define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */ #define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2) #define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2) @@ -73,7 +75,8 @@ /** * Server placed lock on wait list, or a recovering client wants the lock - * added to the wait list, no questions asked. */ + * added to the wait list, no questions asked. + */ #define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */ #define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3) #define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3) @@ -87,7 +90,8 @@ /** * Lock is being replayed. This could probably be implied by the fact that - * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */ + * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. 
+ */ #define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */ #define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8) #define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8) @@ -125,7 +129,8 @@ /** * Server told not to wait if blocked. For AGL, OST will not send glimpse - * callback. */ + * callback. + */ #define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */ #define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18) #define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18) @@ -141,7 +146,8 @@ * Immediately cancel such locks when they block some other locks. Send * cancel notification to original lock holder, but expect no reply. This * is for clients (like liblustre) that cannot be expected to reliably - * response to blocking AST. */ + * respond to blocking AST. + */ #define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */ #define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23) #define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23) @@ -164,7 +170,8 @@ /** * Used for marking lock as a target for -EINTR while cp_ast sleep emulation - * + race with upcoming bl_ast. */ + * + race with upcoming bl_ast. + */ #define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */ #define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32) #define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32) @@ -172,7 +179,8 @@ /** * Used while processing the unused list to know that we have already - * handled this lock and decided to skip it. */ + * handled this lock and decided to skip it. + */ #define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */ #define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33) #define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33) @@ -231,7 +239,8 @@ * The proper fix is to do the granting inside of the completion AST, * which can be replaced with a LVB-aware wrapping function for OSC locks. * That change is pretty high-risk, though, and would need a lot more - * testing. */ + * testing. + */ #define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */ #define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41) #define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41) @@ -243,7 +252,8 @@ * dirty pages. It can remain on the granted list during this whole time. * Threads racing to update the KMS after performing their writeback need * to know to exclude each other's locks from the calculation as they walk - * the granted list. */ + * the granted list. + */ #define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */ #define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42) #define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42) @@ -263,7 +273,8 @@ /** * optimization hint: LDLM can run blocking callback from current context - * w/o involving separate thread. in order to decrease cs rate */ + * w/o involving separate thread. in order to decrease cs rate + */ #define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */ #define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45) #define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45) @@ -280,7 +291,8 @@ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is * dropped to let ldlm_callback_handler() return EINVAL to the server. It * is used when ELC RPC is already prepared and is waiting for rpc_lock, - * too late to send a separate CANCEL RPC.
+ */ #define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */ #define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46) #define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46) @@ -295,7 +307,8 @@ /** * Don't put lock into the LRU list, so that it is not canceled due * to aging. Used by MGC locks, they are cancelled only at unmount or - * by callback. */ + * by callback. + */ #define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */ #define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48) #define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48) @@ -304,7 +317,8 @@ /** * Set for locks that failed and where the server has been notified. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */ #define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49) #define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49) @@ -315,7 +329,8 @@ * be destroyed when last reference to them is released. Set by * ldlm_lock_destroy_internal(). * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */ #define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50) #define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50) @@ -333,7 +348,8 @@ * NB: compared with check_res_locked(), checking this bit is cheaper. * Also, spin_is_locked() is deprecated for kernel code; one reason is * because it works only for SMP so user needs to add extra macros like - * LASSERT_SPIN_LOCKED for uniprocessor kernels. */ + * LASSERT_SPIN_LOCKED for uniprocessor kernels. + */ #define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */ #define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52) #define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52) @@ -343,7 +359,8 @@ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the * lock-timeout timer and it will never be reset. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */ #define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53) #define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53) @@ -365,10 +382,10 @@ #define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0) /** set a ldlm_lock flag bit */ -#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b)) +#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b)) /** clear a ldlm_lock flag bit */ -#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b)) +#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b)) /** Mask of flags inherited from parent lock when doing intents. 
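The final hunk above is a genuine fix rather than style: the old LDLM_SET_FLAG/LDLM_CLEAR_FLAG bodies opened three parentheses and closed only two, so any use of them failed to compile (they were evidently unused until now). With balanced parentheses they expand to ordinary compound-assignment expressions:

	/* Before: (((_l)->l_flags |= (_b))  -- unbalanced, breaks on expansion
	 * After:  ((_l)->l_flags |= (_b))   -- balanced, usable anywhere */
	#define LDLM_SET_FLAG(_l, _b)	((_l)->l_flags |= (_b))
	#define LDLM_CLEAR_FLAG(_l, _b)	((_l)->l_flags &= ~(_b))

	/* usage, given any struct with an l_flags member: */
	struct lock { unsigned long long l_flags; };

	static void mark_replay(struct lock *l)
	{
		LDLM_SET_FLAG(l, 1ULL << 8);	/* LDLM_FL_REPLAY's bit */
	}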
*/ #define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h index 311e5aa9b..3014d27e6 100644 --- a/drivers/staging/lustre/lustre/include/lustre_export.h +++ b/drivers/staging/lustre/lustre/include/lustre_export.h @@ -50,62 +50,6 @@ #include "lustre/lustre_idl.h" #include "lustre_dlm.h" -struct mds_client_data; -struct mdt_client_data; -struct mds_idmap_table; -struct mdt_idmap_table; - -/** - * Target-specific export data - */ -struct tg_export_data { - /** Protects led_lcd below */ - struct mutex ted_lcd_lock; - /** Per-client data for each export */ - struct lsd_client_data *ted_lcd; - /** Offset of record in last_rcvd file */ - loff_t ted_lr_off; - /** Client index in last_rcvd file */ - int ted_lr_idx; -}; - -/** - * MDT-specific export data - */ -struct mdt_export_data { - struct tg_export_data med_ted; - /** List of all files opened by client on this MDT */ - struct list_head med_open_head; - spinlock_t med_open_lock; /* med_open_head, mfd_list */ - /** Bitmask of all ibit locks this MDT understands */ - __u64 med_ibits_known; - struct mutex med_idmap_mutex; - struct lustre_idmap_table *med_idmap; -}; - -struct ec_export_data { /* echo client */ - struct list_head eced_locks; -}; - -/* In-memory access to client data from OST struct */ -/** Filter (oss-side) specific import data */ -struct filter_export_data { - struct tg_export_data fed_ted; - spinlock_t fed_lock; /**< protects fed_mod_list */ - long fed_dirty; /* in bytes */ - long fed_grant; /* in bytes */ - struct list_head fed_mod_list; /* files being modified */ - int fed_mod_count;/* items in fed_writing list */ - long fed_pending; /* bytes just being written */ - __u32 fed_group; - __u8 fed_pagesize; /* log2 of client page size */ -}; - -struct mgs_export_data { - struct list_head med_clients; /* mgc fs client via this exp */ - spinlock_t med_lock; /* protect med_clients */ -}; - enum obd_option { OBD_OPT_FORCE = 0x0001, OBD_OPT_FAILOVER = 0x0002, @@ -179,7 +123,8 @@ struct obd_export { */ spinlock_t exp_lock; /** Compatibility flags for this export are embedded into - * exp_connect_data */ + * exp_connect_data + */ struct obd_connect_data exp_connect_data; enum obd_option exp_flags; unsigned long exp_failed:1, @@ -200,22 +145,8 @@ struct obd_export { /** blocking dlm lock list, protected by exp_bl_list_lock */ struct list_head exp_bl_list; spinlock_t exp_bl_list_lock; - - /** Target specific data */ - union { - struct tg_export_data eu_target_data; - struct mdt_export_data eu_mdt_data; - struct filter_export_data eu_filter_data; - struct ec_export_data eu_ec_data; - struct mgs_export_data eu_mgs_data; - } u; }; -#define exp_target_data u.eu_target_data -#define exp_mdt_data u.eu_mdt_data -#define exp_filter_data u.eu_filter_data -#define exp_ec_data u.eu_ec_data - static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp) { return &exp->exp_connect_data.ocd_connect_flags; @@ -228,7 +159,6 @@ static inline __u64 exp_connect_flags(struct obd_export *exp) static inline int exp_max_brw_size(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE) return exp->exp_connect_data.ocd_brw_size; @@ -242,19 +172,16 @@ static inline int exp_connect_multibulk(struct obd_export *exp) static inline int exp_connect_cancelset(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET); } static inline int 
exp_connect_lru_resize(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE); } static inline int exp_connect_rmtclient(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT); } @@ -268,14 +195,11 @@ static inline int client_is_remote(struct obd_export *exp) static inline int exp_connect_vbr(struct obd_export *exp) { - LASSERT(exp != NULL); - LASSERT(exp->exp_connection); return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR); } static inline int exp_connect_som(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM); } @@ -288,7 +212,6 @@ static inline int imp_connect_lru_resize(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE); } @@ -300,7 +223,6 @@ static inline int exp_connect_layout(struct obd_export *exp) static inline bool exp_connect_lvb_type(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE) return true; else @@ -311,7 +233,6 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE) return true; @@ -331,13 +252,19 @@ static inline bool imp_connect_disp_stripe(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE; } struct obd_export *class_conn2export(struct lustre_handle *conn); +#define KKUC_CT_DATA_MAGIC 0x092013cea +struct kkuc_ct_data { + __u32 kcd_magic; + struct obd_uuid kcd_uuid; + __u32 kcd_archive; +}; + /** @} export */ #endif /* __EXPORT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h index 9b1a9c695..ab4a92390 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fid.h +++ b/drivers/staging/lustre/lustre/include/lustre_fid.h @@ -251,7 +251,8 @@ static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid) /* For new FS (>= 2.4), the root FID will be changed to * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4), - * the root FID will still be IGIF */ + * the root FID will still be IGIF + */ static inline int fid_is_root(const struct lu_fid *fid) { return unlikely((fid_seq(fid) == FID_SEQ_ROOT && @@ -294,7 +295,8 @@ static inline int fid_is_namespace_visible(const struct lu_fid *fid) const __u64 seq = fid_seq(fid); /* Here, we cannot distinguish whether the normal FID is for OST - * object or not. It is caller's duty to check more if needed. */ + * object or not. It is caller's duty to check more if needed. 
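The exp_connect_* helpers above lose their LASSERT(exp != NULL) guards (the immediate dereference of exp would crash just as loudly) but keep the !! idiom. A minimal illustration of why the double negation matters when a flag bit must be narrowed to an int or a one-bit field (the flag value here is made up):

	#define OBD_CONNECT_EXAMPLE	0x00400000ULL	/* illustrative bit */

	static int connect_example(unsigned long long flags)
	{
		/* (flags & bit) evaluates to 0 or 0x00400000; the double
		 * negation collapses that to exactly 0 or 1, so the result
		 * survives truncation to a small type. */
		return !!(flags & OBD_CONNECT_EXAMPLE);
	}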
+ */ return (!fid_is_last_id(fid) && (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) || fid_is_root(fid) || fid_is_dot_lustre(fid); @@ -433,7 +435,7 @@ fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res) */ static inline struct ldlm_res_id * fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid, - struct ldlm_res_id *res) + struct ldlm_res_id *res) { fid_build_reg_res_name(glb_fid, res); res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid); @@ -516,7 +518,8 @@ static inline int ostid_res_name_eq(struct ost_id *oi, struct ldlm_res_id *name) { /* Note: it is just a trick here to save some effort, probably the - * correct way would be turn them into the FID and compare */ + * correct way would be turn them into the FID and compare + */ if (fid_seq_is_mdt0(ostid_seq(oi))) { return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) && name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi); @@ -589,12 +592,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid) static inline __u32 fid_hash(const struct lu_fid *f, int bits) { /* all objects with same id and different versions will belong to same - * collisions list. */ + * collisions list. + */ return hash_long(fid_flatten(f), bits); } /** - * map fid to 32 bit value for ino on 32bit systems. */ + * map fid to 32 bit value for ino on 32bit systems. + */ static inline __u32 fid_flatten32(const struct lu_fid *fid) { __u32 ino; @@ -611,7 +616,8 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid) * that inodes generated at about the same time have a reduced chance * of collisions. This will give a period of 2^12 = 1024 unique clients * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects - * (from OID), or up to 128M inodes without collisions for new files. */ + * (from OID), or up to 128M inodes without collisions for new files. + */ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) + (seq >> (64 - (40-8)) & 0xffffff00) + (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8); diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h index 551162624..4cf2b0e61 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fld.h +++ b/drivers/staging/lustre/lustre/include/lustre_fld.h @@ -71,50 +71,41 @@ struct lu_fld_target { struct lu_server_fld { /** * super sequence controller export, needed to forward fld - * lookup request. */ + * lookup request. + */ struct obd_export *lsf_control_exp; - /** - * Client FLD cache. */ + /** Client FLD cache. */ struct fld_cache *lsf_cache; - /** - * Protect index modifications */ + /** Protect index modifications */ struct mutex lsf_lock; - /** - * Fld service name in form "fld-srv-lustre-MDTXXX" */ + /** Fld service name in form "fld-srv-lustre-MDTXXX" */ char lsf_name[LUSTRE_MDT_MAXNAMELEN]; }; struct lu_client_fld { - /** - * Client side debugfs entry. */ + /** Client side debugfs entry. */ struct dentry *lcf_debugfs_entry; - /** - * List of exports client FLD knows about. */ + /** List of exports client FLD knows about. */ struct list_head lcf_targets; - /** - * Current hash to be used to chose an export. */ + /** Current hash to be used to choose an export. */ struct lu_fld_hash *lcf_hash; - /** - * Exports count. */ + /** Exports count. */ int lcf_count; - /** - * Lock protecting exports list and fld_hash. */ + /** Lock protecting exports list and fld_hash. */ spinlock_t lcf_lock; - /** - * Client FLD cache. */ + /** Client FLD cache.
*/ struct fld_cache *lcf_cache; - /** - * Client fld debugfs entry name. */ + /** Client fld debugfs entry name. */ char lcf_name[LUSTRE_MDT_MAXNAMELEN]; int lcf_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h index f39780ae4..27f169d2e 100644 --- a/drivers/staging/lustre/lustre/include/lustre_handles.h +++ b/drivers/staging/lustre/lustre/include/lustre_handles.h @@ -65,7 +65,8 @@ struct portals_handle_ops { * * Now you're able to assign the results of cookie2handle directly to an * ldlm_lock. If it's not at the top, you'll want to use container_of() - * to compute the start of the structure based on the handle field. */ + * to compute the start of the structure based on the handle field. + */ struct portals_handle { struct list_head h_link; __u64 h_cookie; diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h index 4e4230e94..dac2d84d8 100644 --- a/drivers/staging/lustre/lustre/include/lustre_import.h +++ b/drivers/staging/lustre/lustre/include/lustre_import.h @@ -292,7 +292,8 @@ struct obd_import { /* need IR MNE swab */ imp_need_mne_swab:1, /* import must be reconnected instead of - * chose new connection */ + * choosing new connection + */ imp_force_reconnect:1, /* import has tried to connect with server */ imp_connect_tried:1; diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h new file mode 100644 index 000000000..970610b6d --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h @@ -0,0 +1,55 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace.
+ */ + +#ifndef __LUSTRE_KERNELCOMM_H__ +#define __LUSTRE_KERNELCOMM_H__ + +/* For declarations shared with userspace */ +#include "uapi_kernelcomm.h" + +/* prototype for callback function on kuc groups */ +typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg); + +/* Kernel methods */ +int libcfs_kkuc_msg_put(struct file *fp, void *payload); +int libcfs_kkuc_group_put(unsigned int group, void *payload); +int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group, + void *data, size_t data_len); +int libcfs_kkuc_group_rem(int uid, unsigned int group); +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, + void *cb_arg); + +#endif /* __LUSTRE_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 428469fec..f2223d558 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -153,9 +153,9 @@ struct obd_ioctl_data { /* buffers the kernel will treat as user pointers */ __u32 ioc_plen1; - char *ioc_pbuf1; + void __user *ioc_pbuf1; __u32 ioc_plen2; - char *ioc_pbuf2; + void __user *ioc_pbuf2; /* inline buffers for various arguments */ __u32 ioc_inllen1; @@ -252,8 +252,8 @@ static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data) #include "obd_support.h" /* function defined in lustre/obdclass//-module.c */ -int obd_ioctl_getdata(char **buf, int *len, void *arg); -int obd_ioctl_popdata(void *arg, void *data, int len); +int obd_ioctl_getdata(char **buf, int *len, void __user *arg); +int obd_ioctl_popdata(void __user *arg, void *data, int len); static inline void obd_ioctl_freedata(char *buf, int len) { @@ -365,10 +365,10 @@ static inline void obd_ioctl_freedata(char *buf, int len) /* OBD_IOC_LLOG_CATINFO is deprecated */ #define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) +/* #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */ #define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE) @@ -387,7 +387,8 @@ static inline void obd_ioctl_freedata(char *buf, int len) */ /* Until such time as we get_info the per-stripe maximum from the OST, - * we define this to be 2T - 4k, which is the ext3 maxbytes. */ + * we define this to be 2T - 4k, which is the ext3 maxbytes. + */ #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL /* Special values for remove LOV EA from disk */ @@ -540,7 +541,7 @@ do { \ l_add_wait(&wq, &__wait); \ \ /* Block all signals (just the non-fatal ones if no timeout). */ \ - if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \ + if (info->lwi_on_signal && (__timeout == 0 || __allow_intr)) \ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \ else \ __blocked = cfs_block_sigsinv(0); \ @@ -562,13 +563,13 @@ do { \ __timeout = cfs_time_sub(__timeout, \ cfs_time_sub(interval, remaining));\ if (__timeout == 0) { \ - if (info->lwi_on_timeout == NULL || \ + if (!info->lwi_on_timeout || \ info->lwi_on_timeout(info->lwi_cb_data)) { \ ret = -ETIMEDOUT; \ break; \ } \ /* Take signals after the timeout expires. 
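Two things are worth noting in the hunks above. First, the new lustre_kernelcomm.h carries the kernel-side kuc (kernel-user communication) prototypes just listed. A rough sketch of how a consumer might drive the group API, with an invented group number and callback (not code from this patch):

	/* Hypothetical consumer of the kkuc group API. */
	static int my_kuc_cb(void *data, void *cb_arg)
	{
		/* called once per registered reader; 0 means keep going */
		return 0;
	}

	static int kkuc_example(struct file *fp, void *msg)
	{
		const unsigned int group = 1;	/* made-up group number */
		int rc;

		/* register fp as a reader in the group */
		rc = libcfs_kkuc_group_add(fp, 0, group, NULL, 0);
		if (rc)
			return rc;

		/* broadcast a payload to every reader in the group */
		rc = libcfs_kkuc_group_put(group, msg);

		/* walk the readers, then drop the uid-0 registration */
		libcfs_kkuc_group_foreach(group, my_kuc_cb, NULL);
		libcfs_kkuc_group_rem(0, group);
		return rc;
	}

Second, lustre_lib.h re-types the ioctl buffer pointers as void __user *, the sparse annotation for pointers that must never be dereferenced directly and may only be accessed through copy_from_user()/copy_to_user(). A generic illustration of the discipline the annotation enforces (handler and struct are invented):

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct my_args {
		__u32 len;
	};

	static long my_ioctl_handler(void __user *arg)
	{
		struct my_args a;

		/* a __user pointer may not be dereferenced directly
		 * (sparse would flag *arg); bounce through kernel memory */
		if (copy_from_user(&a, arg, sizeof(a)))
			return -EFAULT;

		a.len = 0;
		if (copy_to_user(arg, &a, sizeof(a)))
			return -EFAULT;
		return 0;
	}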
*/ \ - if (info->lwi_on_signal != NULL) \ + if (info->lwi_on_signal) \ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\ } \ } \ @@ -578,7 +579,7 @@ do { \ if (condition) \ break; \ if (cfs_signal_pending()) { \ - if (info->lwi_on_signal != NULL && \ + if (info->lwi_on_signal && \ (__timeout == 0 || __allow_intr)) { \ if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \ info->lwi_on_signal(info->lwi_cb_data);\ diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h index f6d7aae3a..fcc5ebbce 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lite.h +++ b/drivers/staging/lustre/lustre/include/lustre_lite.h @@ -53,56 +53,8 @@ #define LL_MAX_BLKSIZE_BITS (22) #define LL_MAX_BLKSIZE (1UL<lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW; - params->lrp_brw_flags = 0; - - params->lrp_policy.l_extent.start = pos; - params->lrp_policy.l_extent.end = pos + len - 1; - /* - * for now O_APPEND always takes local locks. - */ - if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) { - params->lrp_policy.l_extent.start = 0; - params->lrp_policy.l_extent.end = OBD_OBJECT_EOF; - } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) { - /* - * liblustre: OST-side locking for all non-O_APPEND - * reads/writes. - */ - params->lrp_lock_mode = LCK_NL; - params->lrp_brw_flags = OBD_BRW_SRVLOCK; - } else { - /* - * nothing special for the kernel. In the future llite may use - * OST-side locks for small writes into highly contended - * files. - */ - } - params->lrp_ast_flags = (open_flags & O_NONBLOCK) ? - LDLM_FL_BLOCK_NOWAIT : 0; -} - /* - * This is embedded into liblustre and llite super-blocks to keep track of + * This is embedded into llite super-blocks to keep track of * connect flags (capabilities) supported by all imports given mount is * connected to. */ diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h index e4fc8b5e1..49618e186 100644 --- a/drivers/staging/lustre/lustre/include/lustre_log.h +++ b/drivers/staging/lustre/lustre/include/lustre_log.h @@ -241,7 +241,8 @@ struct llog_ctxt { struct obd_llog_group *loc_olg; /* group containing that ctxt */ struct obd_export *loc_exp; /* parent "disk" export (e.g. 
MDS) */ struct obd_import *loc_imp; /* to use in RPC's: can be backward - pointing import */ + * pointing import + */ struct llog_operations *loc_logops; struct llog_handle *loc_handle; struct mutex loc_mutex; /* protect loc_imp */ @@ -255,7 +256,7 @@ struct llog_ctxt { static inline int llog_handle2ops(struct llog_handle *loghandle, struct llog_operations **lop) { - if (loghandle == NULL || loghandle->lgh_logops == NULL) + if (!loghandle || !loghandle->lgh_logops) return -EINVAL; *lop = loghandle->lgh_logops; @@ -272,7 +273,7 @@ static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) static inline void llog_ctxt_put(struct llog_ctxt *ctxt) { - if (ctxt == NULL) + if (!ctxt) return; LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt, @@ -294,7 +295,7 @@ static inline int llog_group_set_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] != NULL) { + if (olg->olg_ctxts[index]) { spin_unlock(&olg->olg_lock); return -EEXIST; } @@ -311,7 +312,7 @@ static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] == NULL) + if (!olg->olg_ctxts[index]) ctxt = NULL; else ctxt = llog_ctxt_get(olg->olg_ctxts[index]); @@ -335,7 +336,7 @@ static inline struct llog_ctxt *llog_get_context(struct obd_device *obd, static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index) { - return (olg->olg_ctxts[index] == NULL); + return (!olg->olg_ctxts[index]); } static inline int llog_ctxt_null(struct obd_device *obd, int index) @@ -354,7 +355,7 @@ static inline int llog_next_block(const struct lu_env *env, rc = llog_handle2ops(loghandle, &lop); if (rc) return rc; - if (lop->lop_next_block == NULL) + if (!lop->lop_next_block) return -EOPNOTSUPP; rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx, diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h index 3da373315..af77eb359 100644 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h @@ -81,8 +81,8 @@ static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck) static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; /* This would normally block until the existing request finishes. @@ -90,7 +90,8 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set * it will only be cleared when all fake requests are finished. * Only when all fake requests are finished can normal requests - * be sent, to ensure they are recoverable again. */ + * be sent, to ensure they are recoverable again. + */ again: mutex_lock(&lck->rpcl_mutex); @@ -105,22 +106,23 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * just turned off but there are still requests in progress. * Wait until they finish. It doesn't need to be efficient * in this extremely rare case, just have low overhead in - * the common case when it isn't true. */ + * the common case when it isn't true. 
+ */ while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) { mutex_unlock(&lck->rpcl_mutex); schedule_timeout(cfs_time_seconds(1) / 4); goto again; } - LASSERT(lck->rpcl_it == NULL); + LASSERT(!lck->rpcl_it); lck->rpcl_it = it; } static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */ @@ -153,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp, if (cli->cl_max_mds_easize < body->max_mdsize) { cli->cl_max_mds_easize = body->max_mdsize; cli->cl_default_mds_easize = - min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); + min_t(__u32, body->max_mdsize, PAGE_SIZE); } if (cli->cl_max_mds_cookiesize < body->max_cookiesize) { cli->cl_max_mds_cookiesize = body->max_cookiesize; cli->cl_default_mds_cookiesize = - min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); + min_t(__u32, body->max_cookiesize, PAGE_SIZE); } } } diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index d834ddd81..69586a522 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -76,7 +76,8 @@ * In order for the client and server to properly negotiate the maximum * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two * value. The client is free to limit the actual RPC size for any bulk - * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */ + * transfer via cl_max_pages_per_rpc to some non-power-of-two value. + */ #define PTLRPC_BULK_OPS_BITS 2 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) /** @@ -85,7 +86,8 @@ * protocol limitation on the maximum RPC size that can be used by any * RPC sent to that server in the future. Instead, the server should * use the negotiated per-client ocd_brw_size to determine the bulk - * RPC count. */ + * RPC count. + */ #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1)) /** @@ -97,21 +99,21 @@ */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! 
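The mdc_get_rpc_lock() path above implements back-off-and-retry acquisition: if the fake-request marker is still set, the mutex is dropped, the task sleeps briefly, and the whole acquisition restarts from the top instead of spinning with the mutex held. Stripped to its skeleton (msleep() is used here for clarity; the real code sleeps via schedule_timeout()):

	static void get_rpc_lock_sketch(struct mdc_rpc_lock *lck,
					struct lookup_intent *it)
	{
	again:
		mutex_lock(&lck->rpcl_mutex);
		if (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
			/* rare: fake requests still draining; don't hold
			 * the mutex while waiting, back off and retry */
			mutex_unlock(&lck->rpcl_mutex);
			msleep(250);
			goto again;
		}
		lck->rpcl_it = it;
	}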
*/ # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" # endif -# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" +# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" # endif # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) # error "PTLRPC_MAX_BRW_SIZE too big" @@ -419,16 +421,18 @@ struct ptlrpc_reply_state { /** A spinlock to protect the reply state flags */ spinlock_t rs_lock; /** Reply state flags */ - unsigned long rs_difficult:1; /* ACK/commit stuff */ + unsigned long rs_difficult:1; /* ACK/commit stuff */ unsigned long rs_no_ack:1; /* no ACK, even for - difficult requests */ + * difficult requests + */ unsigned long rs_scheduled:1; /* being handled? */ unsigned long rs_scheduled_ever:1;/* any schedule attempts? */ unsigned long rs_handled:1; /* been handled yet? */ unsigned long rs_on_net:1; /* reply_out_callback pending? */ unsigned long rs_prealloc:1; /* rs from prealloc list */ unsigned long rs_committed:1;/* the transaction was committed - * and the rs was dispatched */ + * and the rs was dispatched + */ /** Size of the state */ int rs_size; /** opcode */ @@ -463,7 +467,7 @@ struct ptlrpc_reply_state { /** Handles of locks awaiting client reply ACK */ struct lustre_handle rs_locks[RS_MAX_LOCKS]; /** Lock modes of locks in \a rs_locks */ - ldlm_mode_t rs_modes[RS_MAX_LOCKS]; + enum ldlm_mode rs_modes[RS_MAX_LOCKS]; }; struct ptlrpc_thread; @@ -1181,7 +1185,7 @@ struct nrs_fifo_req { * purpose of this object is to hold references to the request's resources * for the lifetime of the request, and to hold properties that policies use * use for determining the request's scheduling priority. - * */ + */ struct ptlrpc_nrs_request { /** * The request's resource hierarchy. @@ -1321,15 +1325,17 @@ struct ptlrpc_request { /* do not resend request on -EINPROGRESS */ rq_no_retry_einprogress:1, /* allow the req to be sent if the import is in recovery - * status */ + * status + */ rq_allow_replay:1; unsigned int rq_nr_resend; enum rq_phase rq_phase; /* one of RQ_PHASE_* */ enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */ - atomic_t rq_refcount;/* client-side refcount for SENT race, - server-side refcount for multiple replies */ + atomic_t rq_refcount; /* client-side refcount for SENT race, + * server-side refcount for multiple replies + */ /** Portal to which this request would be sent */ short rq_request_portal; /* XXX FIXME bug 249 */ @@ -1363,7 +1369,8 @@ struct ptlrpc_request { /** * security and encryption data - * @{ */ + * @{ + */ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ struct list_head rq_ctx_chain; /**< link to waited ctx */ @@ -1477,7 +1484,8 @@ struct ptlrpc_request { /** when request must finish. volatile * so that servers' early reply updates to the deadline aren't - * kept in per-cpu cache */ + * kept in per-cpu cache + */ volatile time64_t rq_deadline; /** when req reply unlink must finish. 
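The preprocessor checks above are compile-time sanity tests: because the operands are integer-constant expressions, cpp can evaluate the arithmetic itself, and the power-of-two test uses the classic x & (x - 1) trick. The same pattern in isolation, with illustrative names:

	#define BULK_BITS	2
	#define BULK_COUNT	(1U << BULK_BITS)

	/* x is a power of two iff clearing its lowest set bit leaves zero */
	#if ((BULK_COUNT & (BULK_COUNT - 1)) != 0)
	# error "BULK_COUNT isn't a power of two"
	#endif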
*/ time64_t rq_reply_deadline; @@ -1518,7 +1526,7 @@ struct ptlrpc_request { static inline int ptlrpc_req_interpret(const struct lu_env *env, struct ptlrpc_request *req, int rc) { - if (req->rq_interpret_reply != NULL) { + if (req->rq_interpret_reply) { req->rq_status = req->rq_interpret_reply(env, req, &req->rq_async_args, rc); @@ -1678,7 +1686,8 @@ do { \ /** * This is the debug print function you need to use to print request structure * content into lustre debug log. - * for most callers (level is a constant) this is resolved at compile time */ + * for most callers (level is a constant) this is resolved at compile time + */ #define DEBUG_REQ(level, req, fmt, args...) \ do { \ if ((level) & (D_ERROR | D_WARNING)) { \ @@ -1947,7 +1956,7 @@ struct ptlrpc_service_ops { * or general metadata service for MDS. */ struct ptlrpc_service { - /** serialize /proc operations */ + /** serialize sysfs operations */ spinlock_t srv_lock; /** most often accessed fields */ /** chain thru all services */ @@ -2101,7 +2110,8 @@ struct ptlrpc_service_part { /** NRS head for regular requests */ struct ptlrpc_nrs scp_nrs_reg; /** NRS head for HP requests; this is only valid for services that can - * handle HP requests */ + * handle HP requests + */ struct ptlrpc_nrs *scp_nrs_hp; /** AT stuff */ @@ -2141,8 +2151,8 @@ struct ptlrpc_service_part { #define ptlrpc_service_for_each_part(part, i, svc) \ for (i = 0; \ i < (svc)->srv_ncpts && \ - (svc)->srv_parts != NULL && \ - ((part) = (svc)->srv_parts[i]) != NULL; i++) + (svc)->srv_parts && \ + ((part) = (svc)->srv_parts[i]); i++) /** * Declaration of ptlrpcd control structure @@ -2259,7 +2269,6 @@ static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc, static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, const struct ptlrpc_nrs_pol_desc *desc) { - LASSERT(desc->pd_compat_svc_name != NULL); return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; } @@ -2303,7 +2312,6 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) struct ptlrpc_bulk_desc *desc; int rc; - LASSERT(req != NULL); desc = req->rq_bulk; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && @@ -2374,14 +2382,14 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, const struct req_format *format); struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, - struct ptlrpc_request_pool *, - const struct req_format *format); + struct ptlrpc_request_pool *, + const struct req_format *); void ptlrpc_request_free(struct ptlrpc_request *request); int ptlrpc_request_pack(struct ptlrpc_request *request, __u32 version, int opcode); -struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, - const struct req_format *format, - __u32 version, int opcode); +struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *, + const struct req_format *, + __u32, int); int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); @@ -2462,7 +2470,8 @@ struct ptlrpc_service_thr_conf { /* "soft" limit for total threads number */ unsigned int tc_nthrs_max; /* user specified threads number, it will be validated due to - * other members of this structure. */ + * other members of this structure. 
+ */ unsigned int tc_nthrs_user; /* set NUMA node affinity for service threads */ unsigned int tc_cpu_affinity; @@ -2500,14 +2509,12 @@ struct ptlrpc_service_conf { */ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); -struct ptlrpc_service *ptlrpc_register_service( - struct ptlrpc_service_conf *conf, - struct kset *parent, - struct dentry *debugfs_entry); +struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf, + struct kset *parent, + struct dentry *debugfs_entry); int ptlrpc_start_threads(struct ptlrpc_service *svc); int ptlrpc_unregister_service(struct ptlrpc_service *service); -int liblustre_check_services(void *arg); int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); @@ -2536,7 +2543,7 @@ int ptlrpc_reconnect_import(struct obd_import *imp); int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, int index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - int index); + int index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); @@ -2726,7 +2733,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) static inline void ptlrpc_client_wake_req(struct ptlrpc_request *req) { - if (req->rq_set == NULL) + if (!req->rq_set) wake_up(&req->rq_reply_waitq); else wake_up(&req->rq_set->set_waitq); @@ -2750,7 +2757,7 @@ ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) /* Should only be called once per req */ static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req) { - if (req->rq_reply_state == NULL) + if (!req->rq_reply_state) return; /* shouldn't occur */ ptlrpc_rs_decref(req->rq_reply_state); req->rq_reply_state = NULL; @@ -2807,7 +2814,6 @@ ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt) static inline struct ptlrpc_service * ptlrpc_req2svc(struct ptlrpc_request *req) { - LASSERT(req->rq_rqbd != NULL); return req->rq_rqbd->rqbd_svcpt->scp_service; } diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h index 46a662f89..b2e67fcf9 100644 --- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h +++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h @@ -107,8 +107,8 @@ void req_capsule_set_size(struct req_capsule *pill, const struct req_msg_field *field, enum req_location loc, int size); int req_capsule_get_size(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc); + const struct req_msg_field *field, + enum req_location loc); int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc); int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt, enum req_location loc); @@ -130,7 +130,6 @@ void req_layout_fini(void); extern struct req_format RQF_OBD_PING; extern struct req_format RQF_OBD_SET_INFO; extern struct req_format RQF_SEC_CTX; -extern struct req_format RQF_OBD_IDX_READ; /* MGS req_format */ extern struct req_format RQF_MGS_TARGET_REG; extern struct req_format RQF_MGS_SET_INFO; @@ -146,7 +145,6 @@ extern struct req_format RQF_MDS_GETSTATUS; extern struct req_format RQF_MDS_SYNC; extern struct req_format RQF_MDS_GETXATTR; extern struct req_format RQF_MDS_GETATTR; -extern struct req_format RQF_UPDATE_OBJ; /* * This is format of direct (non-intent) MDS_GETATTR_NAME request. 
@@ -177,7 +175,6 @@ extern struct req_format RQF_MDS_REINT_SETXATTR; extern struct req_format RQF_MDS_QUOTACHECK; extern struct req_format RQF_MDS_QUOTACTL; extern struct req_format RQF_QC_CALLBACK; -extern struct req_format RQF_QUOTA_DQACQ; extern struct req_format RQF_MDS_SWAP_LAYOUTS; /* MDS hsm formats */ extern struct req_format RQF_MDS_HSM_STATE_GET; @@ -220,7 +217,6 @@ extern struct req_format RQF_LDLM_INTENT_OPEN; extern struct req_format RQF_LDLM_INTENT_CREATE; extern struct req_format RQF_LDLM_INTENT_UNLINK; extern struct req_format RQF_LDLM_INTENT_GETXATTR; -extern struct req_format RQF_LDLM_INTENT_QUOTA; extern struct req_format RQF_LDLM_CANCEL; extern struct req_format RQF_LDLM_CALLBACK; extern struct req_format RQF_LDLM_CP_CALLBACK; @@ -252,7 +248,6 @@ extern struct req_msg_field RMF_SETINFO_KEY; extern struct req_msg_field RMF_GETINFO_VAL; extern struct req_msg_field RMF_GETINFO_VALLEN; extern struct req_msg_field RMF_GETINFO_KEY; -extern struct req_msg_field RMF_IDX_INFO; extern struct req_msg_field RMF_CLOSE_DATA; /* @@ -277,7 +272,6 @@ extern struct req_msg_field RMF_CAPA1; extern struct req_msg_field RMF_CAPA2; extern struct req_msg_field RMF_OBD_QUOTACHECK; extern struct req_msg_field RMF_OBD_QUOTACTL; -extern struct req_msg_field RMF_QUOTA_BODY; extern struct req_msg_field RMF_STRING; extern struct req_msg_field RMF_SWAP_LAYOUTS; extern struct req_msg_field RMF_MDS_HSM_PROGRESS; @@ -322,9 +316,6 @@ extern struct req_msg_field RMF_MGS_CONFIG_RES; /* generic uint32 */ extern struct req_msg_field RMF_U32; -/* OBJ update format */ -extern struct req_msg_field RMF_UPDATE; -extern struct req_msg_field RMF_UPDATE_REPLY; /** @} req_layout */ #endif /* _LUSTRE_REQ_LAYOUT_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h index dd1033be6..01b4e6726 100644 --- a/drivers/staging/lustre/lustre/include/lustre_sec.h +++ b/drivers/staging/lustre/lustre/include/lustre_sec.h @@ -351,26 +351,23 @@ struct ptlrpc_ctx_ops { /** * To determine whether it's suitable to use the \a ctx for \a vcred. */ - int (*match) (struct ptlrpc_cli_ctx *ctx, - struct vfs_cred *vcred); + int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred); /** * To bring the \a ctx uptodate. */ - int (*refresh) (struct ptlrpc_cli_ctx *ctx); + int (*refresh)(struct ptlrpc_cli_ctx *ctx); /** * Validate the \a ctx. */ - int (*validate) (struct ptlrpc_cli_ctx *ctx); + int (*validate)(struct ptlrpc_cli_ctx *ctx); /** * Force the \a ctx to die. */ - void (*force_die) (struct ptlrpc_cli_ctx *ctx, - int grace); - int (*display) (struct ptlrpc_cli_ctx *ctx, - char *buf, int bufsize); + void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace); + int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize); /** * Sign the request message using \a ctx. @@ -382,8 +379,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign(). */ - int (*sign) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Verify the reply message using \a ctx. @@ -395,8 +391,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify(). */ - int (*verify) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Encrypt the request message using \a ctx. @@ -408,8 +403,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_seal(). 
*/ - int (*seal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Decrypt the reply message using \a ctx. @@ -421,8 +415,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_unseal(). */ - int (*unseal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Wrap bulk request data. This is called before wrapping RPC @@ -444,9 +437,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap bulk reply data. This is called after wrapping RPC @@ -461,9 +454,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; #define PTLRPC_CTX_NEW_BIT (0) /* newly created */ @@ -515,9 +508,9 @@ struct ptlrpc_sec_cops { * * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr(). */ - struct ptlrpc_sec * (*create_sec) (struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx, - struct sptlrpc_flavor *flavor); + struct ptlrpc_sec *(*create_sec)(struct obd_import *imp, + struct ptlrpc_svc_ctx *ctx, + struct sptlrpc_flavor *flavor); /** * Destructor of ptlrpc_sec. When called, refcount has been dropped @@ -525,7 +518,7 @@ struct ptlrpc_sec_cops { * * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr(). */ - void (*destroy_sec) (struct ptlrpc_sec *sec); + void (*destroy_sec)(struct ptlrpc_sec *sec); /** * Notify that this ptlrpc_sec is going to die. Optionally, policy @@ -534,7 +527,7 @@ struct ptlrpc_sec_cops { * * \see plain_kill_sec(), gss_sec_kill(). */ - void (*kill_sec) (struct ptlrpc_sec *sec); + void (*kill_sec)(struct ptlrpc_sec *sec); /** * Given \a vcred, lookup and/or create its context. The policy module @@ -544,10 +537,9 @@ struct ptlrpc_sec_cops { * * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr(). */ - struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, - int remove_dead); + struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec, + struct vfs_cred *vcred, + int create, int remove_dead); /** * Called then the reference of \a ctx dropped to 0. The policy module @@ -559,9 +551,8 @@ struct ptlrpc_sec_cops { * * \see plain_release_ctx(), gss_sec_release_ctx_kr(). */ - void (*release_ctx) (struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx, - int sync); + void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, + int sync); /** * Flush the context cache. @@ -573,11 +564,8 @@ struct ptlrpc_sec_cops { * * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr(). */ - int (*flush_ctx_cache) - (struct ptlrpc_sec *sec, - uid_t uid, - int grace, - int force); + int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, + int grace, int force); /** * Called periodically by garbage collector to remove dead contexts @@ -585,7 +573,7 @@ struct ptlrpc_sec_cops { * * \see gss_sec_gc_ctx_kr(). 
*/ - void (*gc_ctx) (struct ptlrpc_sec *sec); + void (*gc_ctx)(struct ptlrpc_sec *sec); /** * Given an context \a ctx, install a corresponding reverse service @@ -593,9 +581,8 @@ struct ptlrpc_sec_cops { * XXX currently it's only used by GSS module, maybe we should remove * this from general API. */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec, + struct ptlrpc_cli_ctx *ctx); /** * To allocate request buffer for \a req. @@ -608,9 +595,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf(). */ - int (*alloc_reqbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free request buffer for \a req. @@ -619,8 +605,7 @@ struct ptlrpc_sec_cops { * * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf(). */ - void (*free_reqbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To allocate reply buffer for \a req. @@ -632,9 +617,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf(). */ - int (*alloc_repbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free reply buffer for \a req. @@ -645,8 +629,7 @@ struct ptlrpc_sec_cops { * * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf(). */ - void (*free_repbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To expand the request buffer of \a req, thus the \a segment in @@ -658,15 +641,13 @@ struct ptlrpc_sec_cops { * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(), * gss_enlarge_reqbuf(). */ - int (*enlarge_reqbuf) - (struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize); + int (*enlarge_reqbuf)(struct ptlrpc_sec *sec, + struct ptlrpc_request *req, + int segment, int newsize); /* * misc */ - int (*display) (struct ptlrpc_sec *sec, - struct seq_file *seq); + int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq); }; /** @@ -690,7 +671,7 @@ struct ptlrpc_sec_sops { * * \see null_accept(), plain_accept(), gss_svc_accept_kr(). */ - int (*accept) (struct ptlrpc_request *req); + int (*accept)(struct ptlrpc_request *req); /** * Perform security transformation upon reply message. @@ -702,15 +683,14 @@ struct ptlrpc_sec_sops { * * \see null_authorize(), plain_authorize(), gss_svc_authorize(). */ - int (*authorize) (struct ptlrpc_request *req); + int (*authorize)(struct ptlrpc_request *req); /** * Invalidate server context \a ctx. * * \see gss_svc_invalidate_ctx(). */ - void (*invalidate_ctx) - (struct ptlrpc_svc_ctx *ctx); + void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Allocate a ptlrpc_reply_state. @@ -724,28 +704,26 @@ struct ptlrpc_sec_sops { * * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs(). */ - int (*alloc_rs) (struct ptlrpc_request *req, - int msgsize); + int (*alloc_rs)(struct ptlrpc_request *req, int msgsize); /** * Free a ptlrpc_reply_state. */ - void (*free_rs) (struct ptlrpc_reply_state *rs); + void (*free_rs)(struct ptlrpc_reply_state *rs); /** * Release the server context \a ctx. 
* * \see gss_svc_free_ctx(). */ - void (*free_ctx) (struct ptlrpc_svc_ctx *ctx); + void (*free_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Install a reverse context based on the server context \a ctx. * * \see gss_svc_install_rctx_kr(). */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx); /** * Prepare buffer for incoming bulk write. @@ -755,24 +733,24 @@ struct ptlrpc_sec_sops { * * \see gss_svc_prep_bulk(). */ - int (*prep_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*prep_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap the bulk write data. * * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Wrap the bulk read data. * * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; struct ptlrpc_sec_policy { diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h index caa4da12f..64559a16f 100644 --- a/drivers/staging/lustre/lustre/include/lustre_ver.h +++ b/drivers/staging/lustre/lustre/include/lustre_ver.h @@ -1,26 +1,20 @@ #ifndef _LUSTRE_VER_H_ #define _LUSTRE_VER_H_ -/* This file automatically generated from lustre/include/lustre_ver.h.in, - * based on parameters in lustre/autoconf/lustre-version.ac. - * Changes made directly to this file will be lost. */ #define LUSTRE_MAJOR 2 -#define LUSTRE_MINOR 3 -#define LUSTRE_PATCH 64 +#define LUSTRE_MINOR 4 +#define LUSTRE_PATCH 60 #define LUSTRE_FIX 0 -#define LUSTRE_VERSION_STRING "2.3.64" +#define LUSTRE_VERSION_STRING "2.4.60" #define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \ LUSTRE_MINOR, LUSTRE_PATCH, \ LUSTRE_FIX) -/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches - * by this amount (set in lustre/autoconf/lustre-version.ac). */ -#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32) - -/* If lustre version of client and servers it connects to differs by more +/* + * If lustre version of client and servers it connects to differs by more * than this amount, client would issue a warning. - * (set in lustre/autoconf/lustre-version.ac) */ + */ #define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0) #endif diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index bcbe61301..4264d9765 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -90,7 +90,8 @@ struct lov_stripe_md { pid_t lsm_lock_owner; /* debugging */ /* maximum possible file size, might change as OSTs status changes, - * e.g. disconnected, deactivated */ + * e.g. disconnected, deactivated + */ __u64 lsm_maxbytes; struct { /* Public members. */ @@ -123,7 +124,7 @@ static inline bool lsm_is_released(struct lov_stripe_md *lsm) static inline bool lsm_has_objects(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return false; if (lsm_is_released(lsm)) return false; @@ -159,7 +160,8 @@ struct obd_info { /* An update callback which is called to update some data on upper * level. E.g. 
it is used for update lsm->lsm_oinfo at every received * request in osc level for enqueue requests. It is also possible to - * update some caller data from LOV layer if needed. */ + * update some caller data from LOV layer if needed. + */ obd_enqueue_update_f oi_cb_up; }; @@ -216,7 +218,6 @@ struct timeout_item { }; #define OSC_MAX_RIF_DEFAULT 8 -#define MDS_OSC_MAX_RIF_DEFAULT 50 #define OSC_MAX_RIF_MAX 256 #define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4) #define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */ @@ -241,7 +242,8 @@ struct client_obd { struct obd_import *cl_import; /* ptlrpc connection state */ int cl_conn_count; /* max_mds_easize is purely a performance thing so we don't have to - * call obd_size_diskmd() all the time. */ + * call obd_size_diskmd() all the time. + */ int cl_default_mds_easize; int cl_max_mds_easize; int cl_default_mds_cookiesize; @@ -261,7 +263,8 @@ struct client_obd { /* since we allocate grant by blocks, we don't know how many grant will * be used to add a page into cache. As a solution, we reserve maximum * grant before trying to dirty a page and unreserve the rest. - * See osc_{reserve|unreserve}_grant for details. */ + * See osc_{reserve|unreserve}_grant for details. + */ long cl_reserved_grant; struct list_head cl_cache_waiters; /* waiting for cache/grant */ unsigned long cl_next_shrink_grant; /* jiffies */ @@ -269,14 +272,16 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ + * the extent size. A chunk is max(PAGE_SIZE, OST block size) + */ int cl_chunkbits; int cl_chunk; int cl_extent_tax; /* extent overhead, by bytes */ /* keep track of objects that have lois that contain pages which * have been queued for async brw. this lock also protects the - * lists of osc_client_pages that hang off of the loi */ + * lists of osc_client_pages that hang off of the loi + */ /* * ->cl_loi_list_lock protects consistency of * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and @@ -295,14 +300,14 @@ struct client_obd { * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. 
*/ - client_obd_lock_t cl_loi_list_lock; + struct client_obd_lock cl_loi_list_lock; struct list_head cl_loi_ready_list; struct list_head cl_loi_hp_ready_list; struct list_head cl_loi_write_list; struct list_head cl_loi_read_list; int cl_r_in_flight; int cl_w_in_flight; - /* just a sum of the loi/lop pending numbers to be exported by /proc */ + /* just a sum of the loi/lop pending numbers to be exported by sysfs */ atomic_t cl_pending_w_pages; atomic_t cl_pending_r_pages; __u32 cl_max_pages_per_rpc; @@ -322,7 +327,7 @@ struct client_obd { atomic_t cl_lru_shrinkers; atomic_t cl_lru_in_list; struct list_head cl_lru_list; /* lru page list */ - client_obd_lock_t cl_lru_list_lock; /* page list protector */ + struct client_obd_lock cl_lru_list_lock; /* page list protector */ /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ atomic_t cl_destroy_in_flight; @@ -340,7 +345,7 @@ struct client_obd { /* supported checksum types that are worked out at connect time */ __u32 cl_supp_cksum_types; /* checksum algorithm to be used */ - cksum_type_t cl_cksum_type; + enum cksum_type cl_cksum_type; /* also protected by the poorly named _loi_list_lock lock above */ struct osc_async_rc cl_ar; @@ -375,14 +380,12 @@ struct echo_client_obd { spinlock_t ec_lock; struct list_head ec_objects; struct list_head ec_locks; - int ec_nstripes; __u64 ec_unique; }; /* Generic subset of OSTs */ struct ost_pool { - __u32 *op_array; /* array of index of - lov_obd->lov_tgts */ + __u32 *op_array; /* array of index of lov_obd->lov_tgts */ unsigned int op_count; /* number of OSTs in the array */ unsigned int op_size; /* allocated size of lp_array */ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */ @@ -415,14 +418,16 @@ struct lov_qos { struct lov_qos_rr lq_rr; /* round robin qos data */ unsigned long lq_dirty:1, /* recalc qos data */ lq_same_space:1,/* the ost's all have approx. 
- the same space avail */ + * the same space avail + */ lq_reset:1, /* zero current penalties */ lq_statfs_in_progress:1; /* statfs op in progress */ /* qos statfs data */ struct lov_statfs_data *lq_statfs_data; - wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs - * requests completion */ + wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs + * requests completion + */ }; struct lov_tgt_desc { @@ -450,16 +455,16 @@ struct pool_desc { struct lov_qos_rr pool_rr; /* round robin qos */ struct hlist_node pool_hash; /* access by poolname */ struct list_head pool_list; /* serial access */ - struct dentry *pool_debugfs_entry; /* file in /proc */ + struct dentry *pool_debugfs_entry; /* file in debugfs */ struct obd_device *pool_lobd; /* obd of the lov/lod to which - * this pool belongs */ + * this pool belongs + */ }; struct lov_obd { struct lov_desc desc; struct lov_tgt_desc **lov_tgts; /* sparse array */ - struct ost_pool lov_packed; /* all OSTs in a packed - array */ + struct ost_pool lov_packed; /* all OSTs in a packed array */ struct mutex lov_lock; struct obd_connect_data lov_ocd; atomic_t lov_refcount; @@ -596,34 +601,6 @@ struct obd_trans_info { struct obd_uuid *oti_ost_uuid; }; -static inline void oti_init(struct obd_trans_info *oti, - struct ptlrpc_request *req) -{ - if (oti == NULL) - return; - memset(oti, 0, sizeof(*oti)); - - if (req == NULL) - return; - - oti->oti_xid = req->rq_xid; - /** VBR: take versions from request */ - if (req->rq_reqmsg != NULL && - lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) { - __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg); - - oti->oti_pre_version = pre_version ? pre_version[0] : 0; - oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg); - } - - /** called from mds_create_objects */ - if (req->rq_repmsg != NULL) - oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); - oti->oti_thread = req->rq_svc_thread; - if (req->rq_reqmsg != NULL) - oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg); -} - static inline void oti_alloc_cookies(struct obd_trans_info *oti, int num_cookies) { @@ -681,7 +658,7 @@ enum obd_notify_event { /* * Data structure used to pass obd_notify()-event to non-obd listeners (llite - * and liblustre being main examples). + * being main example). */ struct obd_notify_upcall { int (*onu_upcall)(struct obd_device *host, struct obd_device *watched, @@ -728,21 +705,23 @@ struct obd_device { unsigned long obd_attached:1, /* finished attach */ obd_set_up:1, /* finished setup */ obd_version_recov:1, /* obd uses version checking */ - obd_replayable:1, /* recovery is enabled; inform clients */ - obd_no_transno:1, /* no committed-transno notification */ + obd_replayable:1,/* recovery is enabled; inform clients */ + obd_no_transno:1, /* no committed-transno notification */ obd_no_recov:1, /* fail instead of retry messages */ obd_stopping:1, /* started cleanup */ obd_starting:1, /* started setup */ obd_force:1, /* cleanup with > 0 obd refcount */ - obd_fail:1, /* cleanup with failover */ - obd_async_recov:1, /* allow asynchronous orphan cleanup */ + obd_fail:1, /* cleanup with failover */ + obd_async_recov:1, /* allow asynchronous orphan cleanup */ obd_no_conn:1, /* deny new connections */ obd_inactive:1, /* device active/inactive - * (for /proc/status only!!) */ + * (for sysfs status only!!) + */ obd_no_ir:1, /* no imperative recovery. 
*/ obd_process_conf:1; /* device is processing mgs config */ /* use separate field as it is set in interrupt to don't mess with - * protection of other bits using _bh lock */ + * protection of other bits using _bh lock + */ unsigned long obd_recovery_expired:1; /* uuid-export hash body */ struct cfs_hash *obd_uuid_hash; @@ -935,7 +914,8 @@ struct md_op_data { __u32 op_npages; /* used to transfer info between the stacks of MD client - * see enum op_cli_flags */ + * see enum op_cli_flags + */ __u32 op_cli_flags; /* File object data version for HSM release, on client */ @@ -957,7 +937,7 @@ struct md_enqueue_info { struct lustre_handle mi_lockh; struct inode *mi_dir; int (*mi_cb)(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, int rc); + struct md_enqueue_info *minfo, int rc); __u64 mi_cbdata; unsigned int mi_generation; }; @@ -965,7 +945,7 @@ struct md_enqueue_info { struct obd_ops { struct module *owner; int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg); + void *karg, void __user *uarg); int (*get_info)(const struct lu_env *env, struct obd_export *, __u32 keylen, void *key, __u32 *vallen, void *val, struct lov_stripe_md *lsm); @@ -987,7 +967,8 @@ struct obd_ops { /* connect to the target device with given connection * data. @ocd->ocd_connect_flags is modified to reflect flags actually * granted by the target, which are guaranteed to be a subset of flags - * asked for. If @ocd == NULL, use default parameters. */ + * asked for. If @ocd == NULL, use default parameters. + */ int (*connect)(const struct lu_env *env, struct obd_export **exp, struct obd_device *src, struct obd_uuid *cluuid, struct obd_connect_data *ocd, @@ -1083,7 +1064,8 @@ struct obd_ops { /* * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c. - * Also, add a wrapper function in include/linux/obd_class.h. */ + * Also, add a wrapper function in include/linux/obd_class.h. 
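+ *
+ * A hedged sketch of those coordinated edits, using a hypothetical op
+ * named "frobnicate":
+ *	1. add the method pointer above:
+ *		int (*frobnicate)(struct obd_export *exp);
+ *	2. add LPROCFS_OBD_OP_INIT(num_private_stats, stats, frobnicate)
+ *	   to lprocfs_alloc_obd_stats();
+ *	3. add an inline obd_frobnicate() wrapper in obd_class.h built on
+ *	   EXP_CHECK_DT_OP(), EXP_COUNTER_INCREMENT() and OBP().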
+ */ }; enum { @@ -1189,14 +1171,14 @@ struct md_ops { struct obd_client_handle *); int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *); - ldlm_mode_t (*lock_match)(struct obd_export *, __u64, - const struct lu_fid *, ldlm_type_t, - ldlm_policy_data_t *, ldlm_mode_t, - struct lustre_handle *); + enum ldlm_mode (*lock_match)(struct obd_export *, __u64, + const struct lu_fid *, enum ldlm_type, + ldlm_policy_data_t *, enum ldlm_mode, + struct lustre_handle *); int (*cancel_unused)(struct obd_export *, const struct lu_fid *, - ldlm_policy_data_t *, ldlm_mode_t, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *, enum ldlm_mode, + enum ldlm_cancel_flags flags, void *opaque); int (*get_remote_perm)(struct obd_export *, const struct lu_fid *, __u32, struct ptlrpc_request **); @@ -1224,9 +1206,9 @@ struct lsm_operations { void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *, u64 *); int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, - __u16 *stripe_count); + __u16 *stripe_count); int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm); + struct lov_mds_md *lmm); }; extern const struct lsm_operations lsm_v1_ops; @@ -1253,7 +1235,7 @@ static inline struct md_open_data *obd_mod_alloc(void) struct md_open_data *mod; mod = kzalloc(sizeof(*mod), GFP_NOFS); - if (mod == NULL) + if (!mod) return NULL; atomic_set(&mod->mod_refcount, 1); return mod; @@ -1300,7 +1282,7 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return false; /* caller does not care of idx */ - if (idx == NULL) + if (!idx) return true; /* volatile file, the MDT can be set from name */ @@ -1327,7 +1309,8 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return true; bad_format: /* bad format of mdt idx, we cannot return an error - * to caller so we use hash algo */ + * to caller so we use hash algo + */ CERROR("Bad volatile file name format: %s\n", name + LUSTRE_VOLATILE_HDR_LEN); return false; @@ -1335,8 +1318,7 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { - LASSERT(obd != NULL); - return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT; } #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h index 01db60405..637fa2211 100644 --- a/drivers/staging/lustre/lustre/include/obd_cksum.h +++ b/drivers/staging/lustre/lustre/include/obd_cksum.h @@ -37,7 +37,7 @@ #include "../../include/linux/libcfs/libcfs.h" #include "lustre/lustre_idl.h" -static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) +static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type) { switch (cksum_type) { case OBD_CKSUM_CRC32: @@ -63,8 +63,9 @@ static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) * In case of an unsupported types/flags we fall back to ADLER * because that is supported by all clients since 1.8 * - * In case multiple algorithms are supported the best one is used. */ -static inline u32 cksum_type_pack(cksum_type_t cksum_type) + * In case multiple algorithms are supported the best one is used. 
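+ *
+ * Usage sketch (illustrative; "server_types" is a hypothetical stand-in
+ * for the mask negotiated at connect time, cl_supp_cksum_types):
+ *
+ *	enum cksum_type common = cksum_types_supported_client() & server_types;
+ *	enum cksum_type best = cksum_type_select(common);
+ *	u32 wire_flag = cksum_type_pack(best);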
+ */ +static inline u32 cksum_type_pack(enum cksum_type cksum_type) { unsigned int performance = 0, tmp; u32 flag = OBD_FL_CKSUM_ADLER; @@ -98,7 +99,7 @@ static inline u32 cksum_type_pack(cksum_type_t cksum_type) return flag; } -static inline cksum_type_t cksum_type_unpack(u32 o_flags) +static inline enum cksum_type cksum_type_unpack(u32 o_flags) { switch (o_flags & OBD_FL_CKSUM_ALL) { case OBD_FL_CKSUM_CRC32C: @@ -116,9 +117,9 @@ static inline cksum_type_t cksum_type_unpack(u32 o_flags) * 1.8 supported ADLER it is base and not depend on hw * Client uses all available local algos */ -static inline cksum_type_t cksum_types_supported_client(void) +static inline enum cksum_type cksum_types_supported_client(void) { - cksum_type_t ret = OBD_CKSUM_ADLER; + enum cksum_type ret = OBD_CKSUM_ADLER; CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n", cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)), @@ -139,14 +140,16 @@ static inline cksum_type_t cksum_types_supported_client(void) * Currently, calling cksum_type_pack() with a mask will return the fastest * checksum type due to its benchmarking at libcfs module load. * Caution is advised, however, since what is fastest on a single client may - * not be the fastest or most efficient algorithm on the server. */ -static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types) + * not be the fastest or most efficient algorithm on the server. + */ +static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types) { return cksum_type_unpack(cksum_type_pack(cksum_types)); } /* Checksum algorithm names. Must be defined in the same order as the - * OBD_CKSUM_* flags. */ + * OBD_CKSUM_* flags. + */ #define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"} #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h index 97d803975..706869f8c 100644 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ b/drivers/staging/lustre/lustre/include/obd_class.h @@ -45,18 +45,22 @@ #include "lprocfs_status.h" #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay - * and resends for avoid deadlocks */ + * and resends for avoid deadlocks + */ #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update - * obd_osfs_age */ + * obd_osfs_age + */ #define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd * instead of a specific set. This * means that we cannot rely on the set * interpret routine to be called. * lov_statfs_fini() must thus be called - * by the request interpret routine */ + * by the request interpret routine + */ #define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving - * information from MDT0. */ -#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ + * information from MDT0. 
+ */ +#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ /* OBD Device Declarations */ extern struct obd_device *obd_devs[MAX_OBD_DEVICES]; @@ -83,10 +87,10 @@ int class_name2dev(const char *name); struct obd_device *class_name2obd(const char *name); int class_uuid2dev(struct obd_uuid *uuid); struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid); + const char *typ_name, + struct obd_uuid *grp_uuid); struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, - int *next); + int *next); struct obd_device *class_num2obd(int num); int class_notify_sptlrpc_conf(const char *fsname, int namelen); @@ -160,8 +164,9 @@ struct config_llog_data { struct mutex cld_lock; int cld_type; unsigned int cld_stopping:1, /* we were told to stop - * watching */ - cld_lostlock:1; /* lock not requeued */ + * watching + */ + cld_lostlock:1; /* lock not requeued */ char cld_logname[0]; }; @@ -193,7 +198,7 @@ extern void (*class_export_dump_hook)(struct obd_export *); struct obd_export *class_export_get(struct obd_export *exp); void class_export_put(struct obd_export *exp); struct obd_export *class_new_export(struct obd_device *obddev, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); void class_unlink_export(struct obd_export *exp); struct obd_import *class_import_get(struct obd_import *); @@ -203,7 +208,7 @@ void class_destroy_import(struct obd_import *exp); void class_put_type(struct obd_type *type); int class_connect(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); int class_disconnect(struct obd_export *exp); void class_fail_export(struct obd_export *exp); int class_manual_cleanup(struct obd_device *obd); @@ -275,7 +280,8 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid); #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op /* Ensure obd_setup: used for cleanup which must be called - while obd is stopping */ + * while obd is stopping + */ static inline int obd_check_dev(struct obd_device *obd) { if (!obd) { @@ -306,7 +312,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct obd_ops *)(0))->iocontrol)) #define OBD_COUNTER_INCREMENT(obdx, op) \ - if ((obdx)->obd_stats != NULL) { \ + if ((obdx)->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -315,7 +321,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -329,7 +335,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct md_ops *)(0))->getstatus)) #define MD_COUNTER_INCREMENT(obdx, op) \ - if ((obd)->md_stats != NULL) { \ + if ((obd)->md_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ @@ -338,24 +344,24 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_MD_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \ lprocfs_counter_incr((export)->exp_obd->md_stats, 
coffset); \ - if ((export)->exp_md_stats != NULL) \ + if ((export)->exp_md_stats) \ lprocfs_counter_incr( \ (export)->exp_md_stats, coffset); \ } #define EXP_CHECK_MD_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -379,11 +385,11 @@ do { \ #define EXP_CHECK_DT_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -467,7 +473,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg) DECLARE_LU_VARS(ldt, d); ldt = obd->obd_type->typ_lu; - if (ldt != NULL) { + if (ldt) { struct lu_context session_ctx; struct lu_env env; @@ -509,7 +515,7 @@ static inline int obd_precleanup(struct obd_device *obd, return rc; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { if (cleanup_stage == OBD_CLEANUP_EXPORTS) { struct lu_env env; @@ -538,7 +544,7 @@ static inline int obd_cleanup(struct obd_device *obd) ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -558,7 +564,8 @@ static inline int obd_cleanup(struct obd_device *obd) static inline void obd_cleanup_client_import(struct obd_device *obd) { /* If we set up but never connected, the - client import will not have been cleaned. */ + * client import will not have been cleaned. + */ down_write(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) { struct obd_import *imp; @@ -586,7 +593,7 @@ obd_process_config(struct obd_device *obd, int datalen, void *data) obd->obd_process_conf = 1; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -674,7 +681,7 @@ static inline int obd_alloc_memmd(struct obd_export *exp, struct lov_stripe_md **mem_tgt) { LASSERT(mem_tgt); - LASSERT(*mem_tgt == NULL); + LASSERT(!*mem_tgt); return obd_unpackmd(exp, mem_tgt, NULL, 0); } @@ -767,7 +774,7 @@ static inline int obd_setattr_rqset(struct obd_export *exp, EXP_COUNTER_INCREMENT(exp, setattr_async); set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set); @@ -778,7 +785,8 @@ static inline int obd_setattr_rqset(struct obd_export *exp, } /* This adds all the requests into @set if @set != NULL, otherwise - all requests are sent asynchronously without waiting for response. */ + * all requests are sent asynchronously without waiting for response. + */ static inline int obd_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, @@ -848,7 +856,8 @@ static inline int obd_connect(const struct lu_env *env, { int rc; __u64 ocf = data ? 
data->ocd_connect_flags : 0; /* for post-condition - * check */ + * check + */ rc = obd_check_dev_active(obd); if (rc) @@ -858,7 +867,7 @@ static inline int obd_connect(const struct lu_env *env, rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata); /* check that only subset is granted */ - LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) == + LASSERT(ergo(data, (data->ocd_connect_flags & ocf) == data->ocd_connect_flags)); return rc; } @@ -871,8 +880,7 @@ static inline int obd_reconnect(const struct lu_env *env, void *localdata) { int rc; - __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition - * check */ + __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */ rc = obd_check_dev_active(obd); if (rc) @@ -882,8 +890,7 @@ static inline int obd_reconnect(const struct lu_env *env, rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata); /* check that only subset is granted */ - LASSERT(ergo(d != NULL, - (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); + LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); return rc; } @@ -998,7 +1005,7 @@ static inline int obd_init_export(struct obd_export *exp) { int rc = 0; - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, init_export)) rc = OBP(exp->exp_obd, init_export)(exp); return rc; @@ -1006,7 +1013,7 @@ static inline int obd_init_export(struct obd_export *exp) static inline int obd_destroy_export(struct obd_export *exp) { - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, destroy_export)) OBP(exp->exp_obd, destroy_export)(exp); return 0; @@ -1014,7 +1021,8 @@ static inline int obd_destroy_export(struct obd_export *exp) /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. + */ static inline int obd_statfs_async(struct obd_export *exp, struct obd_info *oinfo, __u64 max_age, @@ -1023,7 +1031,7 @@ static inline int obd_statfs_async(struct obd_export *exp, int rc = 0; struct obd_device *obd; - if (exp == NULL || exp->exp_obd == NULL) + if (!exp || !exp->exp_obd) return -EINVAL; obd = exp->exp_obd; @@ -1059,7 +1067,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp, int rc = 0; set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1073,7 +1081,8 @@ static inline int obd_statfs_rqset(struct obd_export *exp, /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. 
+ */ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_statfs *osfs, __u64 max_age, __u32 flags) @@ -1081,7 +1090,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; struct obd_device *obd = exp->exp_obd; - if (obd == NULL) + if (!obd) return -EINVAL; OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP); @@ -1155,7 +1164,7 @@ static inline int obd_adjust_kms(struct obd_export *exp, } static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { int rc; @@ -1205,9 +1214,10 @@ static inline int obd_notify(struct obd_device *obd, return rc; /* the check for async_recov is a complete hack - I'm hereby - overloading the meaning to also mean "this was called from - mds_postsetup". I know that my mds is able to handle notifies - by this point, and it needs to get them to execute mds_postrecov. */ + * overloading the meaning to also mean "this was called from + * mds_postsetup". I know that my mds is able to handle notifies + * by this point, and it needs to get them to execute mds_postrecov. + */ if (!obd->obd_set_up && !obd->obd_async_recov) { CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name); return -EINVAL; @@ -1241,7 +1251,7 @@ static inline int obd_notify_observer(struct obd_device *observer, * Also, call non-obd listener, if any */ onu = &observer->obd_upcall; - if (onu->onu_upcall != NULL) + if (onu->onu_upcall) rc2 = onu->onu_upcall(observer, observed, ev, onu->onu_owner, NULL); else @@ -1287,7 +1297,7 @@ static inline int obd_health_check(const struct lu_env *env, int rc; /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */ - if (obd == NULL || !OBT(obd)) { + if (!obd || !OBT(obd)) { CERROR("cleaned up obd\n"); return -EOPNOTSUPP; } @@ -1318,57 +1328,6 @@ static inline int obd_register_observer(struct obd_device *obd, return 0; } -#if 0 -static inline int obd_register_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb, - obd_pin_extent_cb pin_cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb); - - rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb); - return rc; -} - -static inline int obd_unregister_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb); - - rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb); - return rc; -} - -static inline int obd_register_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb); - - rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb); - return rc; -} - -static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb); - - rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb); - return rc; -} -#endif - /* metadata helpers */ static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid) { @@ -1392,7 +1351,7 @@ static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data, } static inline int md_null_inode(struct obd_export *exp, - 
const struct lu_fid *fid) + const struct lu_fid *fid) { int rc; @@ -1657,8 +1616,8 @@ static inline int md_set_lock_data(struct obd_export *exp, static inline int md_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { int rc; @@ -1671,12 +1630,12 @@ static inline int md_cancel_unused(struct obd_export *exp, return rc; } -static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, - ldlm_type_t type, - ldlm_policy_data_t *policy, - ldlm_mode_t mode, - struct lustre_handle *lockh) +static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { EXP_CHECK_MD_OP(exp, lock_match); EXP_MD_COUNTER_INCREMENT(exp, lock_match); @@ -1759,7 +1718,8 @@ struct lwp_register_item { /* I'm as embarrassed about this as you are. * * // XXX do not look into _superhack with remaining eye - * // XXX if this were any uglier, I'd get my own show on MTV */ + * // XXX if this were any uglier, I'd get my own show on MTV + */ extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c); /* obd_mount.c */ @@ -1774,7 +1734,7 @@ void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out); /* lustre_peer.c */ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index); int class_add_uuid(const char *uuid, __u64 nid); -int class_del_uuid (const char *uuid); +int class_del_uuid(const char *uuid); int class_check_uuid(struct obd_uuid *uuid, __u64 nid); void class_init_uuidlist(void); void class_exit_uuidlist(void); diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index d031437c0..f8ee3a325 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -47,7 +47,8 @@ extern unsigned int obd_debug_peer_on_timeout; extern unsigned int obd_dump_on_timeout; extern unsigned int obd_dump_on_eviction; /* obd_timeout should only be used for recovery, not for - networking / disk / timings affected by load (use Adaptive Timeouts) */ + * networking / disk / timings affected by load (use Adaptive Timeouts) + */ extern unsigned int obd_timeout; /* seconds */ extern unsigned int obd_timeout_set; extern unsigned int at_min; @@ -104,18 +105,21 @@ extern char obd_jobid_var[]; * failover targets the client only pings one server at a time, and pings * can be lost on a loaded network. Since eviction has serious consequences, * and there's no urgent need to evict a client just because it's idle, we - * should be very conservative here. */ + * should be very conservative here. 
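+ *
+ * For scale with the macros below (assuming the default obd_timeout of
+ * 100 seconds): CONNECTION_SWITCH_MAX = min(50, max(5, 100)) = 50s and
+ * INITIAL_CONNECT_TIMEOUT = max(5, 100/20) = 5s, so RECONNECT_DELAY_MAX
+ * works out to 50 + 5 + 5 = 60s between connect attempts at worst.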
+ */ #define PING_EVICT_TIMEOUT (PING_INTERVAL * 6) #define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */ #define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */ - /* Max connect interval for nonresponsive servers; ~50s to avoid building up - connect requests in the LND queues, but within obd_timeout so we don't - miss the recovery window */ +/* Max connect interval for nonresponsive servers; ~50s to avoid building up + * connect requests in the LND queues, but within obd_timeout so we don't + * miss the recovery window + */ #define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout)) #define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */ /* In general this should be low to have quick detection of a system - running on a backup server. (If it's too low, import_select_connection - will increase the timeout anyhow.) */ + * running on a backup server. (If it's too low, import_select_connection + * will increase the timeout anyhow.) + */ #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20) /* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */ #define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \ @@ -496,7 +500,7 @@ extern char obd_jobid_var[]; #ifdef POISON_BULK #define POISON_PAGE(page, val) do { \ - memset(kmap(page), val, PAGE_CACHE_SIZE); \ + memset(kmap(page), val, PAGE_SIZE); \ kunmap(page); \ } while (0) #else @@ -507,7 +511,6 @@ extern char obd_jobid_var[]; do { \ struct portals_handle *__h = (handle); \ \ - LASSERT(handle != NULL); \ __h->h_cookie = (unsigned long)(ptr); \ __h->h_size = (size); \ call_rcu(&__h->h_rcu, class_handle_free_cb); \ diff --git a/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h new file mode 100644 index 000000000..5e998362e --- /dev/null +++ b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h @@ -0,0 +1,94 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace. + */ + +#ifndef __UAPI_KERNELCOMM_H__ +#define __UAPI_KERNELCOMM_H__ + +#include <linux/types.h> + +/* KUC message header. + * All current and future KUC messages should use this header. + * To avoid having to include Lustre headers from libcfs, define this here.
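+ *
+ * A hedged sketch of filling this header for a header-only generic
+ * shutdown message (illustrative, using the constants defined below):
+ *
+ *	struct kuc_hdr h = {
+ *		.kuc_magic     = KUC_MAGIC,
+ *		.kuc_transport = KUC_TRANSPORT_GENERIC,
+ *		.kuc_msgtype   = KUC_MSG_SHUTDOWN,
+ *		.kuc_msglen    = sizeof(h),
+ *	};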
+ */ +struct kuc_hdr { + __u16 kuc_magic; + /* Each new Lustre feature should use a different transport */ + __u8 kuc_transport; + __u8 kuc_flags; + /* Message type or opcode, transport-specific */ + __u16 kuc_msgtype; + /* Including header */ + __u16 kuc_msglen; +} __aligned(sizeof(__u64)); + +#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE) + +#define KUC_MAGIC 0x191C /*Lustre9etLinC */ + +/* kuc_msgtype values are defined in each transport */ +enum kuc_transport_type { + KUC_TRANSPORT_GENERIC = 1, + KUC_TRANSPORT_HSM = 2, + KUC_TRANSPORT_CHANGELOG = 3, +}; + +enum kuc_generic_message_type { + KUC_MSG_SHUTDOWN = 1, +}; + +/* KUC Broadcast Groups. This determines which userspace process hears which + * messages. Multiple transports may be used within a group, or multiple + * groups may use the same transport. Broadcast + * groups need not be used if e.g. a UID is specified instead; + * use group 0 to signify unicast. + */ +#define KUC_GRP_HSM 0x02 +#define KUC_GRP_MAX KUC_GRP_HSM + +#define LK_FLG_STOP 0x01 +#define LK_NOFD -1U + +/* kernelcomm control structure, passed from userspace to kernel */ +struct lustre_kernelcomm { + __u32 lk_wfd; + __u32 lk_rfd; + __u32 lk_uid; + __u32 lk_group; + __u32 lk_data; + __u32 lk_flags; +} __packed; + +#endif /* __UAPI_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c index 8533a1e53..c4e8a0878 100644 --- a/drivers/staging/lustre/lustre/lclient/glimpse.c +++ b/drivers/staging/lustre/lustre/lclient/glimpse.c @@ -109,7 +109,8 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, * if there were no conflicting locks. If there * were conflicting locks, enqueuing or waiting * fails with -ENAVAIL, but valid inode - * attributes are returned anyway.
+ */ *descr = whole_file; descr->cld_obj = clob; descr->cld_mode = CLM_PHANTOM; diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c index 34dde7ded..96141d17d 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c @@ -116,8 +116,8 @@ void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key) { struct ccc_thread_info *info; - info = kmem_cache_alloc(ccc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -135,8 +135,8 @@ void *ccc_session_key_init(const struct lu_context *ctx, { struct ccc_session *session; - session = kmem_cache_alloc(ccc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } @@ -173,7 +173,7 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d, vdv = lu2ccc_dev(d); vdv->cdv_next = lu2cl_dev(next); - LASSERT(d->ld_site != NULL && next->ld_type != NULL); + LASSERT(d->ld_site && next->ld_type); next->ld_site = d->ld_site; rc = next->ld_type->ldt_ops->ldto_device_init( env, next, next->ld_type->ldt_name, NULL); @@ -211,12 +211,12 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env, vdv->cdv_cl.cd_ops = clops; site = kzalloc(sizeof(*site), GFP_NOFS); - if (site != NULL) { + if (site) { rc = cl_site_init(site, &vdv->cdv_cl); if (rc == 0) rc = lu_site_init_finish(&site->cs_lu); else { - LASSERT(lud->ld_site == NULL); + LASSERT(!lud->ld_site); CERROR("Cannot init lu_site, rc %d.\n", rc); kfree(site); } @@ -236,7 +236,7 @@ struct lu_device *ccc_device_free(const struct lu_env *env, struct cl_site *site = lu2cl_site(d->ld_site); struct lu_device *next = cl2lu_dev(vdv->cdv_next); - if (d->ld_site != NULL) { + if (d->ld_site) { cl_site_fini(site); kfree(site); } @@ -251,8 +251,8 @@ int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct ccc_req *vrq; int result; - vrq = kmem_cache_alloc(ccc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (vrq != NULL) { + vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS); + if (vrq) { cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops); result = 0; } else @@ -304,7 +304,7 @@ out_kmem: void ccc_global_fini(struct lu_device_type *device_type) { - if (ccc_inode_fini_env != NULL) { + if (ccc_inode_fini_env) { cl_env_put(ccc_inode_fini_env, &dummy_refcheck); ccc_inode_fini_env = NULL; } @@ -327,8 +327,8 @@ struct lu_object *ccc_object_alloc(const struct lu_env *env, struct ccc_object *vob; struct lu_object *obj; - vob = kmem_cache_alloc(ccc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (vob != NULL) { + vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS); + if (vob) { struct cl_object_header *hdr; obj = ccc2lu(vob); @@ -365,7 +365,7 @@ int ccc_object_init(const struct lu_env *env, struct lu_object *obj, under = &dev->cdv_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below != NULL) { + if (below) { const struct cl_object_conf *cconf; cconf = lu2cl_conf(conf); @@ -396,8 +396,8 @@ int ccc_lock_init(const struct lu_env *env, CLOBINVRNT(env, obj, ccc_object_invariant(obj)); - clk = kmem_cache_alloc(ccc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS); + if (clk) { cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops); result = 0; } else @@ -613,7 
+613,8 @@ void ccc_lock_state(const struct lu_env *env, * stale i_size when doing appending writes and effectively * cancel the result of the truncate. Getting the * ll_inode_size_lock() after the enqueue maintains the DLM - * -> ll_inode_size_lock() acquiring order. */ + * -> ll_inode_size_lock() acquiring order. + */ if (lock->cll_descr.cld_start == 0 && lock->cll_descr.cld_end == CL_PAGE_EOF) cl_merge_lvb(env, inode); @@ -660,7 +661,7 @@ void ccc_io_update_iov(const struct lu_env *env, { size_t size = io->u.ci_rw.crw_count; - if (!cl_is_normalio(env, io) || cio->cui_iter == NULL) + if (!cl_is_normalio(env, io) || !cio->cui_iter) return; iov_iter_truncate(cio->cui_iter, size); @@ -749,16 +750,17 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, */ ccc_object_size_unlock(obj); result = cl_glimpse_lock(env, io, inode, obj, 0); - if (result == 0 && exceed != NULL) { + if (result == 0 && exceed) { /* If objective page index exceed end-of-file * page index, return directly. Do not expect * kernel will check such case correctly. * linux-2.6.18-128.1.1 miss to do that. - * --bug 17336 */ + * --bug 17336 + */ loff_t size = cl_isize_read(inode); - loff_t cur_index = start >> PAGE_CACHE_SHIFT; + loff_t cur_index = start >> PAGE_SHIFT; loff_t size_index = (size - 1) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((size == 0 && cur_index != 0) || size_index < cur_index) @@ -884,7 +886,8 @@ again: if (attr->ia_valid & ATTR_FILE) /* populate the file descriptor for ftruncate to honor - * group lock - see LU-787 */ + * group lock - see LU-787 + */ cio->cui_fd = cl_iattr2fd(inode, attr); result = cl_io_loop(env, io); @@ -896,7 +899,8 @@ again: goto again; /* HSM import case: file is released, cannot be restored * no need to fail except if restore registration failed - * with -ENODATA */ + * with -ENODATA + */ if (result == -ENODATA && io->ci_restore_needed && io->ci_result != -ENODATA) result = 0; @@ -985,17 +989,6 @@ struct inode *ccc_object_inode(const struct cl_object *obj) return cl2ccc(obj)->cob_inode; } -/** - * Returns a pointer to cl_page associated with \a vmpage, without acquiring - * additional reference to the resulting page. This is an unsafe version of - * cl_vmpage_page() that can only be used under vmpage lock. - */ -struct cl_page *ccc_vmpage_page_transient(struct page *vmpage) -{ - KLASSERT(PageLocked(vmpage)); - return (struct cl_page *)vmpage->private; -} - /** * Initialize or update CLIO structures for regular files when new * meta-data arrives from the server. @@ -1033,11 +1026,12 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) fid = &lli->lli_fid; LASSERT(fid_is_sane(fid)); - if (lli->lli_clob == NULL) { + if (!lli->lli_clob) { /* clob is slave of inode, empty lli_clob means for new inode, * there is no clob in cache with the given fid, so it is * unnecessary to perform lookup-alloc-lookup-insert, just - * alloc and insert directly. */ + * alloc and insert directly. 
+ */ LASSERT(inode->i_state & I_NEW); conf.coc_lu.loc_flags = LOC_F_NEW; clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev), @@ -1109,7 +1103,7 @@ void cl_inode_fini(struct inode *inode) int refcheck; int emergency; - if (clob != NULL) { + if (clob) { void *cookie; cookie = cl_env_reenter(); @@ -1117,7 +1111,7 @@ void cl_inode_fini(struct inode *inode) emergency = IS_ERR(env); if (emergency) { mutex_lock(&ccc_inode_fini_guard); - LASSERT(ccc_inode_fini_env != NULL); + LASSERT(ccc_inode_fini_env); cl_env_implant(ccc_inode_fini_env, &refcheck); env = ccc_inode_fini_env; } @@ -1162,7 +1156,8 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent) } /** - * build inode number from passed @fid */ + * build inode number from passed @fid + */ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) { if (BITS_PER_LONG == 32 || api32) @@ -1173,7 +1168,8 @@ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) /** * build inode generation from passed @fid. If our FID overflows the 32-bit - * inode number then return a non-zero generation to distinguish them. */ + * inode number then return a non-zero generation to distinguish them. + */ __u32 cl_fid_build_gen(const struct lu_fid *fid) { __u32 gen; @@ -1194,7 +1190,8 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid) * have to wait for the refcount to become zero to destroy the older layout. * * Notice that the lsm returned by this function may not be valid unless called - * inside layout lock - MDS_INODELOCK_LAYOUT. */ + * inside layout lock - MDS_INODELOCK_LAYOUT. + */ struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode) { return lov_lsm_get(cl_i2info(inode)->lli_clob); diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c index 8389a0eda..d80bcedd7 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c @@ -48,7 +48,8 @@ /* Initialize the default and maximum LOV EA and cookie sizes. This allows * us to make MDS RPCs with large enough reply buffers to hold the * maximum-sized (= maximum striped) EA and cookie without having to - * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */ + * calculate this (via a call into the LOV + OSCs) each time we make an RPC. + */ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) { struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 }; @@ -74,7 +75,8 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) cookiesize = stripes * sizeof(struct llog_cookie); /* default cookiesize is 0 because from 2.4 server doesn't send - * llog cookies to client. */ + * llog cookies to client. 
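+ *
+ * For scale (a hedged aside): with a v3 layout, lov_mds_md_size(stripes,
+ * LOV_MAGIC_V3) is expected to expand to sizeof(struct lov_mds_md_v3) +
+ * stripes * sizeof(struct lov_ost_data_v1), so easize grows linearly
+ * with the stripe count while the default cookiesize stays 0.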
+ */ CDEBUG(D_HA, "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n", def_easize, easize, cookiesize); diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c index a2ea8e5b9..323060626 100644 --- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c +++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c @@ -49,13 +49,11 @@ enum { static inline int node_is_left_child(struct interval_node *node) { - LASSERT(node->in_parent != NULL); return node == node->in_parent->in_left; } static inline int node_is_right_child(struct interval_node *node) { - LASSERT(node->in_parent != NULL); return node == node->in_parent->in_right; } @@ -135,7 +133,8 @@ static void __rotate_change_maxhigh(struct interval_node *node, /* The left rotation "pivots" around the link from node to node->right, and * - node will be linked to node->right's left child, and - * - node->right's left child will be linked to node's right child. */ + * - node->right's left child will be linked to node's right child. + */ static void __rotate_left(struct interval_node *node, struct interval_node **root) { @@ -164,7 +163,8 @@ static void __rotate_left(struct interval_node *node, /* The right rotation "pivots" around the link from node to node->left, and * - node will be linked to node->left's right child, and - * - node->left's right child will be linked to node's left child. */ + * - node->left's right child will be linked to node's left child. + */ static void __rotate_right(struct interval_node *node, struct interval_node **root) { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c index 9c70f31ea..a803e200f 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c @@ -62,7 +62,8 @@ * is the "highest lock". This function returns the new KMS value. * Caller must hold lr_lock already. * - * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */ + * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! + */ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) { struct ldlm_resource *res = lock->l_resource; @@ -72,7 +73,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) /* don't let another thread in ldlm_extent_shift_kms race in * just after we finish and take our lock into account in its - * calculation of the kms */ + * calculation of the kms + */ lock->l_flags |= LDLM_FL_KMS_IGNORE; list_for_each(tmp, &res->lr_granted) { @@ -85,7 +87,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) return old_kms; /* This extent _has_ to be smaller than old_kms (checked above) - * so kms can only ever be smaller or the same as old_kms. */ + * so kms can only ever be smaller or the same as old_kms. 
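+ *
+ * Worked example (illustrative): with old_kms == 8192 and a remaining
+ * granted extent [0, 4095], kms becomes 4096 (end + 1), strictly below
+ * old_kms as the check above guarantees.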
+ */ if (lck->l_policy_data.l_extent.end + 1 > kms) kms = lck->l_policy_data.l_extent.end + 1; } @@ -112,8 +115,8 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) struct ldlm_interval *node; LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); - node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO); - if (node == NULL) + node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS); + if (!node) return NULL; INIT_LIST_HEAD(&node->li_group); @@ -134,7 +137,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) { struct ldlm_interval *n = l->l_tree_node; - if (n == NULL) + if (!n) return NULL; LASSERT(!list_empty(&n->li_group)); @@ -144,7 +147,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) return list_empty(&n->li_group) ? n : NULL; } -static inline int lock_mode_to_index(ldlm_mode_t mode) +static inline int lock_mode_to_index(enum ldlm_mode mode) { int index; @@ -168,7 +171,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, LASSERT(lock->l_granted_mode == lock->l_req_mode); node = lock->l_tree_node; - LASSERT(node != NULL); + LASSERT(node); LASSERT(!interval_is_intree(&node->li_node)); idx = lock_mode_to_index(lock->l_granted_mode); @@ -185,14 +188,14 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_interval *tmp; tmp = ldlm_interval_detach(lock); - LASSERT(tmp != NULL); ldlm_interval_free(tmp); ldlm_interval_attach(to_ldlm_interval(found), lock); } res->lr_itree[idx].lit_size++; /* even though we use interval tree to manage the extent lock, we also - * add the locks into grant list, for debug purpose, .. */ + * add the locks into grant list, for debug purpose, .. + */ ldlm_resource_add_lock(res, &res->lr_granted, lock); } @@ -211,7 +214,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock) LASSERT(lock->l_granted_mode == 1 << idx); tree = &res->lr_itree[idx]; - LASSERT(tree->lit_root != NULL); /* assure the tree is not null */ + LASSERT(tree->lit_root); /* assure the tree is not null */ tree->lit_size--; node = ldlm_interval_detach(lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index 4310154e1..b88b78606 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -92,7 +92,7 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) } static inline void -ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) +ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags) { LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)", mode, flags); @@ -107,7 +107,8 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING; /* when reaching here, it is under lock_res_and_lock(). Thus, - need call the nolock version of ldlm_lock_decref_internal*/ + * need call the nolock version of ldlm_lock_decref_internal + */ ldlm_lock_decref_internal_nolock(lock, mode); } @@ -133,7 +134,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) * would be collected and ASTs sent. 
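 *
 * Split sketch (illustrative): if this owner holds PW [0, 99] and now
 * requests PR [40, 59], the old lock is carved into [0, 39] and
 * [60, 99] around the newly granted range; "new2" in the code below
 * takes the leading piece while the original lock keeps the tail.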
*/ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list) { struct ldlm_resource *res = req->l_resource; @@ -143,7 +144,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, struct ldlm_lock *lock = NULL; struct ldlm_lock *new = req; struct ldlm_lock *new2 = NULL; - ldlm_mode_t mode = req->l_req_mode; + enum ldlm_mode mode = req->l_req_mode; int added = (mode == LCK_NL); int overlaps = 0; int splitted = 0; @@ -159,13 +160,15 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, *err = ELDLM_OK; /* No blocking ASTs are sent to the clients for - * Posix file & record locks */ + * Posix file & record locks + */ req->l_blocking_ast = NULL; reprocess: if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) { /* This loop determines where this processes locks start - * in the resource lr_granted list. */ + * in the resource lr_granted list. + */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -180,7 +183,8 @@ reprocess: lockmode_verify(mode); /* This loop determines if there are existing locks - * that conflict with the new lock request. */ + * that conflict with the new lock request. + */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -238,8 +242,8 @@ reprocess: } /* Scan the locks owned by this process that overlap this request. - * We may have to merge or split existing locks. */ - + * We may have to merge or split existing locks. + */ if (!ownlocks) ownlocks = &res->lr_granted; @@ -253,7 +257,8 @@ reprocess: /* If the modes are the same then we need to process * locks that overlap OR adjoin the new lock. The extra * logic condition is necessary to deal with arithmetic - * overflow and underflow. */ + * overflow and underflow. + */ if ((new->l_policy_data.l_flock.start > (lock->l_policy_data.l_flock.end + 1)) && (lock->l_policy_data.l_flock.end != @@ -327,11 +332,13 @@ reprocess: * with the request but this would complicate the reply * processing since updates to req get reflected in the * reply. The client side replays the lock request so - * it must see the original lock data in the reply. */ + * it must see the original lock data in the reply. + */ /* XXX - if ldlm_lock_new() can sleep we should * release the lr_lock, allocate the new lock, - * and restart processing this lock. */ + * and restart processing this lock. + */ if (!new2) { unlock_res_and_lock(req); new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK, @@ -361,7 +368,7 @@ reprocess: lock->l_policy_data.l_flock.start = new->l_policy_data.l_flock.end + 1; new2->l_conn_export = lock->l_conn_export; - if (lock->l_export != NULL) { + if (lock->l_export) { new2->l_export = class_export_lock_get(lock->l_export, new2); if (new2->l_export->exp_lock_hash && @@ -381,7 +388,7 @@ reprocess: } /* if new2 is created but never used, destroy it*/ - if (splitted == 0 && new2 != NULL) + if (splitted == 0 && new2) ldlm_lock_destroy_nolock(new2); /* At this point we're granting the lock request. */ @@ -396,7 +403,8 @@ reprocess: if (*flags != LDLM_FL_WAIT_NOREPROC) { /* The only one possible case for client-side calls flock * policy function is ldlm_flock_completion_ast inside which - * carries LDLM_FL_WAIT_NOREPROC flag. */ + * carries LDLM_FL_WAIT_NOREPROC flag. 
+ */ CERROR("Illegal parameter for client-side-only module.\n"); LBUG(); } @@ -404,7 +412,8 @@ reprocess: /* In case we're reprocessing the requested lock we can't destroy * it until after calling ldlm_add_ast_work_item() above so that laawi() * can bump the reference count on \a req. Otherwise \a req - * could be freed before the completion AST can be sent. */ + * could be freed before the completion AST can be sent. + */ if (added) ldlm_flock_destroy(req, mode, *flags); @@ -449,7 +458,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) struct obd_import *imp = NULL; struct ldlm_flock_wait_data fwd; struct l_wait_info lwi; - ldlm_error_t err; + enum ldlm_error err; int rc = 0; CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n", @@ -458,12 +467,12 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) /* Import invalidation. We need to actually release the lock * references being held, so that it can go away. No point in * holding the lock even if app still believes it has it, since - * server already dropped it anyway. Only for granted locks too. */ + * server already dropped it anyway. Only for granted locks too. + */ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) == (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) { if (lock->l_req_mode == lock->l_granted_mode && - lock->l_granted_mode != LCK_NL && - data == NULL) + lock->l_granted_mode != LCK_NL && !data) ldlm_lock_decref_internal(lock, lock->l_req_mode); /* Need to wake up the waiter if we were evicted */ @@ -475,7 +484,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV))) { - if (data == NULL) + if (!data) /* mds granted the lock in the reply */ goto granted; /* CP AST RPC: lock get granted, wake it up */ @@ -488,10 +497,10 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, there is no import */ - if (obd != NULL) + if (obd) imp = obd->u.cli.cl_import; - if (imp != NULL) { + if (imp) { spin_lock(&imp->imp_lock); fwd.fwd_generation = imp->imp_generation; spin_unlock(&imp->imp_lock); @@ -540,7 +549,8 @@ granted: } else if (flags & LDLM_FL_TEST_LOCK) { /* fcntl(F_GETLK) request */ /* The old mode was saved in getlk->fl_type so that if the mode - * in the lock changes we can decref the appropriate refcount.*/ + * in the lock changes we can decref the appropriate refcount. + */ ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC); switch (lock->l_granted_mode) { case LCK_PR: @@ -559,7 +569,8 @@ granted: __u64 noreproc = LDLM_FL_WAIT_NOREPROC; /* We need to reprocess the lock to do merges or splits - * with existing locks owned by this process. */ + * with existing locks owned by this process. + */ ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL); } unlock_res_and_lock(lock); @@ -576,7 +587,8 @@ void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid; /* Compat code, old clients had no idea about owner field and * relied solely on pid for ownership. 
Introduced in LU-104, 2.1, - * April 2011 */ + * April 2011 + */ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid; } diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h index 849cc98df..e21373e73 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h @@ -96,14 +96,15 @@ enum { LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */ LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither - * sending nor waiting for any rpcs) */ + * sending nor waiting for any rpcs) + */ }; int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - ldlm_cancel_flags_t sync, int flags); + enum ldlm_cancel_flags sync, int flags); int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, - ldlm_cancel_flags_t cancel_flags, int flags); + enum ldlm_cancel_flags cancel_flags, int flags); extern int ldlm_enqueue_min; /* ldlm_resource.c */ @@ -133,11 +134,11 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, enum req_location loc, void *data, int size); struct ldlm_lock * ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *, - ldlm_type_t type, ldlm_mode_t, + enum ldlm_type type, enum ldlm_mode mode, const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type); -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, - void *cookie, __u64 *flags); +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, + void *cookie, __u64 *flags); void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode); void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode); void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode); @@ -154,7 +155,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); void ldlm_handle_bl_callback(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c index 3c8d4413d..7dd7df59a 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c @@ -219,7 +219,8 @@ EXPORT_SYMBOL(client_import_find_conn); void client_destroy_import(struct obd_import *imp) { /* Drop security policy instance after all RPCs have finished/aborted - * to let all busy contexts be released. */ + * to let all busy contexts be released. + */ class_import_get(imp); class_destroy_import(imp); sptlrpc_import_sec_put(imp); @@ -227,29 +228,6 @@ void client_destroy_import(struct obd_import *imp) } EXPORT_SYMBOL(client_destroy_import); -/** - * Check whether or not the OSC is on MDT. - * In the config log, - * osc on MDT - * setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID - * osc on client - * setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID - * - **/ -static int osc_on_mdt(char *obdname) -{ - char *ptr; - - ptr = strrchr(obdname, '-'); - if (ptr == NULL) - return 0; - - if (strncmp(ptr + 1, "MDT", 3) == 0) - return 1; - - return 0; -} - /* Configure an RPC client OBD device. 
* * lcfg parameters: @@ -264,11 +242,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) struct obd_uuid server_uuid; int rq_portal, rp_portal, connect_op; char *name = obddev->obd_type->typ_name; - ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN; + enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN; int rc; /* In a more perfect world, we would hang a ptlrpc_client off of - * obd_type and just use the values from there. */ + * obd_type and just use the values from there. + */ if (!strcmp(name, LUSTRE_OSC_NAME)) { rq_portal = OST_REQUEST_PORTAL; rp_portal = OSC_REPLY_PORTAL; @@ -284,22 +263,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_sp_me = LUSTRE_SP_CLI; cli->cl_sp_to = LUSTRE_SP_MDT; ns_type = LDLM_NS_TYPE_MDC; - } else if (!strcmp(name, LUSTRE_OSP_NAME)) { - if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) { - /* OSP_on_MDT for other MDTs */ - connect_op = MDS_CONNECT; - cli->cl_sp_to = LUSTRE_SP_MDT; - ns_type = LDLM_NS_TYPE_MDC; - rq_portal = OUT_PORTAL; - } else { - /* OSP on MDT for OST */ - connect_op = OST_CONNECT; - cli->cl_sp_to = LUSTRE_SP_OST; - ns_type = LDLM_NS_TYPE_OSC; - rq_portal = OST_REQUEST_PORTAL; - } - rp_portal = OSC_REPLY_PORTAL; - cli->cl_sp_me = LUSTRE_SP_CLI; } else if (!strcmp(name, LUSTRE_MGC_NAME)) { rq_portal = MGS_REQUEST_PORTAL; rp_portal = MGC_REPLY_PORTAL; @@ -344,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_avail_grant = 0; /* FIXME: Should limit this for the sum of all cl_dirty_max. */ cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; - if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) - cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); + if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8) + cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3); INIT_LIST_HEAD(&cli->cl_cache_waiters); INIT_LIST_HEAD(&cli->cl_loi_ready_list); INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); @@ -387,23 +350,21 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) /* This value may be reduced at connect time in * ptlrpc_connect_interpret() . We initialize it to only * 1MB until we know what the performance looks like. - * In the future this should likely be increased. LU-1431 */ + * In the future this should likely be increased. 
LU-1431 + */ cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, - LNET_MTU >> PAGE_CACHE_SHIFT); + LNET_MTU >> PAGE_SHIFT); if (!strcmp(name, LUSTRE_MDC_NAME)) { cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) { cli->cl_max_rpcs_in_flight = 2; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) { cli->cl_max_rpcs_in_flight = 3; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) { cli->cl_max_rpcs_in_flight = 4; } else { - if (osc_on_mdt(obddev->obd_name)) - cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT; - else - cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; + cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; } rc = ldlm_get_ref(); if (rc) { @@ -415,7 +376,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) &obddev->obd_ldlm_client); imp = class_new_import(obddev); - if (imp == NULL) { + if (!imp) { rc = -ENOENT; goto err_ldlm; } @@ -451,7 +412,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) LDLM_NAMESPACE_CLIENT, LDLM_NAMESPACE_GREEDY, ns_type); - if (obddev->obd_namespace == NULL) { + if (!obddev->obd_namespace) { CERROR("Unable to create client namespace - %s\n", obddev->obd_name); rc = -ENOMEM; @@ -477,7 +438,7 @@ int client_obd_cleanup(struct obd_device *obddev) ldlm_namespace_free_post(obddev->obd_namespace); obddev->obd_namespace = NULL; - LASSERT(obddev->u.cli.cl_import == NULL); + LASSERT(!obddev->u.cli.cl_import); ldlm_put_ref(); return 0; @@ -528,7 +489,7 @@ int client_connect_import(const struct lu_env *env, LASSERT(imp->imp_state == LUSTRE_IMP_DISCON); goto out_ldlm; } - LASSERT(*exp != NULL && (*exp)->exp_connection); + LASSERT(*exp && (*exp)->exp_connection); if (data) { LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) == @@ -587,17 +548,19 @@ int client_disconnect_export(struct obd_export *exp) /* Mark import deactivated now, so we don't try to reconnect if any * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't - * fully deactivate the import, or that would drop all requests. */ + * fully deactivate the import, or that would drop all requests. + */ spin_lock(&imp->imp_lock); imp->imp_deactive = 1; spin_unlock(&imp->imp_lock); /* Some non-replayable imports (MDS's OSCs) are pinged, so just * delete it regardless. (It's safe to delete an import that was - * never added.) */ + * never added.) + */ (void)ptlrpc_pinger_del_import(imp); - if (obd->obd_namespace != NULL) { + if (obd->obd_namespace) { /* obd_force == local only */ ldlm_cli_cancel_unused(obd->obd_namespace, NULL, obd->obd_force ? LCF_LOCAL : 0, NULL); @@ -606,7 +569,8 @@ int client_disconnect_export(struct obd_export *exp) } /* There's no need to hold sem while disconnecting an import, - * and it may actually cause deadlock in GSS. */ + * and it may actually cause deadlock in GSS. + */ up_write(&cli->cl_sem); rc = ptlrpc_disconnect_import(imp, 0); down_write(&cli->cl_sem); @@ -615,7 +579,8 @@ int client_disconnect_export(struct obd_export *exp) out_disconnect: /* Use server style - class_disconnect should be always called for - * o_disconnect. */ + * o_disconnect. 
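A note on the PAGE_CACHE_SHIFT to PAGE_SHIFT substitutions in client_obd_setup() above: kernel 4.6 removed the PAGE_CACHE_* aliases, which had long been defined to the corresponding PAGE_* values, so the rewrite is mechanical. A small sketch of the arithmetic these call sites rely on (pages_to_mb is a hypothetical helper name):

	#include <linux/mm.h>

	/* pages -> megabytes; with 4 KiB pages (PAGE_SHIFT == 12) this is pages >> 8 */
	static inline unsigned long pages_to_mb(unsigned long pages)
	{
		return pages >> (20 - PAGE_SHIFT);
	}

so a test such as totalram_pages >> (20 - PAGE_SHIFT) <= 128 reads as "128 MB of RAM or less".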
+ */ err = class_disconnect(exp); if (!rc && err) rc = err; @@ -634,7 +599,8 @@ int target_pack_pool_reply(struct ptlrpc_request *req) struct obd_device *obd; /* Check that we still have all structures alive as this may - * be some late RPC at shutdown time. */ + * be some late RPC at shutdown time. + */ if (unlikely(!req->rq_export || !req->rq_export->exp_obd || !exp_connect_lru_resize(req->rq_export))) { lustre_msg_set_slv(req->rq_repmsg, 0); @@ -684,14 +650,14 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) svcpt = req->rq_rqbd->rqbd_svcpt; rs = req->rq_reply_state; - if (rs == NULL || !rs->rs_difficult) { + if (!rs || !rs->rs_difficult) { /* no notifiers */ target_send_reply_msg(req, rc, fail_id); return; } /* must be an export if locks saved */ - LASSERT(req->rq_export != NULL); + LASSERT(req->rq_export); /* req/reply consistent */ LASSERT(rs->rs_svcpt == svcpt); @@ -700,7 +666,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) LASSERT(!rs->rs_scheduled_ever); LASSERT(!rs->rs_handled); LASSERT(!rs->rs_on_net); - LASSERT(rs->rs_export == NULL); + LASSERT(!rs->rs_export); LASSERT(list_empty(&rs->rs_obd_list)); LASSERT(list_empty(&rs->rs_exp_list)); @@ -739,7 +705,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) * reply ref until ptlrpc_handle_rs() is done * with the reply state (if the send was successful, there * would have been +1 ref for the net, which - * reply_out_callback leaves alone) */ + * reply_out_callback leaves alone) + */ rs->rs_on_net = 0; ptlrpc_rs_addref(rs); } @@ -760,7 +727,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) } EXPORT_SYMBOL(target_send_reply); -ldlm_mode_t lck_compat_array[] = { +enum ldlm_mode lck_compat_array[] = { [LCK_EX] = LCK_COMPAT_EX, [LCK_PW] = LCK_COMPAT_PW, [LCK_PR] = LCK_COMPAT_PR, @@ -775,7 +742,7 @@ ldlm_mode_t lck_compat_array[] = { * Rather arbitrary mapping from LDLM error codes to errno values. This should * not escape to the user level. 
*/ -int ldlm_error2errno(ldlm_error_t error) +int ldlm_error2errno(enum ldlm_error error) { int result; @@ -803,7 +770,7 @@ int ldlm_error2errno(ldlm_error_t error) break; default: if (((int)error) < 0) /* cast to signed type */ - result = error; /* as ldlm_error_t can be unsigned */ + result = error; /* as enum ldlm_error can be unsigned */ else { CERROR("Invalid DLM result code: %d\n", error); result = -EPROTO; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index cf9ec0cfe..ecd65a7a3 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -91,7 +91,7 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { /** * Converts lock policy from local format to on the wire lock_desc format */ -static void ldlm_convert_policy_to_wire(ldlm_type_t type, +static void ldlm_convert_policy_to_wire(enum ldlm_type type, const ldlm_policy_data_t *lpolicy, ldlm_wire_policy_data_t *wpolicy) { @@ -105,7 +105,7 @@ static void ldlm_convert_policy_to_wire(ldlm_type_t type, /** * Converts lock policy from on the wire lock_desc format to local format */ -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy) { @@ -326,9 +326,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) if (lock->l_export && lock->l_export->exp_lock_hash) { /* NB: it's safe to call cfs_hash_del() even lock isn't - * in exp_lock_hash. */ + * in exp_lock_hash. + */ /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_del(lock->l_export->exp_lock_hash, &lock->l_remote_handle, &lock->l_exp_hash); @@ -337,16 +339,6 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) ldlm_lock_remove_from_lru(lock); class_handle_unhash(&lock->l_handle); -#if 0 - /* Wake anyone waiting for this lock */ - /* FIXME: I should probably add yet another flag, instead of using - * l_export to only call this on clients */ - if (lock->l_export) - class_export_put(lock->l_export); - lock->l_export = NULL; - if (lock->l_export && lock->l_completion_ast) - lock->l_completion_ast(lock, 0); -#endif return 1; } @@ -412,11 +404,10 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) { struct ldlm_lock *lock; - if (resource == NULL) - LBUG(); + LASSERT(resource); - lock = kmem_cache_alloc(ldlm_lock_slab, GFP_NOFS | __GFP_ZERO); - if (lock == NULL) + lock = kmem_cache_zalloc(ldlm_lock_slab, GFP_NOFS); + if (!lock) return NULL; spin_lock_init(&lock->l_lock); @@ -485,7 +476,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, unlock_res_and_lock(lock); newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); - if (newres == NULL) + if (!newres) return -ENOMEM; lu_ref_add(&newres->lr_reference, "lock", lock); @@ -547,11 +538,12 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, LASSERT(handle); lock = class_handle2object(handle->cookie); - if (lock == NULL) + if (!lock) return NULL; /* It's unlikely but possible that someone marked the lock as - * destroyed after we did handle2object on it */ + * destroyed after we did handle2object on it + */ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { lu_ref_add(&lock->l_reference, "handle", current); return lock; @@ 
-559,7 +551,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, lock_res_and_lock(lock); - LASSERT(lock->l_resource != NULL); + LASSERT(lock->l_resource); lu_ref_add_atomic(&lock->l_reference, "handle", current); if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { @@ -611,13 +603,14 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); lock->l_flags |= LDLM_FL_AST_SENT; /* If the enqueuing client said so, tell the AST recipient to - * discard dirty data, rather than writing back. */ + * discard dirty data, rather than writing back. + */ if (new->l_flags & LDLM_FL_AST_DISCARD_DATA) lock->l_flags |= LDLM_FL_DISCARD_DATA; LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, work_list); LDLM_LOCK_GET(lock); - LASSERT(lock->l_blocking_lock == NULL); + LASSERT(!lock->l_blocking_lock); lock->l_blocking_lock = LDLM_LOCK_GET(new); } } @@ -664,7 +657,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode) struct ldlm_lock *lock; lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); + LASSERT(lock); ldlm_lock_addref_internal(lock, mode); LDLM_LOCK_PUT(lock); } @@ -708,7 +701,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) result = -EAGAIN; lock = ldlm_handle2lock(lockh); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (lock->l_readers != 0 || lock->l_writers != 0 || !(lock->l_flags & LDLM_FL_CBPENDING)) { @@ -780,7 +773,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) if (lock->l_flags & LDLM_FL_LOCAL && !lock->l_readers && !lock->l_writers) { /* If this is a local lock on a server namespace and this was - * the last reference, cancel the lock. */ + * the last reference, cancel the lock. + */ CDEBUG(D_INFO, "forcing cancel of local lock\n"); lock->l_flags |= LDLM_FL_CBPENDING; } @@ -788,7 +782,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) if (!lock->l_readers && !lock->l_writers && (lock->l_flags & LDLM_FL_CBPENDING)) { /* If we received a blocked AST and this was the last reference, - * run the callback. */ + * run the callback. + */ LDLM_DEBUG(lock, "final decref done on cbpending lock"); @@ -809,7 +804,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) LDLM_DEBUG(lock, "add lock into lru list"); /* If this is a client-side namespace and this was the last - * reference, put it on the LRU. */ + * reference, put it on the LRU. + */ ldlm_lock_add_to_lru(lock); unlock_res_and_lock(lock); @@ -818,7 +814,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE * are not supported by the server, otherwise, it is done on - * enqueue. */ + * enqueue. 
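For reference on the search_granted_lock() hunks above: list_prev_entry() from <linux/list.h> is defined as

	#define list_prev_entry(pos, member) \
		list_entry((pos)->member.prev, typeof(*(pos)), member)

so mode_end = list_prev_entry(lock, l_sl_mode); is equivalent to the open-coded list_entry(lock->l_sl_mode.prev, struct ldlm_lock, l_sl_mode); that it replaces.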
+ */ if (!exp_connect_cancelset(lock->l_conn_export) && !ns_connect_lru_resize(ns)) ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0); @@ -835,7 +832,7 @@ void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie); + LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie); ldlm_lock_decref_internal(lock, mode); LDLM_LOCK_PUT(lock); } @@ -852,7 +849,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - LASSERT(lock != NULL); + LASSERT(lock); LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); lock_res_and_lock(lock); @@ -893,8 +890,7 @@ static void search_granted_lock(struct list_head *queue, list_for_each(tmp, queue) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); - mode_end = list_entry(lock->l_sl_mode.prev, - struct ldlm_lock, l_sl_mode); + mode_end = list_prev_entry(lock, l_sl_mode); if (lock->l_req_mode != req->l_req_mode) { /* jump to last lock of mode group */ @@ -914,14 +910,13 @@ static void search_granted_lock(struct list_head *queue, if (lock->l_resource->lr_type == LDLM_IBITS) { for (;;) { policy_end = - list_entry(lock->l_sl_policy.prev, - struct ldlm_lock, - l_sl_policy); + list_prev_entry(lock, l_sl_policy); if (lock->l_policy_data.l_inodebits.bits == req->l_policy_data.l_inodebits.bits) { /* insert point is last lock of - * the policy group */ + * the policy group + */ prev->res_link = &policy_end->l_res_link; prev->mode_link = @@ -942,7 +937,8 @@ static void search_granted_lock(struct list_head *queue, } /* loop over policy groups within the mode group */ /* insert point is last lock of the mode group, - * new policy group is started */ + * new policy group is started + */ prev->res_link = &mode_end->l_res_link; prev->mode_link = &mode_end->l_sl_mode; prev->policy_link = &req->l_sl_policy; @@ -954,7 +950,8 @@ static void search_granted_lock(struct list_head *queue, } /* insert point is last lock on the queue, - * new mode group and new policy group are started */ + * new mode group and new policy group are started + */ prev->res_link = queue->prev; prev->mode_link = &req->l_sl_mode; prev->policy_link = &req->l_sl_policy; @@ -1034,10 +1031,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) else ldlm_resource_add_lock(res, &res->lr_granted, lock); - if (lock->l_granted_mode < res->lr_most_restr) - res->lr_most_restr = lock->l_granted_mode; - - if (work_list && lock->l_completion_ast != NULL) + if (work_list && lock->l_completion_ast) ldlm_add_ast_work_item(lock, NULL, work_list); ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); @@ -1050,7 +1044,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) * comment above ldlm_lock_match */ static struct ldlm_lock *search_queue(struct list_head *queue, - ldlm_mode_t *mode, + enum ldlm_mode *mode, ldlm_policy_data_t *policy, struct ldlm_lock *old_lock, __u64 flags, int unref) @@ -1059,7 +1053,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue, struct list_head *tmp; list_for_each(tmp, queue) { - ldlm_mode_t match; + enum ldlm_mode match; lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -1067,7 +1061,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, break; /* Check if this lock can be matched. 
- * Used by LU-2919(exclusive open) for open lease lock */ + * Used by LU-2919(exclusive open) for open lease lock + */ if (ldlm_is_excl(lock)) continue; @@ -1076,7 +1071,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, * if it passes in CBPENDING and the lock still has users. * this is generally only going to be used by children * whose parents already hold a lock so forward progress - * can still happen. */ + * can still happen. + */ if (lock->l_flags & LDLM_FL_CBPENDING && !(flags & LDLM_FL_CBPENDING)) continue; @@ -1100,7 +1096,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, continue; /* We match if we have existing lock with same or wider set - of bits. */ + * of bits. + */ if (lock->l_resource->lr_type == LDLM_IBITS && ((lock->l_policy_data.l_inodebits.bits & policy->l_inodebits.bits) != @@ -1192,16 +1189,18 @@ EXPORT_SYMBOL(ldlm_lock_allow_match); * keep caller code unchanged), the context failure will be discovered by * caller sometime later. */ -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *res_id, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh, int unref) +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *res_id, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh, int unref) { struct ldlm_resource *res; struct ldlm_lock *lock, *old_lock = NULL; int rc = 0; - if (ns == NULL) { + if (!ns) { old_lock = ldlm_handle2lock(lockh); LASSERT(old_lock); @@ -1212,8 +1211,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } res = ldlm_resource_get(ns, NULL, res_id, type, 0); - if (res == NULL) { - LASSERT(old_lock == NULL); + if (!res) { + LASSERT(!old_lock); return 0; } @@ -1222,7 +1221,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, lock = search_queue(&res->lr_granted, &mode, policy, old_lock, flags, unref); - if (lock != NULL) { + if (lock) { rc = 1; goto out; } @@ -1232,7 +1231,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, flags, unref); - if (lock != NULL) { + if (lock) { rc = 1; goto out; } @@ -1317,14 +1316,14 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } EXPORT_SYMBOL(ldlm_lock_match); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits) +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits) { struct ldlm_lock *lock; - ldlm_mode_t mode = 0; + enum ldlm_mode mode = 0; lock = ldlm_handle2lock(lockh); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (lock->l_flags & LDLM_FL_GONE_MASK) goto out; @@ -1340,7 +1339,7 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, } out: - if (lock != NULL) { + if (lock) { unlock_res_and_lock(lock); LDLM_LOCK_PUT(lock); } @@ -1354,7 +1353,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, { void *lvb; - LASSERT(data != NULL); + LASSERT(data); LASSERT(size >= 0); switch (lock->l_lvb_type) { @@ -1368,7 +1367,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_swab_get(pill, &RMF_DLM_LVB, lustre_swab_ost_lvb); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1385,7 +1384,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule 
*pill, lvb = req_capsule_server_sized_swab_get(pill, &RMF_DLM_LVB, size, lustre_swab_ost_lvb_v1); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1410,7 +1409,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_swab_get(pill, &RMF_DLM_LVB, lustre_swab_lquota_lvb); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1431,7 +1430,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_client_get(pill, &RMF_DLM_LVB); else lvb = req_capsule_server_get(pill, &RMF_DLM_LVB); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1453,8 +1452,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, */ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_type_t type, - ldlm_mode_t mode, + enum ldlm_type type, + enum ldlm_mode mode, const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type) @@ -1463,12 +1462,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, struct ldlm_resource *res; res = ldlm_resource_get(ns, NULL, res_id, type, 1); - if (res == NULL) + if (!res) return NULL; lock = ldlm_lock_new(res); - if (lock == NULL) + if (!lock) return NULL; lock->l_req_mode = mode; @@ -1483,7 +1482,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, lock->l_tree_node = NULL; /* if this is the extent lock, allocate the interval tree node */ if (type == LDLM_EXTENT) { - if (ldlm_interval_alloc(lock) == NULL) + if (!ldlm_interval_alloc(lock)) goto out; } @@ -1514,9 +1513,9 @@ out: * Does not block. As a result of enqueue the lock would be put * into granted or waiting list. */ -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, - void *cookie, __u64 *flags) +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns, + struct ldlm_lock **lockp, + void *cookie, __u64 *flags) { struct ldlm_lock *lock = *lockp; struct ldlm_resource *res = lock->l_resource; @@ -1527,7 +1526,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, if (lock->l_req_mode == lock->l_granted_mode) { /* The server returned a blocked lock, but it was granted * before we got a chance to actually enqueue it. We don't - * need to do anything else. */ + * need to do anything else. + */ *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT); goto out; @@ -1540,7 +1540,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, LBUG(); /* Some flags from the enqueue want to make it into the AST, via the - * lock's l_flags. */ + * lock's l_flags. + */ lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; /* @@ -1621,19 +1622,21 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) * This can't happen with the blocking_ast, however, because we * will never call the local blocking_ast until we drop our * reader/writer reference, which we won't do until we get the - * reply and finish enqueueing. */ + * reply and finish enqueueing. 
+ */ /* nobody should touch l_cp_ast */ lock_res_and_lock(lock); list_del_init(&lock->l_cp_ast); LASSERT(lock->l_flags & LDLM_FL_CP_REQD); /* save l_completion_ast since it can be changed by - * mds_intent_policy(), see bug 14225 */ + * mds_intent_policy(), see bug 14225 + */ completion_callback = lock->l_completion_ast; lock->l_flags &= ~LDLM_FL_CP_REQD; unlock_res_and_lock(lock); - if (completion_callback != NULL) + if (completion_callback) rc = completion_callback(lock, 0, (void *)arg); LDLM_LOCK_RELEASE(lock); @@ -1749,10 +1752,11 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, /* We create a ptlrpc request set with flow control extension. * This request set will use the work_ast_lock function to produce new * requests and will send a new request each time one completes in order - * to keep the number of requests in flight to ns_max_parallel_ast */ + * to keep the number of requests in flight to ns_max_parallel_ast + */ arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX, work_ast_lock, arg); - if (arg->set == NULL) { + if (!arg->set) { rc = -ENOMEM; goto out; } @@ -1815,7 +1819,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) ns = ldlm_res_to_ns(res); /* Please do not, no matter how tempting, remove this LBUG without - * talking to me first. -phik */ + * talking to me first. -phik + */ if (lock->l_readers || lock->l_writers) { LDLM_ERROR(lock, "lock still has references"); LBUG(); @@ -1831,7 +1836,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) ldlm_pool_del(&ns->ns_pool, lock); /* Make sure we will not be called again for same lock what is possible - * if not to zero out lock->l_granted_mode */ + * if not to zero out lock->l_granted_mode + */ lock->l_granted_mode = LCK_MINMODE; unlock_res_and_lock(lock); } @@ -1846,7 +1852,7 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data) int rc = -EINVAL; if (lock) { - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) rc = 0; @@ -1874,7 +1880,7 @@ void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh) return; lock = ldlm_handle2lock(lockh); - if (lock == NULL) + if (!lock) return; LDLM_DEBUG_LIMIT(level, lock, "###"); @@ -1900,13 +1906,13 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, if (exp && exp->exp_connection) { nid = libcfs_nid2str(exp->exp_connection->c_peer.nid); - } else if (exp && exp->exp_obd != NULL) { + } else if (exp && exp->exp_obd) { struct obd_import *imp = exp->exp_obd->u.cli.cl_import; nid = libcfs_nid2str(imp->imp_connection->c_peer.nid); } - if (resource == NULL) { + if (!resource) { libcfs_debug_vmsg2(msgdata, fmt, args, " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? 
flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", lock, diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index 79aeb2bf6..ebe9042ad 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -107,7 +107,7 @@ struct ldlm_bl_work_item { struct list_head blwi_head; int blwi_count; struct completion blwi_comp; - ldlm_cancel_flags_t blwi_flags; + enum ldlm_cancel_flags blwi_flags; int blwi_mem_pressure; }; @@ -136,7 +136,7 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns, CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n", lock, lock->l_blocking_ast); - if (lock->l_blocking_ast != NULL) + if (lock->l_blocking_ast) lock->l_blocking_ast(lock, ld, lock->l_ast_data, LDLM_CB_BLOCKING); } else { @@ -185,7 +185,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } else if (lvb_len > 0) { if (lock->l_lvb_len > 0) { /* for extent lock, lvb contains ost_lvb{}. */ - LASSERT(lock->l_lvb_data != NULL); + LASSERT(lock->l_lvb_data); if (unlikely(lock->l_lvb_len < lvb_len)) { LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d", @@ -194,7 +194,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, goto out; } } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has - * variable length */ + * variable length + */ void *lvb_data; lvb_data = kzalloc(lvb_len, GFP_NOFS); @@ -205,7 +206,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } lock_res_and_lock(lock); - LASSERT(lock->l_lvb_data == NULL); + LASSERT(!lock->l_lvb_data); lock->l_lvb_type = LVB_T_LAYOUT; lock->l_lvb_data = lvb_data; lock->l_lvb_len = lvb_len; @@ -224,7 +225,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } /* If we receive the completion AST before the actual enqueue returned, - * then we might need to switch lock modes, resources, or extents. */ + * then we might need to switch lock modes, resources, or extents. + */ if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) { lock->l_req_mode = dlm_req->lock_desc.l_granted_mode; LDLM_DEBUG(lock, "completion AST, new lock mode"); @@ -256,7 +258,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, if (dlm_req->lock_flags & LDLM_FL_AST_SENT) { /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. */ + * Let ldlm_cancel_lru() be fast. 
+ */ ldlm_lock_remove_from_lru(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; LDLM_DEBUG(lock, "completion AST includes blocking AST"); @@ -276,8 +279,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work"); - /* Let Enqueue to call osc_lock_upcall() and initialize - * l_ast_data */ + /* Let Enqueue to call osc_lock_upcall() and initialize l_ast_data */ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2); ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST); @@ -312,10 +314,10 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req, LDLM_DEBUG(lock, "client glimpse AST callback handler"); - if (lock->l_glimpse_ast != NULL) + if (lock->l_glimpse_ast) rc = lock->l_glimpse_ast(lock, req); - if (req->rq_repmsg != NULL) { + if (req->rq_repmsg) { ptlrpc_reply(req); } else { req->rq_status = rc; @@ -353,7 +355,7 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc) } static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; @@ -371,7 +373,8 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, wake_up(&blp->blp_waitq); /* can not check blwi->blwi_flags as blwi could be already freed in - LCF_ASYNC mode */ + * LCF_ASYNC mode + */ if (!(cancel_flags & LCF_ASYNC)) wait_for_completion(&blwi->blwi_comp); @@ -383,7 +386,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, struct ldlm_lock *lock, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { init_completion(&blwi->blwi_comp); INIT_LIST_HEAD(&blwi->blwi_head); @@ -393,7 +396,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, blwi->blwi_ns = ns; blwi->blwi_flags = cancel_flags; - if (ld != NULL) + if (ld) blwi->blwi_ld = *ld; if (count) { list_add(&blwi->blwi_head, cancels); @@ -417,7 +420,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { if (cancels && count == 0) return 0; @@ -451,7 +454,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags); } @@ -470,14 +473,14 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req) req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO); key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); - if (key == NULL) { + if (!key) { DEBUG_REQ(D_IOCTL, req, "no set_info key"); return -EFAULT; } keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY, RCL_CLIENT); val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL); - if (val == NULL) { + if (!val) { DEBUG_REQ(D_IOCTL, req, "no set_info val"); return -EFAULT; } @@ -519,7 +522,7 @@ static int ldlm_handle_qc_callback(struct ptlrpc_request *req) struct client_obd *cli = &req->rq_export->exp_obd->u.cli; oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - if (oqctl == NULL) { + if (!oqctl) { CERROR("Can't unpack obd_quotactl\n"); return -EPROTO; } @@ -541,7 +544,8 @@ static int 
ldlm_callback_handler(struct ptlrpc_request *req) /* Requests arrive in sender's byte order. The ptlrpc service * handler has already checked and, if necessary, byte-swapped the * incoming request message body, but I am responsible for the - * message buffers. */ + * message buffers. + */ /* do nothing for sec context finalize */ if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI) @@ -549,15 +553,14 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) req_capsule_init(&req->rq_pill, req, RCL_SERVER); - if (req->rq_export == NULL) { + if (!req->rq_export) { rc = ldlm_callback_reply(req, -ENOTCONN); ldlm_callback_errmsg(req, "Operate on unconnected server", rc, NULL); return 0; } - LASSERT(req->rq_export != NULL); - LASSERT(req->rq_export->exp_obd != NULL); + LASSERT(req->rq_export->exp_obd); switch (lustre_msg_get_opc(req->rq_reqmsg)) { case LDLM_BL_CALLBACK: @@ -591,12 +594,12 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) } ns = req->rq_export->exp_obd->obd_namespace; - LASSERT(ns != NULL); + LASSERT(ns); req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK); dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - if (dlm_req == NULL) { + if (!dlm_req) { rc = ldlm_callback_reply(req, -EPROTO); ldlm_callback_errmsg(req, "Operate without parameter", rc, NULL); @@ -604,7 +607,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) } /* Force a known safe race, send a cancel to the server for a lock - * which the server has already started a blocking callback on. */ + * which the server has already started a blocking callback on. + */ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) && lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) { rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0); @@ -634,7 +638,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) /* If somebody cancels lock and cache is already dropped, * or lock is failed before cp_ast received on client, * we can tell the server we have no lock. Otherwise, we - * should send cancel after dropping the cache. */ + * should send cancel after dropping the cache. + */ if (((lock->l_flags & LDLM_FL_CANCELING) && (lock->l_flags & LDLM_FL_BL_DONE)) || (lock->l_flags & LDLM_FL_FAILED)) { @@ -648,7 +653,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) return 0; } /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. */ + * Let ldlm_cancel_lru() be fast. + */ ldlm_lock_remove_from_lru(lock); lock->l_flags |= LDLM_FL_BL_AST; } @@ -661,7 +667,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) * But we'd also like to be able to indicate in the reply that we're * cancelling right now, because it's unused, or have an intent result * in the reply, so we might have to push the responsibility for sending - * the reply down into the AST handlers, alas. */ + * the reply down into the AST handlers, alas. 
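Most of the one-line changes in this patch apply a single checkpatch rule: test pointers implicitly rather than comparing against NULL. A before/after sketch (check_export is a hypothetical wrapper around the pattern used in ldlm_callback_handler() above):

	static int check_export(struct ptlrpc_request *req)
	{
		/* old style, flagged by checkpatch:
		 *	if (req->rq_export == NULL)
		 *		return -ENOTCONN;
		 */
		if (!req->rq_export)
			return -ENOTCONN;
		return 0;
	}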
+ */ switch (lustre_msg_get_opc(req->rq_reqmsg)) { case LDLM_BL_CALLBACK: @@ -781,17 +788,17 @@ static int ldlm_bl_thread_main(void *arg) blwi = ldlm_bl_get_work(blp); - if (blwi == NULL) { + if (!blwi) { atomic_dec(&blp->blp_busy_threads); l_wait_event_exclusive(blp->blp_waitq, - (blwi = ldlm_bl_get_work(blp)) != NULL, + (blwi = ldlm_bl_get_work(blp)), &lwi); busy = atomic_inc_return(&blp->blp_busy_threads); } else { busy = atomic_read(&blp->blp_busy_threads); } - if (blwi->blwi_ns == NULL) + if (!blwi->blwi_ns) /* added by ldlm_cleanup() */ break; @@ -810,7 +817,8 @@ static int ldlm_bl_thread_main(void *arg) /* The special case when we cancel locks in LRU * asynchronously, we pass the list of locks here. * Thus locks are marked LDLM_FL_CANCELING, but NOT - * canceled locally yet. */ + * canceled locally yet. + */ count = ldlm_cli_cancel_list_local(&blwi->blwi_head, blwi->blwi_count, LCF_BL_AST); @@ -915,7 +923,7 @@ static int ldlm_setup(void) int rc = 0; int i; - if (ldlm_state != NULL) + if (ldlm_state) return -EALREADY; ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS); @@ -1040,7 +1048,7 @@ static int ldlm_cleanup(void) ldlm_pools_fini(); - if (ldlm_state->ldlm_bl_pool != NULL) { + if (ldlm_state->ldlm_bl_pool) { struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; while (atomic_read(&blp->blp_num_threads) > 0) { @@ -1059,7 +1067,7 @@ static int ldlm_cleanup(void) kfree(blp); } - if (ldlm_state->ldlm_cb_service != NULL) + if (ldlm_state->ldlm_cb_service) ptlrpc_unregister_service(ldlm_state->ldlm_cb_service); if (ldlm_ns_kset) @@ -1085,13 +1093,13 @@ int ldlm_init(void) ldlm_resource_slab = kmem_cache_create("ldlm_resources", sizeof(struct ldlm_resource), 0, SLAB_HWCACHE_ALIGN, NULL); - if (ldlm_resource_slab == NULL) + if (!ldlm_resource_slab) return -ENOMEM; ldlm_lock_slab = kmem_cache_create("ldlm_locks", sizeof(struct ldlm_lock), 0, SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL); - if (ldlm_lock_slab == NULL) { + if (!ldlm_lock_slab) { kmem_cache_destroy(ldlm_resource_slab); return -ENOMEM; } @@ -1099,7 +1107,7 @@ int ldlm_init(void) ldlm_interval_slab = kmem_cache_create("interval_node", sizeof(struct ldlm_interval), 0, SLAB_HWCACHE_ALIGN, NULL); - if (ldlm_interval_slab == NULL) { + if (!ldlm_interval_slab) { kmem_cache_destroy(ldlm_resource_slab); kmem_cache_destroy(ldlm_lock_slab); return -ENOMEM; @@ -1117,7 +1125,8 @@ void ldlm_exit(void) kmem_cache_destroy(ldlm_resource_slab); /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call * synchronize_rcu() to wait a grace period elapsed, so that - * ldlm_lock_free() get a chance to be called. */ + * ldlm_lock_free() get a chance to be called. + */ synchronize_rcu(); kmem_cache_destroy(ldlm_lock_slab); kmem_cache_destroy(ldlm_interval_slab); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c index 3d7c137d2..b913ba9cf 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c @@ -107,7 +107,7 @@ /* * 50 ldlm locks for 1MB of RAM. */ -#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) +#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50) /* * Maximal possible grant step plan in %. 
@@ -246,7 +246,6 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl) */ obd = container_of(pl, struct ldlm_namespace, ns_pool)->ns_obd; - LASSERT(obd != NULL); read_lock(&obd->obd_pool_lock); pl->pl_server_lock_volume = obd->obd_pool_slv; atomic_set(&pl->pl_limit, obd->obd_pool_limit); @@ -381,7 +380,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl) spin_unlock(&pl->pl_lock); recalc: - if (pl->pl_ops->po_recalc != NULL) { + if (pl->pl_ops->po_recalc) { count = pl->pl_ops->po_recalc(pl); lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT, count); @@ -409,7 +408,7 @@ static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask) { int cancel = 0; - if (pl->pl_ops->po_shrink != NULL) { + if (pl->pl_ops->po_shrink) { cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); if (nr > 0) { lprocfs_counter_add(pl->pl_stats, @@ -643,11 +642,11 @@ static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl) static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl) { - if (pl->pl_stats != NULL) { + if (pl->pl_stats) { lprocfs_free_stats(&pl->pl_stats); pl->pl_stats = NULL; } - if (pl->pl_debugfs_entry != NULL) { + if (pl->pl_debugfs_entry) { ldebugfs_remove(&pl->pl_debugfs_entry); pl->pl_debugfs_entry = NULL; } @@ -834,7 +833,7 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) continue; } - if (ns_old == NULL) + if (!ns_old) ns_old = ns; ldlm_namespace_get(ns); @@ -957,7 +956,7 @@ static int ldlm_pools_recalc(ldlm_side_t client) continue; } - if (ns_old == NULL) + if (!ns_old) ns_old = ns; spin_lock(&ns->ns_lock); @@ -1040,7 +1039,7 @@ static int ldlm_pools_thread_start(void) struct l_wait_info lwi = { 0 }; struct task_struct *task; - if (ldlm_pools_thread != NULL) + if (ldlm_pools_thread) return -EALREADY; ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS); @@ -1065,7 +1064,7 @@ static int ldlm_pools_thread_start(void) static void ldlm_pools_thread_stop(void) { - if (ldlm_pools_thread == NULL) + if (!ldlm_pools_thread) return; thread_set_flags(ldlm_pools_thread, SVC_STOPPING); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index b9eb37762..74e193e52 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -94,7 +94,7 @@ static int ldlm_expired_completion_wait(void *data) struct obd_import *imp; struct obd_device *obd; - if (lock->l_conn_export == NULL) { + if (!lock->l_conn_export) { static unsigned long next_dump, last_dump; LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n", @@ -128,7 +128,8 @@ static int ldlm_expired_completion_wait(void *data) } /* We use the same basis for both server side and client side functions - from a single node. */ + * from a single node. + */ static int ldlm_get_enq_timeout(struct ldlm_lock *lock) { int timeout = at_get(ldlm_lock_to_ns_at(lock)); @@ -136,8 +137,9 @@ static int ldlm_get_enq_timeout(struct ldlm_lock *lock) if (AT_OFF) return obd_timeout / 2; /* Since these are non-updating timeouts, we should be conservative. - It would be nice to have some kind of "early reply" mechanism for - lock callbacks too... */ + * It would be nice to have some kind of "early reply" mechanism for + * lock callbacks too... 
+ */ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */ return max(timeout, ldlm_enqueue_min); } @@ -239,12 +241,13 @@ noreproc: obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, then there is no import */ - if (obd != NULL) + if (obd) imp = obd->u.cli.cl_import; /* Wait a long time for enqueue - server may have to callback a - lock from another client. Server will evict the other client if it - doesn't respond reasonably, and then give us the lock. */ + * lock from another client. Server will evict the other client if it + * doesn't respond reasonably, and then give us the lock. + */ timeout = ldlm_get_enq_timeout(lock) * 2; lwd.lwd_lock = lock; @@ -258,7 +261,7 @@ noreproc: interrupted_completion_wait, &lwd); } - if (imp != NULL) { + if (imp) { spin_lock(&imp->imp_lock); lwd.lwd_conn_cnt = imp->imp_conn_cnt; spin_unlock(&imp->imp_lock); @@ -296,7 +299,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, !(lock->l_flags & LDLM_FL_FAILED)) { /* Make sure that this lock will not be found by raced * bl_ast and -EINVAL reply is sent to server anyways. - * bug 17645 */ + * bug 17645 + */ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING; need_cancel = 1; @@ -312,11 +316,13 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, ldlm_lock_decref_internal(lock, mode); /* XXX - HACK because we shouldn't call ldlm_lock_destroy() - * from llite/file.c/ll_file_flock(). */ + * from llite/file.c/ll_file_flock(). + */ /* This code makes for the fact that we do not have blocking handler on * a client for flock locks. As such this is the place where we must * completely kill failed locks. (interrupted and those that - * were waiting to be granted when server evicted us. */ + * were waiting to be granted when the server evicted us.) + */ if (lock->l_resource->lr_type == LDLM_FLOCK) { lock_res_and_lock(lock); ldlm_resource_unlink_lock(lock); @@ -331,7 +337,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, * Called after receiving reply from server.
*/ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc) { @@ -363,13 +370,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* Before we return, swab the reply */ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto cleanup; } if (lvb_len != 0) { - LASSERT(lvb != NULL); + LASSERT(lvb); size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER); @@ -401,7 +408,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* Key change rehash lock in per-export hash with new key */ if (exp->exp_lock_hash) { /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_rehash_key(exp->exp_lock_hash, &lock->l_remote_handle, @@ -415,7 +423,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & LDLM_INHERIT_FLAGS); /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match() - * to wait with no timeout as well */ + * to wait with no timeout as well + */ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & LDLM_FL_NO_TIMEOUT); unlock_res_and_lock(lock); @@ -425,7 +434,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* If enqueue returned a blocked lock but the completion handler has * already run, then it fixed up the resource and we don't need to do it - * again. */ + * again. + */ if ((*flags) & LDLM_FL_LOCK_CHANGED) { int newmode = reply->lock_desc.l_req_mode; @@ -445,7 +455,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, rc = ldlm_lock_change_resource(ns, lock, &reply->lock_desc.l_resource.lr_name); - if (rc || lock->l_resource == NULL) { + if (rc || !lock->l_resource) { rc = -ENOMEM; goto cleanup; } @@ -467,7 +477,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, if ((*flags) & LDLM_FL_AST_SENT || /* Cancel extent locks as soon as possible on a liblustre client, * because it cannot handle asynchronous ASTs robustly (see - * bug 7311). */ + * bug 7311). + */ (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) { lock_res_and_lock(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; @@ -476,12 +487,14 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, } /* If the lock has already been granted by a completion AST, don't - * clobber the LVB with an older one. */ + * clobber the LVB with an older one. + */ if (lvb_len != 0) { /* We must lock or a racing completion might update lvb without * letting us know and we'll clobber the correct value. 
- * Cannot unlock after the check either, a that still leaves - * a tiny window for completion to get in */ + * Cannot unlock after the check either, as that still leaves + * a tiny window for completion to get in + */ lock_res_and_lock(lock); if (lock->l_req_mode != lock->l_granted_mode) rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, @@ -495,7 +508,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, if (!is_replay) { rc = ldlm_lock_enqueue(ns, &lock, NULL, flags); - if (lock->l_completion_ast != NULL) { + if (lock->l_completion_ast) { int err = lock->l_completion_ast(lock, *flags, NULL); if (!rc) @@ -505,9 +518,10 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, } } - if (lvb_len && lvb != NULL) { + if (lvb_len && lvb) { /* Copy the LVB here, and not earlier, because the completion - * AST (if any) can override what we got in the reply */ + * AST (if any) can override what we got in the reply + */ memcpy(lvb, lock->l_lvb_data, lvb_len); } @@ -532,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off) { int avail; - avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; + avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size; if (likely(avail >= 0)) avail /= (int)sizeof(struct lustre_handle); else @@ -579,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, LIST_HEAD(head); int rc; - if (cancels == NULL) + if (!cancels) cancels = &head; if (ns_connect_cancelset(ns)) { /* Estimate the amount of available space in the request. */ @@ -593,7 +607,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, /* Cancel LRU locks here _only_ if the server supports * EARLY_CANCEL. Otherwise we have to send extra CANCEL - * RPC, which will make us slower. */ + * RPC, which will make us slower. + */ if (avail > count) count += ldlm_cancel_lru_local(ns, cancels, to_free, avail - count, 0, flags); @@ -618,7 +633,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, /* Skip first lock handler in ldlm_request_pack(), * this method will increment @lock_count according * to the lock handle amount actually written to - * the buffer. */ + * the buffer. + */ dlm->lock_count = canceloff; } /* Pack into the request @pack lock handles. */ @@ -665,15 +681,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, int rc, err; struct ptlrpc_request *req; - LASSERT(exp != NULL); - ns = exp->exp_obd->obd_namespace; /* If we're replaying this lock, just check some invariants. - * If we're creating a new lock, get everything all setup nice. */ + * If we're creating a new lock, get everything all setup nicely. 
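/*
 * Editor's sketch: user-space model of the ldlm_req_handles_avail()
 * arithmetic in the hunk above. PAGE_CACHE_SIZE was always an alias for
 * PAGE_SIZE (the alias was removed in 4.6), so the rename here is
 * behavior-neutral. The constants below are assumptions chosen for the
 * example, and the negative-avail branch, not fully visible in the hunk,
 * is modelled as "no room".
 */
#include <stdio.h>

#define EX_PAGE_SIZE	4096	/* assumed page size */
#define EX_MAXREQSIZE	5120	/* stand-in for LDLM_MAXREQSIZE */

struct lustre_handle { unsigned long long cookie; };	/* 8-byte cookie */

static int req_handles_avail(int req_size)
{
	int cap = EX_MAXREQSIZE < EX_PAGE_SIZE - 512 ?
		  EX_MAXREQSIZE : EX_PAGE_SIZE - 512;	/* the min_t() */
	int avail = cap - req_size;

	return avail >= 0 ? avail / (int)sizeof(struct lustre_handle) : 0;
}

int main(void)
{
	/* a 1 KiB fixed request leaves (3584 - 1024) / 8 = 320 slots */
	printf("%d early-cancel handles fit\n", req_handles_avail(1024));
	return 0;
}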
+ */ if (is_replay) { lock = ldlm_handle2lock_long(lockh, 0); - LASSERT(lock != NULL); + LASSERT(lock); LDLM_DEBUG(lock, "client-side enqueue START"); LASSERT(exp == lock->l_conn_export); } else { @@ -685,16 +700,21 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lock = ldlm_lock_create(ns, res_id, einfo->ei_type, einfo->ei_mode, &cbs, einfo->ei_cbdata, lvb_len, lvb_type); - if (lock == NULL) + if (!lock) return -ENOMEM; /* for the local lock, add the reference */ ldlm_lock_addref_internal(lock, einfo->ei_mode); ldlm_lock2handle(lock, lockh); - if (policy != NULL) - lock->l_policy_data = *policy; + if (policy) + lock->l_policy_data = *policy; + + if (einfo->ei_type == LDLM_EXTENT) { + /* extent lock without policy is a bug */ + if (!policy) + LBUG(); - if (einfo->ei_type == LDLM_EXTENT) lock->l_req_extent = policy->l_extent; + } LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n", *flags); } @@ -706,12 +726,12 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, /* lock not sent to server yet */ - if (reqp == NULL || *reqp == NULL) { + if (!reqp || !*reqp) { req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) { + if (!req) { failed_lock_cleanup(ns, lock, einfo->ei_mode); LDLM_LOCK_RELEASE(lock); return -ENOMEM; @@ -754,7 +774,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, policy->l_extent.end == OBD_OBJECT_EOF)); if (async) { - LASSERT(reqp != NULL); + LASSERT(reqp); return 0; } @@ -767,13 +787,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lockh, rc); /* If ldlm_cli_enqueue_fini did not find the lock, we need to free - * one reference that we took */ + * one reference that we took + */ if (err == -ENOLCK) LDLM_LOCK_RELEASE(lock); else rc = err; - if (!req_passed_in && req != NULL) { + if (!req_passed_in && req) { ptlrpc_req_finished(req); if (reqp) *reqp = NULL; @@ -832,7 +853,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, int max, packed = 0; dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - LASSERT(dlm != NULL); + LASSERT(dlm); /* Check the room in the request buffer. */ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) - @@ -843,7 +864,8 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, /* XXX: it would be better to pack lock handles grouped by resource. * so that the server cancel would call filter_lvbo_update() less - * frequently. */ + * frequently. + */ list_for_each_entry(lock, head, l_bl_ast) { if (!count--) break; @@ -858,17 +880,18 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, /** * Prepare and send a batched cancel RPC. It will include \a count lock - * handles of locks given in \a cancels list. */ + * handles of locks given in \a cancels list. 
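/*
 * Editor's sketch: the packing loop of ldlm_cancel_pack() above, reduced
 * to a user-space model. Handles are copied from the cancel list into
 * whatever room the request buffer advertises, and the function reports
 * how many were actually packed so the caller can shrink the buffer.
 * All names here are illustrative, not the kernel API.
 */
#include <stdio.h>

struct handle { unsigned long long cookie; };

static int cancel_pack(struct handle *dst, int max,
		       const struct handle *pending, int n, int count)
{
	int packed = 0;

	/* stop at the buffer's room (max), the list end (n), or count */
	while (packed < max && packed < n && count > 0) {
		dst[packed] = pending[packed];
		packed++;
		count--;
	}
	return packed;
}

int main(void)
{
	struct handle pending[4] = { {1}, {2}, {3}, {4} };
	struct handle buf[2];

	/* room for 2 slots only: the remaining cancels wait for next RPC */
	printf("packed %d of 4\n", cancel_pack(buf, 2, pending, 4, 4));
	return 0;
}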
+ */ static int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels, - int count, ldlm_cancel_flags_t flags) + int count, enum ldlm_cancel_flags flags) { struct ptlrpc_request *req = NULL; struct obd_import *imp; int free, sent = 0; int rc = 0; - LASSERT(exp != NULL); + LASSERT(exp); LASSERT(count > 0); CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val); @@ -883,14 +906,14 @@ static int ldlm_cli_cancel_req(struct obd_export *exp, while (1) { imp = class_exp2cliimp(exp); - if (imp == NULL || imp->imp_invalid) { + if (!imp || imp->imp_invalid) { CDEBUG(D_DLMTRACE, "skipping cancel on invalid import %p\n", imp); return count; } req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -946,7 +969,6 @@ out: static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp) { - LASSERT(imp != NULL); return &imp->imp_obd->obd_namespace->ns_pool; } @@ -971,7 +993,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) * is the case when server does not support LRU resize feature. * This is also possible in some recovery cases when server-side * reqs have no reference to the OBD export and thus access to - * server-side namespace is not possible. */ + * server-side namespace is not possible. + */ if (lustre_msg_get_slv(req->rq_repmsg) == 0 || lustre_msg_get_limit(req->rq_repmsg) == 0) { DEBUG_REQ(D_HA, req, @@ -989,7 +1012,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) * to the pool thread. We do not access obd_namespace and pool * directly here as there is no reliable way to make sure that * they are still alive at cleanup time. Evil races are possible - * which may cause Oops at that time. */ + * which may cause Oops at that time. + */ write_lock(&obd->obd_pool_lock); obd->obd_pool_slv = new_slv; obd->obd_pool_limit = new_limit; @@ -1005,7 +1029,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool); * Lock must not have any readers or writers by this time. */ int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { struct obd_export *exp; int avail, flags, count = 1; @@ -1016,8 +1040,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh, /* concurrent cancels on the same handle can happen */ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING); - if (lock == NULL) { - LDLM_DEBUG_NOLOCK("lock is already being destroyed\n"); + if (!lock) { + LDLM_DEBUG_NOLOCK("lock is already being destroyed"); return 0; } @@ -1028,7 +1052,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh, } /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL * RPC which goes to canceld portal, so we can cancel other LRU locks - * here and send them all as one LDLM_CANCEL RPC. */ + * here and send them all as one LDLM_CANCEL RPC. + */ LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, &cancels); @@ -1055,7 +1080,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel); * Return the number of cancelled locks. */ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags) + enum ldlm_cancel_flags flags) { LIST_HEAD(head); struct ldlm_lock *lock, *next; @@ -1076,7 +1101,8 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, /* Until we have compound requests and can send LDLM_CANCEL * requests batched with generic RPCs, we need to send cancels * with the LDLM_FL_BL_AST flag in a separate RPC from - * the one being generated now. */ + * the one being generated now. 
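/*
 * Editor's sketch: the server-lock-volume handoff in ldlm_cli_update_pool()
 * above, modelled with a pthread rwlock in place of obd_pool_lock. The
 * SLV/limit pair from the reply is published under a writer lock so a
 * reader (the pool thread) never observes a torn update; field and
 * function names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t pool_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long long pool_slv;	/* server lock volume */
static unsigned int pool_limit;		/* server lock limit */

static void update_pool(unsigned long long new_slv, unsigned int new_limit)
{
	if (new_slv == 0 || new_limit == 0)
		return;		/* server without LRU resize: nothing to do */

	pthread_rwlock_wrlock(&pool_lock);
	pool_slv = new_slv;	/* both fields move together */
	pool_limit = new_limit;
	pthread_rwlock_unlock(&pool_lock);
}

int main(void)
{
	update_pool(1000000ULL, 4096);
	printf("slv=%llu limit=%u\n", pool_slv, pool_limit);
	return 0;
}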
+ */ if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) { LDLM_DEBUG(lock, "Cancel lock separately"); list_del_init(&lock->l_bl_ast); @@ -1116,7 +1142,8 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, lock_res_and_lock(lock); /* don't check added & count since we want to process all locks - * from unused list */ + * from unused list + */ switch (lock->l_resource->lr_type) { case LDLM_EXTENT: case LDLM_IBITS: @@ -1152,7 +1179,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, unsigned long la; /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. */ + * locks in LRU. + */ if (count && added >= count) return LDLM_POLICY_KEEP_LOCK; @@ -1166,7 +1194,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, ldlm_pool_set_clv(pl, lv); /* Stop when SLV is not yet come from server or lv is smaller than - * it is. */ + * it is. + */ return (slv == 0 || lv < slv) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1186,7 +1215,8 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns, int count) { /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. */ + * locks in LRU. + */ return (added >= count) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1227,7 +1257,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns, int count) { /* Stop LRU processing when we reach past count or have checked all - * locks in LRU. */ + * locks in LRU. + */ return (added >= count) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1307,7 +1338,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, count += unused - ns->ns_max_unused; pf = ldlm_cancel_lru_policy(ns, flags); - LASSERT(pf != NULL); + LASSERT(pf); while (!list_empty(&ns->ns_unused_list)) { ldlm_policy_res_t result; @@ -1331,7 +1362,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, continue; /* Somebody is already doing CANCEL. No need for this - * lock in LRU, do not traverse it again. */ + * lock in LRU, do not traverse it again. + */ if (!(lock->l_flags & LDLM_FL_CANCELING)) break; @@ -1380,7 +1412,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, /* Another thread is removing lock from LRU, or * somebody is already doing CANCEL, or there * is a blocking request which will send cancel - * by itself, or the lock is no longer unused. */ + * by itself, or the lock is no longer unused. + */ unlock_res_and_lock(lock); lu_ref_del(&lock->l_reference, __func__, current); @@ -1394,7 +1427,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * better send cancel notification to server, so that it * frees appropriate state. This might lead to a race * where while we are doing cancel here, server is also - * silently cancelling this lock. */ + * silently cancelling this lock. + */ lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK; /* Setting the CBPENDING flag is a little misleading, @@ -1402,7 +1436,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * CBPENDING is set, the lock can accumulate no more * readers/writers. 
Since readers and writers are * already zero here, ldlm_lock_decref() won't see - * this flag and call l_blocking_ast */ + * this flag and call l_blocking_ast + */ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING; /* We can't re-add to l_lru as it confuses the @@ -1410,7 +1445,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * arrives after we drop lr_lock below. We use l_bl_ast * and can't use l_pending_chain as it is used both on * server and client nevertheless bug 5666 says it is - * used only on server */ + * used only on server + */ LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, cancels); unlock_res_and_lock(lock); @@ -1425,7 +1461,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, - ldlm_cancel_flags_t cancel_flags, int flags) + enum ldlm_cancel_flags cancel_flags, int flags) { int added; @@ -1444,14 +1480,15 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, * callback will be performed in this function. */ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - ldlm_cancel_flags_t cancel_flags, + enum ldlm_cancel_flags cancel_flags, int flags) { LIST_HEAD(cancels); int count, rc; /* Just prepare the list of locks, do not actually cancel them yet. - * Locks are cancelled later in a separate thread. */ + * Locks are cancelled later in a separate thread. + */ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags); rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags); if (rc == 0) @@ -1468,15 +1505,16 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque) + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque) { struct ldlm_lock *lock; int count = 0; lock_res(res); list_for_each_entry(lock, &res->lr_granted, l_res_link) { - if (opaque != NULL && lock->l_ast_data != opaque) { + if (opaque && lock->l_ast_data != opaque) { LDLM_ERROR(lock, "data %p doesn't match opaque %p", lock->l_ast_data, opaque); continue; @@ -1486,7 +1524,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res, continue; /* If somebody is already doing CANCEL, or blocking AST came, - * skip this lock. */ + * skip this lock. + */ if (lock->l_flags & LDLM_FL_BL_AST || lock->l_flags & LDLM_FL_CANCELING) continue; @@ -1495,7 +1534,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res, continue; /* If policy is given and this is IBITS lock, add to list only - * those locks that match by policy. */ + * those locks that match by policy. + */ if (policy && (lock->l_resource->lr_type == LDLM_IBITS) && !(lock->l_policy_data.l_inodebits.bits & policy->l_inodebits.bits)) @@ -1527,7 +1567,8 @@ EXPORT_SYMBOL(ldlm_cancel_resource_local); * Destroy \a cancels at the end. */ int ldlm_cli_cancel_list(struct list_head *cancels, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags) + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags) { struct ldlm_lock *lock; int res = 0; @@ -1539,7 +1580,8 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count, * Usually it is enough to have just 1 RPC, but it is possible that * there are too many locks to be cancelled in LRU or on a resource. * It would also speed up the case when the server does not support - * the feature. 
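/*
 * Editor's sketch: the decision rule of ldlm_cancel_lrur_policy() above.
 * A client-side lock volume grows with the lock's idle age; once it
 * reaches the server-supplied SLV, the lock is cancelled. Only the
 * comparison against slv is visible in the hunk, so the lv = lvf * la
 * formula below is an assumption based on the surrounding code.
 */
#include <stdio.h>

enum policy { KEEP_LOCK, CANCEL_LOCK };

static enum policy lrur_policy(unsigned long slv, unsigned long lvf,
			       unsigned long idle_seconds)
{
	unsigned long lv = lvf * idle_seconds;	/* client lock volume */

	/* keep while the server has not yet published an SLV (slv == 0)
	 * or the lock is still cheaper than the server threshold */
	return (slv == 0 || lv < slv) ? KEEP_LOCK : CANCEL_LOCK;
}

int main(void)
{
	/* lv = 2 * 60 = 120 >= slv = 100, so the lock is cancelled */
	printf("%s\n", lrur_policy(100, 2, 60) == CANCEL_LOCK ?
	       "cancel" : "keep");
	return 0;
}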
*/ + * the feature. + */ while (count > 0) { LASSERT(!list_empty(cancels)); lock = list_entry(cancels->next, struct ldlm_lock, @@ -1577,12 +1619,13 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list); * Cancel all locks on a resource that have 0 readers/writers. * * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying - * to notify the server. */ + * to notify the server. + */ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_resource *res; @@ -1591,7 +1634,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, int rc; res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (res == NULL) { + if (!res) { /* This is not a problem. */ CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]); return 0; @@ -1638,17 +1681,17 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, * to notify the server. */ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_cancel_flags_t flags, void *opaque) + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_cli_cancel_arg arg = { .lc_flags = flags, .lc_opaque = opaque, }; - if (ns == NULL) + if (!ns) return ELDLM_OK; - if (res_id != NULL) { + if (res_id) { return ldlm_cli_cancel_unused_resource(ns, res_id, NULL, LCK_MINMODE, flags, opaque); @@ -1743,13 +1786,13 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns, struct ldlm_resource *res; int rc; - if (ns == NULL) { + if (!ns) { CERROR("must pass in namespace\n"); LBUG(); } res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -1796,7 +1839,7 @@ static int replay_lock_interpret(const struct lu_env *env, goto out; reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto out; } @@ -1815,7 +1858,8 @@ static int replay_lock_interpret(const struct lu_env *env, exp = req->rq_export; if (exp && exp->exp_lock_hash) { /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_rehash_key(exp->exp_lock_hash, &lock->l_remote_handle, @@ -1850,7 +1894,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) /* If this is reply-less callback lock, we cannot replay it, since * server might have long dropped it, but notification of that event was - * lost by network. (and server granted conflicting lock already) */ + * lost by network. (and server granted conflicting lock already) + */ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) { LDLM_DEBUG(lock, "Not replaying reply-less lock:"); ldlm_lock_cancel(lock); @@ -1882,7 +1927,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) + if (!req) return -ENOMEM; /* We're part of recovery, so don't wait for it. */ @@ -1901,7 +1946,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) /* notify the server we've replayed all requests. * also, we mark the request to be put on a dedicated * queue to be processed after all request replayes. 
- * bug 6063 */ + * bug 6063 + */ lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE); LDLM_DEBUG(lock, "replaying lock:"); @@ -1936,7 +1982,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns) /* We don't need to care whether or not LRU resize is enabled * because the LDLM_CANCEL_NO_WAIT policy doesn't use the - * count parameter */ + * count parameter + */ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0, LCF_LOCAL, LDLM_CANCEL_NO_WAIT); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c index 0ae610015..9dede87ad 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c @@ -56,7 +56,8 @@ LIST_HEAD(ldlm_srv_namespace_list); struct mutex ldlm_cli_namespace_lock; /* Client Namespaces that have active resources in them. * Once all resources go away, ldlm_poold moves such namespaces to the - * inactive list */ + * inactive list + */ LIST_HEAD(ldlm_cli_active_namespace_list); /* Client namespaces that don't have any locks in them */ static LIST_HEAD(ldlm_cli_inactive_namespace_list); @@ -66,7 +67,8 @@ static struct dentry *ldlm_ns_debugfs_dir; struct dentry *ldlm_svc_debugfs_dir; /* during debug dump certain amount of granted locks for one resource to avoid - * DDOS. */ + * DDOS. + */ static unsigned int ldlm_dump_granted_max = 256; static ssize_t @@ -275,7 +277,8 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED); /* Make sure that LRU resize was originally supported before - * turning it on here. */ + * turning it on here. + */ if (lru_resize && (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) { CDEBUG(D_DLMTRACE, @@ -380,7 +383,7 @@ static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns) else ldebugfs_remove(&ns->ns_debugfs_entry); - if (ns->ns_stats != NULL) + if (ns->ns_stats) lprocfs_free_stats(&ns->ns_stats); } @@ -400,7 +403,7 @@ static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns) "%s", ldlm_ns_name(ns)); ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0); - if (ns->ns_stats == NULL) { + if (!ns->ns_stats) { kobject_put(&ns->ns_kobj); return -ENOMEM; } @@ -420,7 +423,7 @@ static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns) } else { ns_entry = debugfs_create_dir(ldlm_ns_name(ns), ldlm_ns_debugfs_dir); - if (ns_entry == NULL) + if (!ns_entry) return -ENOMEM; ns->ns_debugfs_entry = ns_entry; } @@ -554,7 +557,7 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = { }; struct ldlm_ns_hash_def { - ldlm_ns_type_t nsd_type; + enum ldlm_ns_type nsd_type; /** hash bucket bits */ unsigned nsd_bkt_bits; /** hash bits */ @@ -621,8 +624,8 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns, */ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, ldlm_side_t client, - ldlm_appetite_t apt, - ldlm_ns_type_t ns_type) + enum ldlm_appetite apt, + enum ldlm_ns_type ns_type) { struct ldlm_namespace *ns = NULL; struct ldlm_ns_bucket *nsb; @@ -631,7 +634,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, int idx; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = ldlm_get_ref(); if (rc) { @@ -664,7 +667,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, CFS_HASH_BIGNAME | CFS_HASH_SPIN_BKTLOCK | CFS_HASH_NO_ITEMREF); - if (ns->ns_rs_hash == NULL) + if (!ns->ns_rs_hash) goto out_ns; 
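/*
 * Editor's sketch: the goto-unwind error handling used by
 * ldlm_namespace_new() above (`goto out_ns` and friends). Each allocation
 * that succeeds adds one label's worth of teardown, so a late failure
 * releases the earlier resources in reverse order exactly once. Plain
 * malloc()s stand in for the slab and hash allocations.
 */
#include <stdlib.h>

struct ns { void *stats; void *hash; };

static struct ns *ns_new(void)
{
	struct ns *ns = calloc(1, sizeof(*ns));

	if (!ns)
		return NULL;
	ns->stats = malloc(64);
	if (!ns->stats)
		goto out_ns;
	ns->hash = malloc(256);
	if (!ns->hash)
		goto out_stats;
	return ns;		/* fully constructed */

out_stats:			/* undo in reverse allocation order */
	free(ns->stats);
out_ns:
	free(ns);
	return NULL;
}

int main(void)
{
	struct ns *ns = ns_new();

	if (ns) {
		free(ns->hash);
		free(ns->stats);
		free(ns);
	}
	return 0;
}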
cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) { @@ -749,7 +752,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, struct lustre_handle lockh; /* First, we look for non-cleaned-yet lock - * all cleaned locks are marked by CLEANED flag. */ + * all cleaned locks are marked by CLEANED flag. + */ lock_res(res); list_for_each(tmp, q) { lock = list_entry(tmp, struct ldlm_lock, @@ -763,13 +767,14 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, break; } - if (lock == NULL) { + if (!lock) { unlock_res(res); break; } /* Set CBPENDING so nothing in the cancellation path - * can match this lock. */ + * can match this lock. + */ lock->l_flags |= LDLM_FL_CBPENDING; lock->l_flags |= LDLM_FL_FAILED; lock->l_flags |= flags; @@ -782,7 +787,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, /* This is a little bit gross, but much better than the * alternative: pretend that we got a blocking AST from * the server, so that when the lock is decref'd, it - * will go away ... */ + * will go away ... + */ unlock_res(res); LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY"); if (lock->l_completion_ast) @@ -837,7 +843,7 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd, */ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags) { - if (ns == NULL) { + if (!ns) { CDEBUG(D_INFO, "NULL ns, skipping cleanup\n"); return ELDLM_OK; } @@ -873,7 +879,8 @@ force_wait: atomic_read(&ns->ns_bref) == 0, &lwi); /* Forced cleanups should be able to reclaim all references, - * so it's safe to wait forever... we can't leak locks... */ + * so it's safe to wait forever... we can't leak locks... + */ if (force && rc == -ETIMEDOUT) { LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n", ldlm_ns_name(ns), @@ -943,7 +950,8 @@ static void ldlm_namespace_unregister(struct ldlm_namespace *ns, LASSERT(!list_empty(&ns->ns_list_chain)); /* Some asserts and possibly other parts of the code are still * using list_empty(&ns->ns_list_chain). This is why it is - * important to use list_del_init() here. */ + * important to use list_del_init() here. + */ list_del_init(&ns->ns_list_chain); ldlm_namespace_nr_dec(client); mutex_unlock(ldlm_namespace_lock(client)); @@ -963,7 +971,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns) ldlm_namespace_unregister(ns, ns->ns_client); /* Fini pool _before_ parent proc dir is removed. This is important as * ldlm_pool_fini() removes own proc dir which is child to @dir. - * Removing it after @dir may cause oops. */ + * Removing it after @dir may cause oops. + */ ldlm_pool_fini(&ns->ns_pool); ldlm_namespace_debugfs_unregister(ns); @@ -971,7 +980,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns) cfs_hash_putref(ns->ns_rs_hash); /* Namespace \a ns should be not on list at this time, otherwise * this will cause issues related to using freed \a ns in poold - * thread. */ + * thread. 
+ */ LASSERT(list_empty(&ns->ns_list_chain)); kfree(ns); ldlm_put_ref(); @@ -1031,8 +1041,8 @@ static struct ldlm_resource *ldlm_resource_new(void) struct ldlm_resource *res; int idx; - res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO); - if (res == NULL) + res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS); + if (!res) return NULL; INIT_LIST_HEAD(&res->lr_granted); @@ -1050,7 +1060,8 @@ static struct ldlm_resource *ldlm_resource_new(void) lu_ref_init(&res->lr_reference); /* The creator of the resource must unlock the mutex after LVB - * initialization. */ + * initialization. + */ mutex_init(&res->lr_lvb_mutex); mutex_lock(&res->lr_lvb_mutex); @@ -1065,7 +1076,8 @@ static struct ldlm_resource *ldlm_resource_new(void) */ struct ldlm_resource * ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, - const struct ldlm_res_id *name, ldlm_type_t type, int create) + const struct ldlm_res_id *name, enum ldlm_type type, + int create) { struct hlist_node *hnode; struct ldlm_resource *res; @@ -1073,14 +1085,13 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, __u64 version; int ns_refcount = 0; - LASSERT(ns != NULL); - LASSERT(parent == NULL); - LASSERT(ns->ns_rs_hash != NULL); + LASSERT(!parent); + LASSERT(ns->ns_rs_hash); LASSERT(name->name[0] != 0); cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0); hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode != NULL) { + if (hnode) { cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0); res = hlist_entry(hnode, struct ldlm_resource, lr_hash); /* Synchronize with regard to resource creation. */ @@ -1111,13 +1122,12 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd); res->lr_name = *name; res->lr_type = type; - res->lr_most_restr = LCK_NL; cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1); hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL : cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode != NULL) { + if (hnode) { /* Someone won the race and already added the resource. */ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1); /* Clean lu_ref for failed resource. */ @@ -1167,7 +1177,8 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, /* Let's see if we happened to be the very first resource in this * namespace. If so, and this is a client namespace, we need to move * the namespace into the active namespaces list to be patrolled by - * the ldlm_poold. */ + * the ldlm_poold. 
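/*
 * Editor's sketch: why the kmem_cache_zalloc() change in
 * ldlm_resource_new() above is behavior-neutral. In the kernel,
 * kmem_cache_zalloc(c, f) is defined as kmem_cache_alloc(c, f | __GFP_ZERO),
 * i.e. the same allocation with zeroing folded in; the user-space analogue
 * is malloc+memset versus calloc, modelled here.
 */
#include <stdlib.h>
#include <string.h>

/* old spelling: allocate, then rely on an explicit zeroing step */
static void *alloc_zeroed_old(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

/* new spelling: one call that allocates and zeroes */
static void *alloc_zeroed_new(size_t size)
{
	return calloc(1, size);
}

int main(void)
{
	void *a = alloc_zeroed_old(128);
	void *b = alloc_zeroed_new(128);

	free(a);
	free(b);
	return 0;
}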
+ */ if (ns_refcount == 1) { mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT); diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lustre/libcfs/Makefile deleted file mode 100644 index 03d3f3d7b..000000000 --- a/drivers/staging/lustre/lustre/libcfs/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -obj-$(CONFIG_LUSTRE_FS) += libcfs.o - -libcfs-linux-objs := linux-tracefile.o linux-debug.o -libcfs-linux-objs += linux-prim.o linux-cpu.o -libcfs-linux-objs += linux-curproc.o -libcfs-linux-objs += linux-module.o -libcfs-linux-objs += linux-crypto.o -libcfs-linux-objs += linux-crypto-adler.o -libcfs-linux-objs += linux-mem.o - -libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs)) - -libcfs-all-objs := debug.o fail.o module.o tracefile.o \ - libcfs_string.o hash.o kernel_user_comm.o \ - prng.o workitem.o libcfs_cpu.o \ - libcfs_mem.o libcfs_lock.o - -libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs) diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c deleted file mode 100644 index 0b38dad13..000000000 --- a/drivers/staging/lustre/lustre/libcfs/debug.c +++ /dev/null @@ -1,559 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * libcfs/libcfs/debug.c - * - * Author: Phil Schwan - * - */ - -# define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" -#include "tracefile.h" - -static char debug_file_name[1024]; - -unsigned int libcfs_subsystem_debug = ~0; -module_param(libcfs_subsystem_debug, int, 0644); -MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask"); -EXPORT_SYMBOL(libcfs_subsystem_debug); - -unsigned int libcfs_debug = (D_CANTMASK | - D_NETERROR | D_HA | D_CONFIG | D_IOCTL); -module_param(libcfs_debug, int, 0644); -MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask"); -EXPORT_SYMBOL(libcfs_debug); - -static int libcfs_param_debug_mb_set(const char *val, - const struct kernel_param *kp) -{ - int rc; - unsigned num; - - rc = kstrtouint(val, 0, &num); - if (rc < 0) - return rc; - - if (!*((unsigned int *)kp->arg)) { - *((unsigned int *)kp->arg) = num; - return 0; - } - - rc = cfs_trace_set_debug_mb(num); - - if (!rc) - *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb(); - - return rc; -} - -/* While debug_mb setting look like unsigned int, in fact - * it needs quite a bunch of extra processing, so we define special - * debugmb parameter type with corresponding methods to handle this case */ -static struct kernel_param_ops param_ops_debugmb = { - .set = libcfs_param_debug_mb_set, - .get = param_get_uint, -}; - -#define param_check_debugmb(name, p) \ - __param_check(name, p, unsigned int) - -static unsigned int libcfs_debug_mb; -module_param(libcfs_debug_mb, debugmb, 0644); -MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size."); - -unsigned int libcfs_printk = D_CANTMASK; -module_param(libcfs_printk, uint, 0644); -MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask"); - -unsigned int libcfs_console_ratelimit = 1; -module_param(libcfs_console_ratelimit, uint, 0644); -MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)"); - -static int param_set_delay_minmax(const char *val, - const struct kernel_param *kp, - long min, long max) -{ - long d; - int sec; - int rc; - - rc = kstrtoint(val, 0, &sec); - if (rc) - return -EINVAL; - - d = cfs_time_seconds(sec) / 100; - if (d < min || d > max) - return -EINVAL; - - *((unsigned int *)kp->arg) = d; - - return 0; -} - -static int param_get_delay(char *buffer, const struct kernel_param *kp) -{ - unsigned int d = *(unsigned int *)kp->arg; - - return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100)); -} - -unsigned int libcfs_console_max_delay; -unsigned int libcfs_console_min_delay; - -static int param_set_console_max_delay(const char *val, - const struct kernel_param *kp) -{ - return param_set_delay_minmax(val, kp, - libcfs_console_min_delay, INT_MAX); -} - -static struct kernel_param_ops param_ops_console_max_delay = { - .set = param_set_console_max_delay, - .get = param_get_delay, -}; - -#define param_check_console_max_delay(name, p) \ - __param_check(name, p, unsigned int) - -module_param(libcfs_console_max_delay, console_max_delay, 0644); -MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)"); - -static int param_set_console_min_delay(const char *val, - const struct kernel_param *kp) -{ - return param_set_delay_minmax(val, kp, - 1, libcfs_console_max_delay); -} - -static struct kernel_param_ops param_ops_console_min_delay = { - .set = param_set_console_min_delay, - .get = param_get_delay, -}; - -#define param_check_console_min_delay(name, p) \ - __param_check(name, p, unsigned int) - 
-module_param(libcfs_console_min_delay, console_min_delay, 0644); -MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)"); - -static int param_set_uint_minmax(const char *val, - const struct kernel_param *kp, - unsigned int min, unsigned int max) -{ - unsigned int num; - int ret; - - if (!val) - return -EINVAL; - ret = kstrtouint(val, 0, &num); - if (ret < 0 || num < min || num > max) - return -EINVAL; - *((unsigned int *)kp->arg) = num; - return 0; -} - -static int param_set_uintpos(const char *val, const struct kernel_param *kp) -{ - return param_set_uint_minmax(val, kp, 1, -1); -} - -static struct kernel_param_ops param_ops_uintpos = { - .set = param_set_uintpos, - .get = param_get_uint, -}; - -#define param_check_uintpos(name, p) \ - __param_check(name, p, unsigned int) - -unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF; -module_param(libcfs_console_backoff, uintpos, 0644); -MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor"); - -unsigned int libcfs_debug_binary = 1; - -unsigned int libcfs_stack = 3 * THREAD_SIZE / 4; -EXPORT_SYMBOL(libcfs_stack); - -unsigned int libcfs_catastrophe; -EXPORT_SYMBOL(libcfs_catastrophe); - -unsigned int libcfs_panic_on_lbug = 1; -module_param(libcfs_panic_on_lbug, uint, 0644); -MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG"); - -static wait_queue_head_t debug_ctlwq; - -char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT; - -/* We need to pass a pointer here, but elsewhere this must be a const */ -static char *libcfs_debug_file_path; -module_param(libcfs_debug_file_path, charp, 0644); -MODULE_PARM_DESC(libcfs_debug_file_path, - "Path for dumping debug logs, set 'NONE' to prevent log dumping"); - -int libcfs_panic_in_progress; - -/* libcfs_debug_token2mask() expects the returned - * string in lower-case */ -static const char * -libcfs_debug_subsys2str(int subsys) -{ - switch (1 << subsys) { - default: - return NULL; - case S_UNDEFINED: - return "undefined"; - case S_MDC: - return "mdc"; - case S_MDS: - return "mds"; - case S_OSC: - return "osc"; - case S_OST: - return "ost"; - case S_CLASS: - return "class"; - case S_LOG: - return "log"; - case S_LLITE: - return "llite"; - case S_RPC: - return "rpc"; - case S_LNET: - return "lnet"; - case S_LND: - return "lnd"; - case S_PINGER: - return "pinger"; - case S_FILTER: - return "filter"; - case S_ECHO: - return "echo"; - case S_LDLM: - return "ldlm"; - case S_LOV: - return "lov"; - case S_LQUOTA: - return "lquota"; - case S_OSD: - return "osd"; - case S_LMV: - return "lmv"; - case S_SEC: - return "sec"; - case S_GSS: - return "gss"; - case S_MGC: - return "mgc"; - case S_MGS: - return "mgs"; - case S_FID: - return "fid"; - case S_FLD: - return "fld"; - } -} - -/* libcfs_debug_token2mask() expects the returned - * string in lower-case */ -static const char * -libcfs_debug_dbg2str(int debug) -{ - switch (1 << debug) { - default: - return NULL; - case D_TRACE: - return "trace"; - case D_INODE: - return "inode"; - case D_SUPER: - return "super"; - case D_EXT2: - return "ext2"; - case D_MALLOC: - return "malloc"; - case D_CACHE: - return "cache"; - case D_INFO: - return "info"; - case D_IOCTL: - return "ioctl"; - case D_NETERROR: - return "neterror"; - case D_NET: - return "net"; - case D_WARNING: - return "warning"; - case D_BUFFS: - return "buffs"; - case D_OTHER: - return "other"; - case D_DENTRY: - return "dentry"; - case D_NETTRACE: - return "nettrace"; - case D_PAGE: - return 
"page"; - case D_DLMTRACE: - return "dlmtrace"; - case D_ERROR: - return "error"; - case D_EMERG: - return "emerg"; - case D_HA: - return "ha"; - case D_RPCTRACE: - return "rpctrace"; - case D_VFSTRACE: - return "vfstrace"; - case D_READA: - return "reada"; - case D_MMAP: - return "mmap"; - case D_CONFIG: - return "config"; - case D_CONSOLE: - return "console"; - case D_QUOTA: - return "quota"; - case D_SEC: - return "sec"; - case D_LFSCK: - return "lfsck"; - } -} - -int -libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) -{ - const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : - libcfs_debug_dbg2str; - int len = 0; - const char *token; - int i; - - if (mask == 0) { /* "0" */ - if (size > 0) - str[0] = '0'; - len = 1; - } else { /* space-separated tokens */ - for (i = 0; i < 32; i++) { - if ((mask & (1 << i)) == 0) - continue; - - token = fn(i); - if (token == NULL) /* unused bit */ - continue; - - if (len > 0) { /* separator? */ - if (len < size) - str[len] = ' '; - len++; - } - - while (*token != 0) { - if (len < size) - str[len] = *token; - token++; - len++; - } - } - } - - /* terminate 'str' */ - if (len < size) - str[len] = 0; - else - str[size - 1] = 0; - - return len; -} - -int -libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) -{ - const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : - libcfs_debug_dbg2str; - int m = 0; - int matched; - int n; - int t; - - /* Allow a number for backwards compatibility */ - - for (n = strlen(str); n > 0; n--) - if (!isspace(str[n-1])) - break; - matched = n; - t = sscanf(str, "%i%n", &m, &matched); - if (t >= 1 && matched == n) { - /* don't print warning for lctl set_param debug=0 or -1 */ - if (m != 0 && m != -1) - CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n"); - *mask = m; - return 0; - } - - return cfs_str2mask(str, fn, mask, is_subsys ? 
0 : D_CANTMASK, - 0xffffffff); -} - -/** - * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages() - */ -void libcfs_debug_dumplog_internal(void *arg) -{ - void *journal_info; - - journal_info = current->journal_info; - current->journal_info = NULL; - - if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) { - snprintf(debug_file_name, sizeof(debug_file_name) - 1, - "%s.%lld.%ld", libcfs_debug_file_path_arr, - (s64)ktime_get_real_seconds(), (long_ptr_t)arg); - pr_alert("LustreError: dumping log to %s\n", - debug_file_name); - cfs_tracefile_dump_all_pages(debug_file_name); - libcfs_run_debug_log_upcall(debug_file_name); - } - - current->journal_info = journal_info; -} - -static int libcfs_debug_dumplog_thread(void *arg) -{ - libcfs_debug_dumplog_internal(arg); - wake_up(&debug_ctlwq); - return 0; -} - -void libcfs_debug_dumplog(void) -{ - wait_queue_t wait; - struct task_struct *dumper; - - /* we're being careful to ensure that the kernel thread is - * able to set our state to running as it exits before we - * get to schedule() */ - init_waitqueue_entry(&wait, current); - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&debug_ctlwq, &wait); - - dumper = kthread_run(libcfs_debug_dumplog_thread, - (void *)(long)current_pid(), - "libcfs_debug_dumper"); - if (IS_ERR(dumper)) - pr_err("LustreError: cannot start log dump thread: %ld\n", - PTR_ERR(dumper)); - else - schedule(); - - /* be sure to teardown if cfs_create_thread() failed */ - remove_wait_queue(&debug_ctlwq, &wait); - set_current_state(TASK_RUNNING); -} -EXPORT_SYMBOL(libcfs_debug_dumplog); - -int libcfs_debug_init(unsigned long bufsize) -{ - int rc = 0; - unsigned int max = libcfs_debug_mb; - - init_waitqueue_head(&debug_ctlwq); - - if (libcfs_console_max_delay <= 0 || /* not set by user or */ - libcfs_console_min_delay <= 0 || /* set to invalid values */ - libcfs_console_min_delay >= libcfs_console_max_delay) { - libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY; - libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY; - } - - if (libcfs_debug_file_path != NULL) { - strlcpy(libcfs_debug_file_path_arr, - libcfs_debug_file_path, - sizeof(libcfs_debug_file_path_arr)); - } - - /* If libcfs_debug_mb is set to an invalid value or uninitialized - * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */ - if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) { - max = TCD_MAX_PAGES; - } else { - max = max / num_possible_cpus(); - max <<= (20 - PAGE_CACHE_SHIFT); - } - rc = cfs_tracefile_init(max); - - if (rc == 0) { - libcfs_register_panic_notifier(); - libcfs_debug_mb = cfs_trace_get_debug_mb(); - } - - return rc; -} - -int libcfs_debug_cleanup(void) -{ - libcfs_unregister_panic_notifier(); - cfs_tracefile_exit(); - return 0; -} - -int libcfs_debug_clear_buffer(void) -{ - cfs_trace_flush_pages(); - return 0; -} - -/* Debug markers, although printed by S_LNET - * should not be be marked as such. 
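/*
 * Editor's sketch: the round trip implemented by libcfs_debug_mask2str()
 * and libcfs_debug_str2mask() above, reduced to a user-space model with a
 * tiny token table. As in the originals, unused bits map to NULL and are
 * skipped, and a bare number is still accepted for backwards
 * compatibility. The token set is an assumption for the example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *tokens[] = { "trace", "inode", NULL, "malloc" };
#define NTOKENS (sizeof(tokens) / sizeof(tokens[0]))

static void mask2str(char *buf, size_t size, unsigned int mask)
{
	size_t len = 0, i;

	buf[0] = '\0';
	for (i = 0; i < NTOKENS; i++) {
		if (!(mask & (1u << i)) || !tokens[i])
			continue;	/* bit clear, or unused bit */
		len += snprintf(buf + len, size - len, "%s%s",
				len ? " " : "", tokens[i]);
	}
}

static unsigned int str2mask(const char *str)
{
	unsigned int mask = 0;
	size_t i;
	char *end;
	unsigned long num = strtoul(str, &end, 0);

	if (end != str && *end == '\0')
		return (unsigned int)num;	/* numeric form accepted */
	for (i = 0; i < NTOKENS; i++)
		if (tokens[i] && strstr(str, tokens[i]))
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	char buf[64];

	mask2str(buf, sizeof(buf), 0x9);	/* trace + malloc */
	printf("\"%s\" -> %#x\n", buf, str2mask(buf));
	return 0;
}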
*/ -#undef DEBUG_SUBSYSTEM -#define DEBUG_SUBSYSTEM S_UNDEFINED -int libcfs_debug_mark_buffer(const char *text) -{ - CDEBUG(D_TRACE, - "***************************************************\n"); - LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text); - CDEBUG(D_TRACE, - "***************************************************\n"); - - return 0; -} - -#undef DEBUG_SUBSYSTEM -#define DEBUG_SUBSYSTEM S_LNET diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c deleted file mode 100644 index 27831432d..000000000 --- a/drivers/staging/lustre/lustre/libcfs/fail.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores, - * CA 94065 USA or visit www.oracle.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Oracle Corporation, Inc. - */ - -#include "../../include/linux/libcfs/libcfs.h" - -unsigned long cfs_fail_loc; -EXPORT_SYMBOL(cfs_fail_loc); - -unsigned int cfs_fail_val; -EXPORT_SYMBOL(cfs_fail_val); - -DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq); -EXPORT_SYMBOL(cfs_race_waitq); - -int cfs_race_state; -EXPORT_SYMBOL(cfs_race_state); - -int __cfs_fail_check_set(__u32 id, __u32 value, int set) -{ - static atomic_t cfs_fail_count = ATOMIC_INIT(0); - - LASSERT(!(id & CFS_FAIL_ONCE)); - - if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) == - (CFS_FAILED | CFS_FAIL_ONCE)) { - atomic_set(&cfs_fail_count, 0); /* paranoia */ - return 0; - } - - /* Fail 1/cfs_fail_val times */ - if (cfs_fail_loc & CFS_FAIL_RAND) { - if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0) - return 0; - } - - /* Skip the first cfs_fail_val, then fail */ - if (cfs_fail_loc & CFS_FAIL_SKIP) { - if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val) - return 0; - } - - /* check cfs_fail_val... 
*/ - if (set == CFS_FAIL_LOC_VALUE) { - if (cfs_fail_val != -1 && cfs_fail_val != value) - return 0; - } - - /* Fail cfs_fail_val times, overridden by FAIL_ONCE */ - if (cfs_fail_loc & CFS_FAIL_SOME && - (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) { - int count = atomic_inc_return(&cfs_fail_count); - - if (count >= cfs_fail_val) { - set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); - atomic_set(&cfs_fail_count, 0); - /* we are lost race to increase */ - if (count > cfs_fail_val) - return 0; - } - } - - if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) && - (value & CFS_FAIL_ONCE)) - set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); - /* Lost race to set CFS_FAILED_BIT. */ - if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) { - /* If CFS_FAIL_ONCE is valid, only one process can fail, - * otherwise multi-process can fail at the same time. */ - if (cfs_fail_loc & CFS_FAIL_ONCE) - return 0; - } - - switch (set) { - case CFS_FAIL_LOC_NOSET: - case CFS_FAIL_LOC_VALUE: - break; - case CFS_FAIL_LOC_ORSET: - cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE); - break; - case CFS_FAIL_LOC_RESET: - cfs_fail_loc = value; - break; - default: - LASSERTF(0, "called with bad set %u\n", set); - break; - } - - return 1; -} -EXPORT_SYMBOL(__cfs_fail_check_set); - -int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) -{ - int ret; - - ret = __cfs_fail_check_set(id, value, set); - if (ret && likely(ms > 0)) { - CERROR("cfs_fail_timeout id %x sleeping for %dms\n", - id, ms); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(ms) / 1000); - CERROR("cfs_fail_timeout id %x awake\n", id); - } - return ret; -} -EXPORT_SYMBOL(__cfs_fail_timeout_set); diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c deleted file mode 100644 index 4d5051043..000000000 --- a/drivers/staging/lustre/lustre/libcfs/hash.c +++ /dev/null @@ -1,2092 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/hash.c - * - * Implement a hash class for hash process in lustre system. 
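/*
 * Editor's sketch: the three statistical modes of __cfs_fail_check_set()
 * above, as a user-space model. RAND fails roughly once per `val` calls,
 * SKIP passes the first `val` calls and fails afterwards, and SOME fails
 * the first `val` calls and then disarms (the original also resets its
 * counter and sets the ONCE bit). Names and the rand() source are
 * illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

enum fail_mode { FAIL_RAND, FAIL_SKIP, FAIL_SOME };

static int fail_count;	/* models the static atomic counter */

static int fail_check(enum fail_mode mode, unsigned int val)
{
	switch (mode) {
	case FAIL_RAND:	/* fail with probability 1/val */
		return val >= 2 && rand() % val == 0;
	case FAIL_SKIP:	/* let the first `val` calls through */
		return ++fail_count > (int)val;
	case FAIL_SOME:	/* fail `val` times, then stop */
		return ++fail_count <= (int)val;
	}
	return 0;
}

int main(void)
{
	int i, hits = 0;

	for (i = 0; i < 10; i++)
		hits += fail_check(FAIL_SKIP, 7);
	printf("SKIP(7): %d of 10 calls failed\n", hits);	/* 3 */
	return 0;
}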
- * - * Author: YuZhangyong - * - * 2008-08-15: Brian Behlendorf - * - Simplified API and improved documentation - * - Added per-hash feature flags: - * * CFS_HASH_DEBUG additional validation - * * CFS_HASH_REHASH dynamic rehashing - * - Added per-hash statistics - * - General performance enhancements - * - * 2009-07-31: Liang Zhen - * - move all stuff to libcfs - * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH - * - ignore hs_rwlock if without CFS_HASH_REHASH setting - * - buckets are allocated one by one(instead of contiguous memory), - * to avoid unnecessary cacheline conflict - * - * 2010-03-01: Liang Zhen - * - "bucket" is a group of hlist_head now, user can specify bucket size - * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share - * one lock for reducing memory overhead. - * - * - support lockless hash, caller will take care of locks: - * avoid lock overhead for hash tables that are already protected - * by locking in the caller for another reason - * - * - support both spin_lock/rwlock for bucket: - * overhead of spinlock contention is lower than read/write - * contention of rwlock, so using spinlock to serialize operations on - * bucket is more reasonable for those frequently changed hash tables - * - * - support one-single lock mode: - * one lock to protect all hash operations to avoid overhead of - * multiple locks if hash table is always small - * - * - removed a lot of unnecessary addref & decref on hash element: - * addref & decref are atomic operations in many use-cases which - * are expensive. - * - * - support non-blocking cfs_hash_add() and cfs_hash_findadd(): - * some lustre use-cases require these functions to be strictly - * non-blocking, we need to schedule required rehash on a different - * thread on those cases. - * - * - safer rehash on large hash table - * In old implementation, rehash function will exclusively lock the - * hash table and finish rehash in one batch, it's dangerous on SMP - * system because rehash millions of elements could take long time. - * New implemented rehash can release lock and relax CPU in middle - * of rehash, it's safe for another thread to search/change on the - * hash table even it's in rehasing. - * - * - support two different refcount modes - * . hash table has refcount on element - * . hash table doesn't change refcount on adding/removing element - * - * - support long name hash table (for param-tree) - * - * - fix a bug for cfs_hash_rehash_key: - * in old implementation, cfs_hash_rehash_key could screw up the - * hash-table because @key is overwritten without any protection. - * Now we need user to define hs_keycpy for those rehash enabled - * hash tables, cfs_hash_rehash_key will overwrite hash-key - * inside lock by calling hs_keycpy. - * - * - better hash iteration: - * Now we support both locked iteration & lockless iteration of hash - * table. Also, user can break the iteration by return 1 in callback. 
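/*
 * Editor's sketch: the bucket addressing this changelog describes, where a
 * "bucket" is a group of 1 << bkt_bits hlist heads sharing one lock. The
 * split below mirrors cfs_hash_bd_from_key() further down in this file:
 * the low index bits select the bucket, the remaining bits select the
 * head inside it. Values in main() are arbitrary examples.
 */
#include <stdio.h>

struct bd { unsigned int bucket; unsigned int offset; };

static struct bd bd_from_index(unsigned int index, unsigned int bits,
			       unsigned int bkt_bits)
{
	struct bd bd;

	/* one lock covers the 1 << bkt_bits chained heads of one bucket */
	bd.bucket = index & ((1u << (bits - bkt_bits)) - 1);
	bd.offset = index >> (bits - bkt_bits);
	return bd;
}

int main(void)
{
	/* 2^10 heads grouped into 2^7 buckets of 8 heads each */
	struct bd bd = bd_from_index(0x2a5, 10, 3);

	printf("bucket %u, offset %u\n", bd.bucket, bd.offset);
	return 0;
}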
- */ -#include -#include - -#include "../../include/linux/libcfs/libcfs.h" - -#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static unsigned int warn_on_depth = 8; -module_param(warn_on_depth, uint, 0644); -MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high."); -#endif - -struct cfs_wi_sched *cfs_sched_rehash; - -static inline void -cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {} - -static inline void -cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {} - -static inline void -cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive) - __acquires(&lock->spin) -{ - spin_lock(&lock->spin); -} - -static inline void -cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive) - __releases(&lock->spin) -{ - spin_unlock(&lock->spin); -} - -static inline void -cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive) - __acquires(&lock->rw) -{ - if (!exclusive) - read_lock(&lock->rw); - else - write_lock(&lock->rw); -} - -static inline void -cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive) - __releases(&lock->rw) -{ - if (!exclusive) - read_unlock(&lock->rw); - else - write_unlock(&lock->rw); -} - -/** No lock hash */ -static struct cfs_hash_lock_ops cfs_hash_nl_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_nl_lock, - .hs_bkt_unlock = cfs_hash_nl_unlock, -}; - -/** no bucket lock, one spinlock to protect everything */ -static struct cfs_hash_lock_ops cfs_hash_nbl_lops = { - .hs_lock = cfs_hash_spin_lock, - .hs_unlock = cfs_hash_spin_unlock, - .hs_bkt_lock = cfs_hash_nl_lock, - .hs_bkt_unlock = cfs_hash_nl_unlock, -}; - -/** spin bucket lock, rehash is enabled */ -static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = { - .hs_lock = cfs_hash_rw_lock, - .hs_unlock = cfs_hash_rw_unlock, - .hs_bkt_lock = cfs_hash_spin_lock, - .hs_bkt_unlock = cfs_hash_spin_unlock, -}; - -/** rw bucket lock, rehash is enabled */ -static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = { - .hs_lock = cfs_hash_rw_lock, - .hs_unlock = cfs_hash_rw_unlock, - .hs_bkt_lock = cfs_hash_rw_lock, - .hs_bkt_unlock = cfs_hash_rw_unlock, -}; - -/** spin bucket lock, rehash is disabled */ -static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_spin_lock, - .hs_bkt_unlock = cfs_hash_spin_unlock, -}; - -/** rw bucket lock, rehash is disabled */ -static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_rw_lock, - .hs_bkt_unlock = cfs_hash_rw_unlock, -}; - -static void -cfs_hash_lock_setup(struct cfs_hash *hs) -{ - if (cfs_hash_with_no_lock(hs)) { - hs->hs_lops = &cfs_hash_nl_lops; - - } else if (cfs_hash_with_no_bktlock(hs)) { - hs->hs_lops = &cfs_hash_nbl_lops; - spin_lock_init(&hs->hs_lock.spin); - - } else if (cfs_hash_with_rehash(hs)) { - rwlock_init(&hs->hs_lock.rw); - - if (cfs_hash_with_rw_bktlock(hs)) - hs->hs_lops = &cfs_hash_bkt_rw_lops; - else if (cfs_hash_with_spin_bktlock(hs)) - hs->hs_lops = &cfs_hash_bkt_spin_lops; - else - LBUG(); - } else { - if (cfs_hash_with_rw_bktlock(hs)) - hs->hs_lops = &cfs_hash_nr_bkt_rw_lops; - else if (cfs_hash_with_spin_bktlock(hs)) - hs->hs_lops = &cfs_hash_nr_bkt_spin_lops; - else - LBUG(); - } -} - -/** - * Simple hash head without depth tracking - * new element is always added to head of hlist - */ -struct cfs_hash_head { - struct hlist_head hh_head; /**< entries list */ -}; - -static 
int -cfs_hash_hh_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_head); -} - -static struct hlist_head * -cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_head *head; - - head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hh_head; -} - -static int -cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); - return -1; /* unknown depth */ -} - -static int -cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hlist_del_init(hnode); - return -1; /* unknown depth */ -} - -/** - * Simple hash head with depth tracking - * new element is always added to head of hlist - */ -struct cfs_hash_head_dep { - struct hlist_head hd_head; /**< entries list */ - unsigned int hd_depth; /**< list length */ -}; - -static int -cfs_hash_hd_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_head_dep); -} - -static struct hlist_head * -cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_head_dep *head; - - head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hd_head; -} - -static int -cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_head_dep *hh; - - hh = container_of(cfs_hash_hd_hhead(hs, bd), - struct cfs_hash_head_dep, hd_head); - hlist_add_head(hnode, &hh->hd_head); - return ++hh->hd_depth; -} - -static int -cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_head_dep *hh; - - hh = container_of(cfs_hash_hd_hhead(hs, bd), - struct cfs_hash_head_dep, hd_head); - hlist_del_init(hnode); - return --hh->hd_depth; -} - -/** - * double links hash head without depth tracking - * new element is always added to tail of hlist - */ -struct cfs_hash_dhead { - struct hlist_head dh_head; /**< entries list */ - struct hlist_node *dh_tail; /**< the last entry */ -}; - -static int -cfs_hash_dh_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_dhead); -} - -static struct hlist_head * -cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_dhead *head; - - head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dh_head; -} - -static int -cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_dhead *dh; - - dh = container_of(cfs_hash_dh_hhead(hs, bd), - struct cfs_hash_dhead, dh_head); - if (dh->dh_tail != NULL) /* not empty */ - hlist_add_behind(hnode, dh->dh_tail); - else /* empty list */ - hlist_add_head(hnode, &dh->dh_head); - dh->dh_tail = hnode; - return -1; /* unknown depth */ -} - -static int -cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnd) -{ - struct cfs_hash_dhead *dh; - - dh = container_of(cfs_hash_dh_hhead(hs, bd), - struct cfs_hash_dhead, dh_head); - if (hnd->next == NULL) { /* it's the tail */ - dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? 
NULL : - container_of(hnd->pprev, struct hlist_node, next); - } - hlist_del_init(hnd); - return -1; /* unknown depth */ -} - -/** - * double links hash head with depth tracking - * new element is always added to tail of hlist - */ -struct cfs_hash_dhead_dep { - struct hlist_head dd_head; /**< entries list */ - struct hlist_node *dd_tail; /**< the last entry */ - unsigned int dd_depth; /**< list length */ -}; - -static int -cfs_hash_dd_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_dhead_dep); -} - -static struct hlist_head * -cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_dhead_dep *head; - - head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dd_head; -} - -static int -cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_dhead_dep *dh; - - dh = container_of(cfs_hash_dd_hhead(hs, bd), - struct cfs_hash_dhead_dep, dd_head); - if (dh->dd_tail != NULL) /* not empty */ - hlist_add_behind(hnode, dh->dd_tail); - else /* empty list */ - hlist_add_head(hnode, &dh->dd_head); - dh->dd_tail = hnode; - return ++dh->dd_depth; -} - -static int -cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnd) -{ - struct cfs_hash_dhead_dep *dh; - - dh = container_of(cfs_hash_dd_hhead(hs, bd), - struct cfs_hash_dhead_dep, dd_head); - if (hnd->next == NULL) { /* it's the tail */ - dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL : - container_of(hnd->pprev, struct hlist_node, next); - } - hlist_del_init(hnd); - return --dh->dd_depth; -} - -static struct cfs_hash_hlist_ops cfs_hash_hh_hops = { - .hop_hhead = cfs_hash_hh_hhead, - .hop_hhead_size = cfs_hash_hh_hhead_size, - .hop_hnode_add = cfs_hash_hh_hnode_add, - .hop_hnode_del = cfs_hash_hh_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_hd_hops = { - .hop_hhead = cfs_hash_hd_hhead, - .hop_hhead_size = cfs_hash_hd_hhead_size, - .hop_hnode_add = cfs_hash_hd_hnode_add, - .hop_hnode_del = cfs_hash_hd_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_dh_hops = { - .hop_hhead = cfs_hash_dh_hhead, - .hop_hhead_size = cfs_hash_dh_hhead_size, - .hop_hnode_add = cfs_hash_dh_hnode_add, - .hop_hnode_del = cfs_hash_dh_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_dd_hops = { - .hop_hhead = cfs_hash_dd_hhead, - .hop_hhead_size = cfs_hash_dd_hhead_size, - .hop_hnode_add = cfs_hash_dd_hnode_add, - .hop_hnode_del = cfs_hash_dd_hnode_del, -}; - -static void -cfs_hash_hlist_setup(struct cfs_hash *hs) -{ - if (cfs_hash_with_add_tail(hs)) { - hs->hs_hops = cfs_hash_with_depth(hs) ? - &cfs_hash_dd_hops : &cfs_hash_dh_hops; - } else { - hs->hs_hops = cfs_hash_with_depth(hs) ? 
- &cfs_hash_hd_hops : &cfs_hash_hh_hops; - } -} - -static void -cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts, - unsigned int bits, const void *key, struct cfs_hash_bd *bd) -{ - unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1); - - LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits); - - bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)]; - bd->bd_offset = index >> (bits - hs->hs_bkt_bits); -} - -void -cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (likely(hs->hs_rehash_buckets == NULL)) { - cfs_hash_bd_from_key(hs, hs->hs_buckets, - hs->hs_cur_bits, key, bd); - } else { - LASSERT(hs->hs_rehash_bits != 0); - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, bd); - } -} -EXPORT_SYMBOL(cfs_hash_bd_get); - -static inline void -cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur) -{ - if (likely(dep_cur <= bd->bd_bucket->hsb_depmax)) - return; - - bd->bd_bucket->hsb_depmax = dep_cur; -# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 - if (likely(warn_on_depth == 0 || - max(warn_on_depth, hs->hs_dep_max) >= dep_cur)) - return; - - spin_lock(&hs->hs_dep_lock); - hs->hs_dep_max = dep_cur; - hs->hs_dep_bkt = bd->bd_bucket->hsb_index; - hs->hs_dep_off = bd->bd_offset; - hs->hs_dep_bits = hs->hs_cur_bits; - spin_unlock(&hs->hs_dep_lock); - - cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi); -# endif -} - -void -cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - int rc; - - rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); - cfs_hash_bd_dep_record(hs, bd, rc); - bd->bd_bucket->hsb_version++; - if (unlikely(bd->bd_bucket->hsb_version == 0)) - bd->bd_bucket->hsb_version++; - bd->bd_bucket->hsb_count++; - - if (cfs_hash_with_counter(hs)) - atomic_inc(&hs->hs_count); - if (!cfs_hash_with_no_itemref(hs)) - cfs_hash_get(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_bd_add_locked); - -void -cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hs->hs_hops->hop_hnode_del(hs, bd, hnode); - - LASSERT(bd->bd_bucket->hsb_count > 0); - bd->bd_bucket->hsb_count--; - bd->bd_bucket->hsb_version++; - if (unlikely(bd->bd_bucket->hsb_version == 0)) - bd->bd_bucket->hsb_version++; - - if (cfs_hash_with_counter(hs)) { - LASSERT(atomic_read(&hs->hs_count) > 0); - atomic_dec(&hs->hs_count); - } - if (!cfs_hash_with_no_itemref(hs)) - cfs_hash_put_locked(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_bd_del_locked); - -void -cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, - struct cfs_hash_bd *bd_new, struct hlist_node *hnode) -{ - struct cfs_hash_bucket *obkt = bd_old->bd_bucket; - struct cfs_hash_bucket *nbkt = bd_new->bd_bucket; - int rc; - - if (cfs_hash_bd_compare(bd_old, bd_new) == 0) - return; - - /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops - * in cfs_hash_bd_del/add_locked */ - hs->hs_hops->hop_hnode_del(hs, bd_old, hnode); - rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode); - cfs_hash_bd_dep_record(hs, bd_new, rc); - - LASSERT(obkt->hsb_count > 0); - obkt->hsb_count--; - obkt->hsb_version++; - if (unlikely(obkt->hsb_version == 0)) - obkt->hsb_version++; - nbkt->hsb_count++; - nbkt->hsb_version++; - if (unlikely(nbkt->hsb_version == 0)) - nbkt->hsb_version++; -} - -enum { - /** always set, for sanity (avoid ZERO intent) */ - CFS_HS_LOOKUP_MASK_FIND = BIT(0), - /** return entry 
with a ref */ - CFS_HS_LOOKUP_MASK_REF = BIT(1), - /** add entry if not existing */ - CFS_HS_LOOKUP_MASK_ADD = BIT(2), - /** delete entry, ignore other masks */ - CFS_HS_LOOKUP_MASK_DEL = BIT(3), -}; - -enum cfs_hash_lookup_intent { - /** return item w/o refcount */ - CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND, - /** return item with refcount */ - CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_REF), - /** return item w/o refcount if it exists, otherwise add */ - CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_ADD), - /** return item with refcount if it exists, otherwise add */ - CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND | - CFS_HS_LOOKUP_MASK_ADD), - /** delete if it exists */ - CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_DEL) -}; - -static struct hlist_node * -cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key, struct hlist_node *hnode, - enum cfs_hash_lookup_intent intent) - -{ - struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); - struct hlist_node *ehnode; - struct hlist_node *match; - int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; - - /* with this function, we can avoid a lot of useless refcount ops, - * which are expensive atomic operations most of the time. */ - match = intent_add ? NULL : hnode; - hlist_for_each(ehnode, hhead) { - if (!cfs_hash_keycmp(hs, key, ehnode)) - continue; - - if (match != NULL && match != ehnode) /* can't match */ - continue; - - /* match and ... */ - if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) { - cfs_hash_bd_del_locked(hs, bd, ehnode); - return ehnode; - } - - /* caller wants refcount? */ - if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0) - cfs_hash_get(hs, ehnode); - return ehnode; - } - /* no match item */ - if (!intent_add) - return NULL; - - LASSERT(hnode != NULL); - cfs_hash_bd_add_locked(hs, bd, hnode); - return hnode; -} - -struct hlist_node * -cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key) -{ - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_FIND); -} -EXPORT_SYMBOL(cfs_hash_bd_lookup_locked); - -struct hlist_node * -cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key) -{ - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_PEEK); -} -EXPORT_SYMBOL(cfs_hash_bd_peek_locked); - -static void -cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, int excl) -{ - struct cfs_hash_bucket *prev = NULL; - int i; - - /** - * bds must be ordered by ascending bd->bd_bucket->hsb_index. - * NB: it's possible that several bds point to the same bucket but - * have different bd::bd_offset, so we need to take care to avoid deadlock.
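A quick aside on the intent values defined above: they are plain bitmask compositions, so a single list walk can combine lookup, reference-taking, insert-on-miss, and delete. A minimal sketch of the identity (the helper name is hypothetical and only restates what the enum already encodes):

	/* Sketch: CFS_HS_LOOKUP_IT_FINDADD is FIND | REF | ADD, which is why
	 * cfs_hash_bd_lookup_intent() can look up, grab a ref, and insert on
	 * miss in one pass without extra atomic operations.
	 */
	static inline void lookup_intent_sanity(void)
	{
		BUILD_BUG_ON(CFS_HS_LOOKUP_IT_FINDADD !=
			     (CFS_HS_LOOKUP_MASK_FIND |
			      CFS_HS_LOOKUP_MASK_REF |
			      CFS_HS_LOOKUP_MASK_ADD));
	}
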
- */ - cfs_hash_for_each_bd(bds, n, i) { - if (prev == bds[i].bd_bucket) - continue; - - LASSERT(prev == NULL || - prev->hsb_index < bds[i].bd_bucket->hsb_index); - cfs_hash_bd_lock(hs, &bds[i], excl); - prev = bds[i].bd_bucket; - } -} - -static void -cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, int excl) -{ - struct cfs_hash_bucket *prev = NULL; - int i; - - cfs_hash_for_each_bd(bds, n, i) { - if (prev != bds[i].bd_bucket) { - cfs_hash_bd_unlock(hs, &bds[i], excl); - prev = bds[i].bd_bucket; - } - } -} - -static struct hlist_node * -cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key) -{ - struct hlist_node *ehnode; - unsigned i; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, - CFS_HS_LOOKUP_IT_FIND); - if (ehnode != NULL) - return ehnode; - } - return NULL; -} - -static struct hlist_node * -cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key, - struct hlist_node *hnode, int noref) -{ - struct hlist_node *ehnode; - int intent; - unsigned i; - - LASSERT(hnode != NULL); - intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, - NULL, intent); - if (ehnode != NULL) - return ehnode; - } - - if (i == 1) { /* only one bucket */ - cfs_hash_bd_add_locked(hs, &bds[0], hnode); - } else { - struct cfs_hash_bd mybd; - - cfs_hash_bd_get(hs, key, &mybd); - cfs_hash_bd_add_locked(hs, &mybd, hnode); - } - - return hnode; -} - -static struct hlist_node * -cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key, - struct hlist_node *hnode) -{ - struct hlist_node *ehnode; - unsigned int i; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, - CFS_HS_LOOKUP_IT_FINDDEL); - if (ehnode != NULL) - return ehnode; - } - return NULL; -} - -static void -cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) -{ - int rc; - - if (bd2->bd_bucket == NULL) - return; - - if (bd1->bd_bucket == NULL) { - *bd1 = *bd2; - bd2->bd_bucket = NULL; - return; - } - - rc = cfs_hash_bd_compare(bd1, bd2); - if (rc == 0) { - bd2->bd_bucket = NULL; - - } else if (rc > 0) { /* swab bd1 and bd2 */ - struct cfs_hash_bd tmp; - - tmp = *bd2; - *bd2 = *bd1; - *bd1 = tmp; - } -} - -void -cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bds) -{ - /* NB: caller should hold hs_lock.rw if REHASH is set */ - cfs_hash_bd_from_key(hs, hs->hs_buckets, - hs->hs_cur_bits, key, &bds[0]); - if (likely(hs->hs_rehash_buckets == NULL)) { - /* no rehash or not rehashing */ - bds[1].bd_bucket = NULL; - return; - } - - LASSERT(hs->hs_rehash_bits != 0); - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &bds[1]); - - cfs_hash_bd_order(&bds[0], &bds[1]); -} - -void -cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_multi_bd_lock(hs, bds, 2, excl); -} - -void -cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_multi_bd_unlock(hs, bds, 2, excl); -} - -struct hlist_node * -cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key) -{ - return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key); -} - -struct hlist_node * -cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, 
struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode, - int noref) -{ - return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key, - hnode, noref); -} - -struct hlist_node * -cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode) -{ - return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); -} - -static void -cfs_hash_buckets_free(struct cfs_hash_bucket **buckets, - int bkt_size, int prev_size, int size) -{ - int i; - - for (i = prev_size; i < size; i++) { - if (buckets[i] != NULL) - LIBCFS_FREE(buckets[i], bkt_size); - } - - LIBCFS_FREE(buckets, sizeof(buckets[0]) * size); -} - -/* - * Create or grow bucket memory. Return old_buckets if no allocation was - * needed, the newly allocated buckets if allocation was needed and - * successful, and NULL on error. - */ -static struct cfs_hash_bucket ** -cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, - unsigned int old_size, unsigned int new_size) -{ - struct cfs_hash_bucket **new_bkts; - int i; - - LASSERT(old_size == 0 || old_bkts != NULL); - - if (old_bkts != NULL && old_size == new_size) - return old_bkts; - - LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size); - if (new_bkts == NULL) - return NULL; - - if (old_bkts != NULL) { - memcpy(new_bkts, old_bkts, - min(old_size, new_size) * sizeof(*old_bkts)); - } - - for (i = old_size; i < new_size; i++) { - struct hlist_head *hhead; - struct cfs_hash_bd bd; - - LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs)); - if (new_bkts[i] == NULL) { - cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs), - old_size, new_size); - return NULL; - } - - new_bkts[i]->hsb_index = i; - new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ - new_bkts[i]->hsb_depmax = -1; /* unknown */ - bd.bd_bucket = new_bkts[i]; - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) - INIT_HLIST_HEAD(hhead); - - if (cfs_hash_with_no_lock(hs) || - cfs_hash_with_no_bktlock(hs)) - continue; - - if (cfs_hash_with_rw_bktlock(hs)) - rwlock_init(&new_bkts[i]->hsb_lock.rw); - else if (cfs_hash_with_spin_bktlock(hs)) - spin_lock_init(&new_bkts[i]->hsb_lock.spin); - else - LBUG(); /* invalid use-case */ - } - return new_bkts; -} - -/** - * Initialize new libcfs hash, where: - * @name - Descriptive hash name - * @cur_bits - Initial hash table size, in bits - * @max_bits - Maximum allowed hash table resize, in bits - * @ops - Registered hash table operations - * @flags - CFS_HASH_REHASH enable dynamic hash resizing - * - CFS_HASH_SORT enable chained hash sort - */ -static int cfs_hash_rehash_worker(cfs_workitem_t *wi); - -#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static int cfs_hash_dep_print(cfs_workitem_t *wi) -{ - struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi); - int dep; - int bkt; - int off; - int bits; - - spin_lock(&hs->hs_dep_lock); - dep = hs->hs_dep_max; - bkt = hs->hs_dep_bkt; - off = hs->hs_dep_off; - bits = hs->hs_dep_bits; - spin_unlock(&hs->hs_dep_lock); - - LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n", - hs->hs_name, bits, dep, bkt, off); - spin_lock(&hs->hs_dep_lock); - hs->hs_dep_bits = 0; /* mark as workitem done */ - spin_unlock(&hs->hs_dep_lock); - return 0; -} - -static void cfs_hash_depth_wi_init(struct cfs_hash *hs) -{ - spin_lock_init(&hs->hs_dep_lock); - cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print); -} - -static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) -{ - if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi)) - return; - -
spin_lock(&hs->hs_dep_lock); - while (hs->hs_dep_bits != 0) { - spin_unlock(&hs->hs_dep_lock); - cond_resched(); - spin_lock(&hs->hs_dep_lock); - } - spin_unlock(&hs->hs_dep_lock); -} - -#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */ - -static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {} -static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {} - -#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ - -struct cfs_hash * -cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, - unsigned bkt_bits, unsigned extra_bytes, - unsigned min_theta, unsigned max_theta, - struct cfs_hash_ops *ops, unsigned flags) -{ - struct cfs_hash *hs; - int len; - - CLASSERT(CFS_HASH_THETA_BITS < 15); - - LASSERT(name != NULL); - LASSERT(ops != NULL); - LASSERT(ops->hs_key); - LASSERT(ops->hs_hash); - LASSERT(ops->hs_object); - LASSERT(ops->hs_keycmp); - LASSERT(ops->hs_get != NULL); - LASSERT(ops->hs_put_locked != NULL); - - if ((flags & CFS_HASH_REHASH) != 0) - flags |= CFS_HASH_COUNTER; /* must have counter */ - - LASSERT(cur_bits > 0); - LASSERT(cur_bits >= bkt_bits); - LASSERT(max_bits >= cur_bits && max_bits < 31); - LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits)); - LASSERT(ergo((flags & CFS_HASH_REHASH) != 0, - (flags & CFS_HASH_NO_LOCK) == 0)); - LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, - ops->hs_keycpy != NULL)); - - len = (flags & CFS_HASH_BIGNAME) == 0 ? - CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; - LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); - if (hs == NULL) - return NULL; - - strlcpy(hs->hs_name, name, len); - hs->hs_flags = flags; - - atomic_set(&hs->hs_refcount, 1); - atomic_set(&hs->hs_count, 0); - - cfs_hash_lock_setup(hs); - cfs_hash_hlist_setup(hs); - - hs->hs_cur_bits = (__u8)cur_bits; - hs->hs_min_bits = (__u8)cur_bits; - hs->hs_max_bits = (__u8)max_bits; - hs->hs_bkt_bits = (__u8)bkt_bits; - - hs->hs_ops = ops; - hs->hs_extra_bytes = extra_bytes; - hs->hs_rehash_bits = 0; - cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker); - cfs_hash_depth_wi_init(hs); - - if (cfs_hash_with_rehash(hs)) - __cfs_hash_set_theta(hs, min_theta, max_theta); - - hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0, - CFS_HASH_NBKT(hs)); - if (hs->hs_buckets != NULL) - return hs; - - LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len])); - return NULL; -} -EXPORT_SYMBOL(cfs_hash_create); - -/** - * Cleanup libcfs hash @hs. - */ -static void -cfs_hash_destroy(struct cfs_hash *hs) -{ - struct hlist_node *hnode; - struct hlist_node *pos; - struct cfs_hash_bd bd; - int i; - - LASSERT(hs != NULL); - LASSERT(!cfs_hash_is_exiting(hs) && - !cfs_hash_is_iterating(hs)); - - /** - * prohibit further rehashes, don't need any lock because - * I'm the only (last) one can change it. 
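To make the cfs_hash_create() parameters above concrete, here is a hedged usage sketch: the table name, the sizes, and the ops table "my_hash_ops" are hypothetical, and the theta/flag macros are assumed to come from libcfs_hash.h in this tree.

	/* Hypothetical: a table starting at 2^7 buckets that may grow to
	 * 2^12, with 2^4 hlist heads per bucket and dynamic rehash enabled.
	 * my_hash_ops must provide hs_key/hs_hash/hs_object/hs_keycmp/
	 * hs_get/hs_put_locked, per the LASSERTs in cfs_hash_create().
	 */
	static struct cfs_hash *my_table_create(void)
	{
		return cfs_hash_create("my_table", 7, 12, 4, 0,
				       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
				       &my_hash_ops,
				       CFS_HASH_REHASH | CFS_HASH_COUNTER |
				       CFS_HASH_RW_BKTLOCK | CFS_HASH_DEPTH);
	}
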
- */ - hs->hs_exiting = 1; - if (cfs_hash_with_rehash(hs)) - cfs_hash_rehash_cancel(hs); - - cfs_hash_depth_wi_cancel(hs); - /* rehash should be done/canceled */ - LASSERT(hs->hs_buckets != NULL && - hs->hs_rehash_buckets == NULL); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - LASSERT(bd.bd_bucket != NULL); - /* no need to take this lock, just for consistent code */ - cfs_hash_bd_lock(hs, &bd, 1); - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - LASSERTF(!cfs_hash_with_assert_empty(hs), - "hash %s bucket %u(%u) is not empty: %u items left\n", - hs->hs_name, bd.bd_bucket->hsb_index, - bd.bd_offset, bd.bd_bucket->hsb_count); - /* can't assert the key is valid, because we - * can interrupt rehash */ - cfs_hash_bd_del_locked(hs, &bd, hnode); - cfs_hash_exit(hs, hnode); - } - } - LASSERT(bd.bd_bucket->hsb_count == 0); - cfs_hash_bd_unlock(hs, &bd, 1); - cond_resched(); - } - - LASSERT(atomic_read(&hs->hs_count) == 0); - - cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs), - 0, CFS_HASH_NBKT(hs)); - i = cfs_hash_with_bigname(hs) ? - CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN; - LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i])); -} - -struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs) -{ - if (atomic_inc_not_zero(&hs->hs_refcount)) - return hs; - return NULL; -} -EXPORT_SYMBOL(cfs_hash_getref); - -void cfs_hash_putref(struct cfs_hash *hs) -{ - if (atomic_dec_and_test(&hs->hs_refcount)) - cfs_hash_destroy(hs); -} -EXPORT_SYMBOL(cfs_hash_putref); - -static inline int -cfs_hash_rehash_bits(struct cfs_hash *hs) -{ - if (cfs_hash_with_no_lock(hs) || - !cfs_hash_with_rehash(hs)) - return -EOPNOTSUPP; - - if (unlikely(cfs_hash_is_exiting(hs))) - return -ESRCH; - - if (unlikely(cfs_hash_is_rehashing(hs))) - return -EALREADY; - - if (unlikely(cfs_hash_is_iterating(hs))) - return -EAGAIN; - - /* XXX: need to handle case with max_theta != 2.0 - * and the case with min_theta != 0.5 */ - if ((hs->hs_cur_bits < hs->hs_max_bits) && - (__cfs_hash_theta(hs) > hs->hs_max_theta)) - return hs->hs_cur_bits + 1; - - if (!cfs_hash_with_shrink(hs)) - return 0; - - if ((hs->hs_cur_bits > hs->hs_min_bits) && - (__cfs_hash_theta(hs) < hs->hs_min_theta)) - return hs->hs_cur_bits - 1; - - return 0; -} - -/** - * don't allow inline rehash if: - * - user wants non-blocking change (add/del) on hash table - * - too many elements - */ -static inline int -cfs_hash_rehash_inline(struct cfs_hash *hs) -{ - return !cfs_hash_with_nblk_change(hs) && - atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG; -} - -/** - * Add item @hnode to libcfs hash @hs using @key. The registered - * ops->hs_get function will be called when the item is added.
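For readers checking the grow/shrink test in cfs_hash_rehash_bits() above: theta is a fixed-point load factor. A sketch of the convention, assuming CFS_HASH_THETA_BITS is 10 as in the libcfs headers of this era (so min_theta 0.5 maps to 512 and max_theta 2.0 to 2048):

	/* Sketch: theta == average items per bucket, scaled by
	 * 2^CFS_HASH_THETA_BITS; the table grows one bit when
	 * theta > hs_max_theta and shrinks one bit when theta < hs_min_theta.
	 */
	static int my_hash_theta(__u64 count, unsigned int cur_bits)
	{
		return (int)((count << CFS_HASH_THETA_BITS) >> cur_bits);
	}
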
- */ -void -cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - struct cfs_hash_bd bd; - int bits; - - LASSERT(hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - cfs_hash_bd_get_and_lock(hs, key, &bd, 1); - - cfs_hash_key_validate(hs, key, hnode); - cfs_hash_bd_add_locked(hs, &bd, hnode); - - cfs_hash_bd_unlock(hs, &bd, 1); - - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); -} -EXPORT_SYMBOL(cfs_hash_add); - -static struct hlist_node * -cfs_hash_find_or_add(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode, int noref) -{ - struct hlist_node *ehnode; - struct cfs_hash_bd bds[2]; - int bits = 0; - - LASSERT(hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); - - cfs_hash_key_validate(hs, key, hnode); - ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key, - hnode, noref); - cfs_hash_dual_bd_unlock(hs, bds, 1); - - if (ehnode == hnode) /* new item added */ - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); - - return ehnode; -} - -/** - * Add item @hnode to libcfs hash @hs using @key. The registered - * ops->hs_get function will be called if the item was added. - * Returns 0 on success or -EALREADY on key collisions. - */ -int -cfs_hash_add_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? - -EALREADY : 0; -} -EXPORT_SYMBOL(cfs_hash_add_unique); - -/** - * Add item @hnode to libcfs hash @hs using @key. If this @key - * already exists in the hash then ops->hs_get will be called on the - * conflicting entry and that entry will be returned to the caller. - * Otherwise ops->hs_get is called on the item which was added. - */ -void * -cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - hnode = cfs_hash_find_or_add(hs, key, hnode, 0); - - return cfs_hash_object(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_findadd_unique); - -/** - * Delete item @hnode from the libcfs hash @hs using @key. The @key - * is required to ensure the correct hash bucket is locked since there - * is no direct linkage from the item to the bucket. The object - * removed from the hash will be returned and ops->hs_put is called - * on the removed object. - */ -void * -cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - void *obj = NULL; - int bits = 0; - struct cfs_hash_bd bds[2]; - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); - - /* NB: do nothing if @hnode is not in hash table */ - if (hnode == NULL || !hlist_unhashed(hnode)) { - if (bds[1].bd_bucket == NULL && hnode != NULL) { - cfs_hash_bd_del_locked(hs, &bds[0], hnode); - } else { - hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, - key, hnode); - } - } - - if (hnode != NULL) { - obj = cfs_hash_object(hs, hnode); - bits = cfs_hash_rehash_bits(hs); - } - - cfs_hash_dual_bd_unlock(hs, bds, 1); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); - - return obj; -} -EXPORT_SYMBOL(cfs_hash_del); - -/** - * Delete item given @key in libcfs hash @hs. The first @key found in - * the hash will be removed; if the key exists multiple times in the hash - * @hs, this function must be called once per key. The removed object - * will be returned and ops->hs_put is called on the removed object.
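A hedged usage sketch of the add/delete entry points above; "struct my_obj", its field names, and the key layout are hypothetical, only the cfs_hash_* calls are from this file:

	struct my_obj {
		__u64			mo_key;
		struct hlist_node	mo_node;
	};

	/* -EALREADY on key collision; ops->hs_get is called when added */
	static int my_obj_insert(struct cfs_hash *hs, struct my_obj *obj)
	{
		return cfs_hash_add_unique(hs, &obj->mo_key, &obj->mo_node);
	}

	/* returns the removed object (ops->hs_put is called), or NULL */
	static struct my_obj *my_obj_remove(struct cfs_hash *hs, __u64 *key)
	{
		return cfs_hash_del(hs, key, NULL);
	}
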
- */ -void * -cfs_hash_del_key(struct cfs_hash *hs, const void *key) -{ - return cfs_hash_del(hs, key, NULL); -} -EXPORT_SYMBOL(cfs_hash_del_key); - -/** - * Lookup an item using @key in the libcfs hash @hs and return it. - * If the @key is found in the hash, hs->hs_get() is called and the - * matching object is returned. It is the caller's responsibility - * to call the counterpart ops->hs_put using the cfs_hash_put() macro - * when finished with the object. If the @key was not found - * in the hash @hs NULL is returned. - */ -void * -cfs_hash_lookup(struct cfs_hash *hs, const void *key) -{ - void *obj = NULL; - struct hlist_node *hnode; - struct cfs_hash_bd bds[2]; - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - - hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key); - if (hnode != NULL) - obj = cfs_hash_object(hs, hnode); - - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); - - return obj; -} -EXPORT_SYMBOL(cfs_hash_lookup); - -static void -cfs_hash_for_each_enter(struct cfs_hash *hs) -{ - LASSERT(!cfs_hash_is_exiting(hs)); - - if (!cfs_hash_with_rehash(hs)) - return; - /* - * NB: it's a race on cfs_hash::hs_iterating, but it doesn't matter - * because it's just an unreliable signal to rehash-thread, - * rehash-thread will try to finish rehash ASAP when seeing this. - */ - hs->hs_iterating = 1; - - cfs_hash_lock(hs, 1); - hs->hs_iterators++; - - /* NB: iteration is mostly called by service threads; instead of - * blocking a service thread, we tend to cancel the pending - * rehash request and relaunch it after the iteration */ - if (cfs_hash_is_rehashing(hs)) - cfs_hash_rehash_cancel_locked(hs); - cfs_hash_unlock(hs, 1); -} - -static void -cfs_hash_for_each_exit(struct cfs_hash *hs) -{ - int remained; - int bits; - - if (!cfs_hash_with_rehash(hs)) - return; - cfs_hash_lock(hs, 1); - remained = --hs->hs_iterators; - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 1); - /* NB: it's a race on cfs_hash::hs_iterating, see above */ - if (remained == 0) - hs->hs_iterating = 0; - if (bits > 0) { - cfs_hash_rehash(hs, atomic_read(&hs->hs_count) < - CFS_HASH_LOOP_HOG); - } -} - -/** - * For each item in the libcfs hash @hs call the passed callback @func - * and pass to it as an argument each hash item and the private @data. - * - * a) the function may sleep (it can reschedule between buckets)! - * b) during the callback: - * . the bucket lock is held so the callback must never sleep. - * .
if @remove_safe is true, the user can remove the current item via - * cfs_hash_bd_del_locked - */ -static __u64 -cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data, int remove_safe) -{ - struct hlist_node *hnode; - struct hlist_node *pos; - struct cfs_hash_bd bd; - __u64 count = 0; - int excl = !!remove_safe; - int loop = 0; - int i; - - cfs_hash_for_each_enter(hs); - - cfs_hash_lock(hs, 0); - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - cfs_hash_bd_lock(hs, &bd, excl); - if (func == NULL) { /* only glimpse size */ - count += bd.bd_bucket->hsb_count; - cfs_hash_bd_unlock(hs, &bd, excl); - continue; - } - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - cfs_hash_bucket_validate(hs, &bd, hnode); - count++; - loop++; - if (func(hs, &bd, hnode, data)) { - cfs_hash_bd_unlock(hs, &bd, excl); - goto out; - } - } - } - cfs_hash_bd_unlock(hs, &bd, excl); - if (loop < CFS_HASH_LOOP_HOG) - continue; - loop = 0; - cfs_hash_unlock(hs, 0); - cond_resched(); - cfs_hash_lock(hs, 0); - } - out: - cfs_hash_unlock(hs, 0); - - cfs_hash_for_each_exit(hs); - return count; -} - -struct cfs_hash_cond_arg { - cfs_hash_cond_opt_cb_t func; - void *arg; -}; - -static int -cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - struct cfs_hash_cond_arg *cond = data; - - if (cond->func(cfs_hash_object(hs, hnode), cond->arg)) - cfs_hash_bd_del_locked(hs, bd, hnode); - return 0; -} - -/** - * Delete items from the libcfs hash @hs when @func returns true. - * The write lock is held during the loop over each bucket to prevent - * any object from being referenced. - */ -void -cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data) -{ - struct cfs_hash_cond_arg arg = { - .func = func, - .arg = data, - }; - - cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1); -} -EXPORT_SYMBOL(cfs_hash_cond_del); - -void -cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - cfs_hash_for_each_tight(hs, func, data, 0); -} -EXPORT_SYMBOL(cfs_hash_for_each); - -void -cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - cfs_hash_for_each_tight(hs, func, data, 1); -} -EXPORT_SYMBOL(cfs_hash_for_each_safe); - -static int -cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - *(int *)data = 0; - return 1; /* return 1 to break the loop */ -} - -int -cfs_hash_is_empty(struct cfs_hash *hs) -{ - int empty = 1; - - cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0); - return empty; -} -EXPORT_SYMBOL(cfs_hash_is_empty); - -__u64 -cfs_hash_size_get(struct cfs_hash *hs) -{ - return cfs_hash_with_counter(hs) ? - atomic_read(&hs->hs_count) : - cfs_hash_for_each_tight(hs, NULL, NULL, 0); -} -EXPORT_SYMBOL(cfs_hash_size_get); - -/* - * cfs_hash_for_each_relax: - * Iterate the hash table and call @func on each item without - * any lock. This function can't guarantee to finish the iteration - * if these features are enabled: - * - * a. if rehash_key is enabled, an item can be moved from - * one bucket to another bucket - * b. the user can remove a non-zero-ref item from the hash-table, - * so the item can be removed from the hash-table; even worse, - * it's possible that the user changed the key and inserted it into another - * hash bucket.
- * There's no way for us to finish the iteration correctly in the previous - * two cases, so the iteration has to be stopped on change. - */ -static int -cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - struct hlist_node *hnode; - struct hlist_node *tmp; - struct cfs_hash_bd bd; - __u32 version; - int count = 0; - int stop_on_change; - int rc = 0; - int i; - - stop_on_change = cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs) || - hs->hs_ops->hs_put_locked == NULL; - cfs_hash_lock(hs, 0); - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - cfs_hash_bd_lock(hs, &bd, 0); - version = cfs_hash_bd_version_get(&bd); - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - for (hnode = hhead->first; hnode != NULL;) { - cfs_hash_bucket_validate(hs, &bd, hnode); - cfs_hash_get(hs, hnode); - cfs_hash_bd_unlock(hs, &bd, 0); - cfs_hash_unlock(hs, 0); - - rc = func(hs, &bd, hnode, data); - if (stop_on_change) - cfs_hash_put(hs, hnode); - cond_resched(); - count++; - - cfs_hash_lock(hs, 0); - cfs_hash_bd_lock(hs, &bd, 0); - if (!stop_on_change) { - tmp = hnode->next; - cfs_hash_put_locked(hs, hnode); - hnode = tmp; - } else { /* bucket changed? */ - if (version != - cfs_hash_bd_version_get(&bd)) - break; - /* safe to continue because no change */ - hnode = hnode->next; - } - if (rc) /* callback wants to break iteration */ - break; - } - if (rc) /* callback wants to break iteration */ - break; - } - cfs_hash_bd_unlock(hs, &bd, 0); - if (rc) /* callback wants to break iteration */ - break; - } - cfs_hash_unlock(hs, 0); - - return count; -} - -int -cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - if (cfs_hash_with_no_lock(hs) || - cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs)) - return -EOPNOTSUPP; - - if (hs->hs_ops->hs_get == NULL || - (hs->hs_ops->hs_put == NULL && - hs->hs_ops->hs_put_locked == NULL)) - return -EOPNOTSUPP; - - cfs_hash_for_each_enter(hs); - cfs_hash_for_each_relax(hs, func, data); - cfs_hash_for_each_exit(hs); - - return 0; -} -EXPORT_SYMBOL(cfs_hash_for_each_nolock); - -/** - * For each hash bucket in the libcfs hash @hs call the passed callback - * @func until all the hash buckets are empty. The passed callback @func - * or the previously registered callback hs->hs_put must remove the item - * from the hash. You may either use the cfs_hash_del() or hlist_del() - * functions. No rwlocks will be held during the callback @func, so it is - * safe to sleep if needed. This function will not terminate until the - * hash is empty. Note it is still possible to concurrently add new - * items into the hash. It is the caller's responsibility to ensure - * the required locking is in place to prevent concurrent insertions.
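A hedged sketch of a matching callback for the iterators above (the counter is illustrative; the signature is the file's cfs_hash_for_each_cb_t):

	/* Runs under the bucket lock in cfs_hash_for_each(), so it must not
	 * sleep; returning non-zero stops the walk early.
	 */
	static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			       struct hlist_node *hnode, void *data)
	{
		(*(int *)data)++;
		return 0;
	}

A caller would then do "int n = 0; cfs_hash_for_each(hs, my_count_cb, &n);".
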
- */ -int -cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - unsigned i = 0; - - if (cfs_hash_with_no_lock(hs)) - return -EOPNOTSUPP; - - if (hs->hs_ops->hs_get == NULL || - (hs->hs_ops->hs_put == NULL && - hs->hs_ops->hs_put_locked == NULL)) - return -EOPNOTSUPP; - - cfs_hash_for_each_enter(hs); - while (cfs_hash_for_each_relax(hs, func, data)) { - CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", - hs->hs_name, i++); - } - cfs_hash_for_each_exit(hs); - return 0; -} -EXPORT_SYMBOL(cfs_hash_for_each_empty); - -void -cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, - cfs_hash_for_each_cb_t func, void *data) -{ - struct hlist_head *hhead; - struct hlist_node *hnode; - struct cfs_hash_bd bd; - - cfs_hash_for_each_enter(hs); - cfs_hash_lock(hs, 0); - if (hindex >= CFS_HASH_NHLIST(hs)) - goto out; - - cfs_hash_bd_index_set(hs, hindex, &bd); - - cfs_hash_bd_lock(hs, &bd, 0); - hhead = cfs_hash_bd_hhead(hs, &bd); - hlist_for_each(hnode, hhead) { - if (func(hs, &bd, hnode, data)) - break; - } - cfs_hash_bd_unlock(hs, &bd, 0); -out: - cfs_hash_unlock(hs, 0); - cfs_hash_for_each_exit(hs); -} - -EXPORT_SYMBOL(cfs_hash_hlist_for_each); - -/* - * For each item in the libcfs hash @hs which matches the @key call - * the passed callback @func and pass to it as an argument each hash - * item and the private @data. During the callback the bucket lock - * is held so the callback must never sleep. - */ -void -cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, - cfs_hash_for_each_cb_t func, void *data) -{ - struct hlist_node *hnode; - struct cfs_hash_bd bds[2]; - unsigned int i; - - cfs_hash_lock(hs, 0); - - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - - cfs_hash_for_each_bd(bds, 2, i) { - struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]); - - hlist_for_each(hnode, hlist) { - cfs_hash_bucket_validate(hs, &bds[i], hnode); - - if (cfs_hash_keycmp(hs, key, hnode)) { - if (func(hs, &bds[i], hnode, data)) - break; - } - } - } - - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_for_each_key); - -/** - * Rehash the libcfs hash @hs to the given @bits. This can be used - * to grow the hash size when excessive chaining is detected, or to - * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH - * flag is set in @hs the libcfs hash may be dynamically rehashed - * during addition or removal if the hash's theta value exceeds - * either the hs->hs_min_theta or hs->hs_max_theta values. By default - * these values are tuned to keep the chained hash depth small, and - * this approach assumes a reasonably uniform hashing function. The - * theta thresholds for @hs are tunable via cfs_hash_set_theta(). - */ -void -cfs_hash_rehash_cancel_locked(struct cfs_hash *hs) -{ - int i; - - /* need to hold cfs_hash_lock(hs, 1) */ - LASSERT(cfs_hash_with_rehash(hs) && - !cfs_hash_with_no_lock(hs)); - - if (!cfs_hash_is_rehashing(hs)) - return; - - if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) { - hs->hs_rehash_bits = 0; - return; - } - - for (i = 2; cfs_hash_is_rehashing(hs); i++) { - cfs_hash_unlock(hs, 1); - /* raise console warning while waiting too long */ - CDEBUG(is_power_of_2(i >> 3) ?
D_WARNING : D_INFO, - "hash %s is still rehashing, rescheduled %d\n", - hs->hs_name, i - 1); - cond_resched(); - cfs_hash_lock(hs, 1); - } -} - -void -cfs_hash_rehash_cancel(struct cfs_hash *hs) -{ - cfs_hash_lock(hs, 1); - cfs_hash_rehash_cancel_locked(hs); - cfs_hash_unlock(hs, 1); -} - -int -cfs_hash_rehash(struct cfs_hash *hs, int do_rehash) -{ - int rc; - - LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs)); - - cfs_hash_lock(hs, 1); - - rc = cfs_hash_rehash_bits(hs); - if (rc <= 0) { - cfs_hash_unlock(hs, 1); - return rc; - } - - hs->hs_rehash_bits = rc; - if (!do_rehash) { - /* launch and return */ - cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi); - cfs_hash_unlock(hs, 1); - return 0; - } - - /* rehash right now */ - cfs_hash_unlock(hs, 1); - - return cfs_hash_rehash_worker(&hs->hs_rehash_wi); -} - -static int -cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) -{ - struct cfs_hash_bd new; - struct hlist_head *hhead; - struct hlist_node *hnode; - struct hlist_node *pos; - void *key; - int c = 0; - - /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ - cfs_hash_bd_for_each_hlist(hs, old, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - key = cfs_hash_key(hs, hnode); - LASSERT(key != NULL); - /* Validate hnode is in the correct bucket. */ - cfs_hash_bucket_validate(hs, old, hnode); - /* - * Delete from old hash bucket; move to new bucket. - * ops->hs_key must be defined. - */ - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &new); - cfs_hash_bd_move_locked(hs, old, &new, hnode); - c++; - } - } - - return c; -} - -static int -cfs_hash_rehash_worker(cfs_workitem_t *wi) -{ - struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi); - struct cfs_hash_bucket **bkts; - struct cfs_hash_bd bd; - unsigned int old_size; - unsigned int new_size; - int bsize; - int count = 0; - int rc = 0; - int i; - - LASSERT(hs != NULL && cfs_hash_with_rehash(hs)); - - cfs_hash_lock(hs, 0); - LASSERT(cfs_hash_is_rehashing(hs)); - - old_size = CFS_HASH_NBKT(hs); - new_size = CFS_HASH_RH_NBKT(hs); - - cfs_hash_unlock(hs, 0); - - /* - * don't need hs::hs_rwlock for hs::hs_buckets, - * because nobody can change bkt-table except me.
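To illustrate cfs_hash_rehash()'s do_rehash flag from the caller's side (a sketch; only valid for tables created with CFS_HASH_REHASH):

	/* Passing 0 schedules hs_rehash_wi on cfs_sched_rehash; passing 1
	 * runs the worker inline.  The callers above only rehash inline
	 * while the table is small (< CFS_HASH_LOOP_HOG items).
	 */
	static void my_maybe_resize(struct cfs_hash *hs)
	{
		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
				    CFS_HASH_LOOP_HOG);
	}
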
- */ - bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets, - old_size, new_size); - cfs_hash_lock(hs, 1); - if (bkts == NULL) { - rc = -ENOMEM; - goto out; - } - - if (bkts == hs->hs_buckets) { - bkts = NULL; /* do nothing */ - goto out; - } - - rc = __cfs_hash_theta(hs); - if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) { - /* free the newly allocated bkt-table */ - old_size = new_size; - new_size = CFS_HASH_NBKT(hs); - rc = -EALREADY; - goto out; - } - - LASSERT(hs->hs_rehash_buckets == NULL); - hs->hs_rehash_buckets = bkts; - - rc = 0; - cfs_hash_for_each_bucket(hs, &bd, i) { - if (cfs_hash_is_exiting(hs)) { - rc = -ESRCH; - /* someone wants to destroy the hash, abort now */ - if (old_size < new_size) /* OK to free old bkt-table */ - break; - /* it's shrinking, need to free the new bkt-table */ - hs->hs_rehash_buckets = NULL; - old_size = new_size; - new_size = CFS_HASH_NBKT(hs); - goto out; - } - - count += cfs_hash_rehash_bd(hs, &bd); - if (count < CFS_HASH_LOOP_HOG || - cfs_hash_is_iterating(hs)) { /* need to finish ASAP */ - continue; - } - - count = 0; - cfs_hash_unlock(hs, 1); - cond_resched(); - cfs_hash_lock(hs, 1); - } - - hs->hs_rehash_count++; - - bkts = hs->hs_buckets; - hs->hs_buckets = hs->hs_rehash_buckets; - hs->hs_rehash_buckets = NULL; - - hs->hs_cur_bits = hs->hs_rehash_bits; -out: - hs->hs_rehash_bits = 0; - if (rc == -ESRCH) /* will never be scheduled again */ - cfs_wi_exit(cfs_sched_rehash, wi); - bsize = cfs_hash_bkt_size(hs); - cfs_hash_unlock(hs, 1); - /* can't refer to @hs anymore because it could be destroyed */ - if (bkts != NULL) - cfs_hash_buckets_free(bkts, bsize, new_size, old_size); - if (rc != 0) - CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); - /* return 1 only if cfs_wi_exit is called */ - return rc == -ESRCH; -} - -/** - * Rehash the object referenced by @hnode in the libcfs hash @hs. The - * @old_key must be provided to locate the object's previous location - * in the hash, and the @new_key will be used to reinsert the object. - * Use this function instead of a cfs_hash_add() + cfs_hash_del() - * combo when it is critical that there is no window in time where the - * object is missing from the hash. When an object is being rehashed - * the registered cfs_hash_get() and cfs_hash_put() functions will - * not be called.
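A hedged usage sketch for the key-move primitive described above, reusing the hypothetical "struct my_obj" from the earlier sketch; the object must already be hashed and ops->hs_keycpy must be registered for the key copy to work:

	/* Atomically rebind obj to new_key: lookups never see it missing,
	 * and no get/put callbacks fire during the move.
	 */
	static void my_obj_change_key(struct cfs_hash *hs, struct my_obj *obj,
				      __u64 new_key)
	{
		cfs_hash_rehash_key(hs, &obj->mo_key, &new_key, &obj->mo_node);
	}
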
- */ -void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, - void *new_key, struct hlist_node *hnode) -{ - struct cfs_hash_bd bds[3]; - struct cfs_hash_bd old_bds[2]; - struct cfs_hash_bd new_bd; - - LASSERT(!hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - - cfs_hash_dual_bd_get(hs, old_key, old_bds); - cfs_hash_bd_get(hs, new_key, &new_bd); - - bds[0] = old_bds[0]; - bds[1] = old_bds[1]; - bds[2] = new_bd; - - /* NB: bds[0] and bds[1] are ordered already */ - cfs_hash_bd_order(&bds[1], &bds[2]); - cfs_hash_bd_order(&bds[0], &bds[1]); - - cfs_hash_multi_bd_lock(hs, bds, 3, 1); - if (likely(old_bds[1].bd_bucket == NULL)) { - cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode); - } else { - cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode); - cfs_hash_bd_add_locked(hs, &new_bd, hnode); - } - /* overwrite key inside locks, otherwise it may screw up - * other operations, i.e. rehash */ - cfs_hash_keycpy(hs, hnode, new_key); - - cfs_hash_multi_bd_unlock(hs, bds, 3, 1); - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_rehash_key); - -void cfs_hash_debug_header(struct seq_file *m) -{ - seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n", - CFS_HASH_BIGNAME_LEN, "name"); -} -EXPORT_SYMBOL(cfs_hash_debug_header); - -static struct cfs_hash_bucket ** -cfs_hash_full_bkts(struct cfs_hash *hs) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (hs->hs_rehash_buckets == NULL) - return hs->hs_buckets; - - LASSERT(hs->hs_rehash_bits != 0); - return hs->hs_rehash_bits > hs->hs_cur_bits ? - hs->hs_rehash_buckets : hs->hs_buckets; -} - -static unsigned int -cfs_hash_full_nbkt(struct cfs_hash *hs) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (hs->hs_rehash_buckets == NULL) - return CFS_HASH_NBKT(hs); - - LASSERT(hs->hs_rehash_bits != 0); - return hs->hs_rehash_bits > hs->hs_cur_bits ? - CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); -} - -void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m) -{ - int dist[8] = { 0, }; - int maxdep = -1; - int maxdepb = -1; - int total = 0; - int theta; - int i; - - cfs_hash_lock(hs, 0); - theta = __cfs_hash_theta(hs); - - seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ", - CFS_HASH_BIGNAME_LEN, hs->hs_name, - 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits, - 1 << hs->hs_max_bits, - __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta), - __cfs_hash_theta_int(hs->hs_min_theta), - __cfs_hash_theta_frac(hs->hs_min_theta), - __cfs_hash_theta_int(hs->hs_max_theta), - __cfs_hash_theta_frac(hs->hs_max_theta), - hs->hs_flags, hs->hs_rehash_count); - - /* - * The distribution is a summary of the chained hash depth in - * each of the libcfs hash buckets. Each bucket's hsb_count is - * divided by the hash theta value and used to generate a - * histogram of the hash distribution. A uniform hash will - * result in all hash buckets being close to the average, thus - * only the first few entries in the histogram will be non-zero. - * If your hash function results in a non-uniform hash, it will - * be observable as outlier buckets in the distribution histogram.
- * - * Uniform hash distribution: 128/128/0/0/0/0/0/0 - * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1 - */ - for (i = 0; i < cfs_hash_full_nbkt(hs); i++) { - struct cfs_hash_bd bd; - - bd.bd_bucket = cfs_hash_full_bkts(hs)[i]; - cfs_hash_bd_lock(hs, &bd, 0); - if (maxdep < bd.bd_bucket->hsb_depmax) { - maxdep = bd.bd_bucket->hsb_depmax; - maxdepb = ffz(~maxdep); - } - total += bd.bd_bucket->hsb_count; - dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++; - cfs_hash_bd_unlock(hs, &bd, 0); - } - - seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb); - for (i = 0; i < 8; i++) - seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/'); - - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_debug_str); diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c deleted file mode 100644 index d8230aec9..000000000 --- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: Nathan Rutman - * - * Kernel <-> userspace communication routines. - * Using pipes for all arches. - */ - -#define DEBUG_SUBSYSTEM S_CLASS -#define D_KUC D_OTHER - -#include "../../include/linux/libcfs/libcfs.h" - -/* This is the kernel side (liblustre as well). */ - -/** - * libcfs_kkuc_msg_put - send a message from kernel to userspace - * @param filp file pointer to send the message to - * @param payload Payload data. - * First field of payload is always
First field of payload is always - * struct kuc_hdr - */ -int libcfs_kkuc_msg_put(struct file *filp, void *payload) -{ - struct kuc_hdr *kuch = (struct kuc_hdr *)payload; - ssize_t count = kuch->kuc_msglen; - loff_t offset = 0; - mm_segment_t fs; - int rc = -ENOSYS; - - if (filp == NULL || IS_ERR(filp)) - return -EBADF; - - if (kuch->kuc_magic != KUC_MAGIC) { - CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic); - return -ENOSYS; - } - - fs = get_fs(); - set_fs(KERNEL_DS); - while (count > 0) { - rc = vfs_write(filp, (void __force __user *)payload, - count, &offset); - if (rc < 0) - break; - count -= rc; - payload += rc; - rc = 0; - } - set_fs(fs); - - if (rc < 0) - CWARN("message send failed (%d)\n", rc); - else - CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp); - - return rc; -} -EXPORT_SYMBOL(libcfs_kkuc_msg_put); - -/* Broadcast groups are global across all mounted filesystems; - * i.e. registering for a group on 1 fs will get messages for that - * group from any fs */ -/** A single group registration has a uid and a file pointer */ -struct kkuc_reg { - struct list_head kr_chain; - int kr_uid; - struct file *kr_fp; - __u32 kr_data; -}; - -static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {}; -/* Protect message sending against remove and adds */ -static DECLARE_RWSEM(kg_sem); - -/** Add a receiver to a broadcast group - * @param filp pipe to write into - * @param uid identifier for this receiver - * @param group group number - */ -int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, - __u32 data) -{ - struct kkuc_reg *reg; - - if (group > KUC_GRP_MAX) { - CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); - return -EINVAL; - } - - /* fput in group_rem */ - if (filp == NULL) - return -EBADF; - - /* freed in group_rem */ - reg = kmalloc(sizeof(*reg), 0); - if (reg == NULL) - return -ENOMEM; - - reg->kr_fp = filp; - reg->kr_uid = uid; - reg->kr_data = data; - - down_write(&kg_sem); - if (kkuc_groups[group].next == NULL) - INIT_LIST_HEAD(&kkuc_groups[group]); - list_add(®->kr_chain, &kkuc_groups[group]); - up_write(&kg_sem); - - CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group); - - return 0; -} -EXPORT_SYMBOL(libcfs_kkuc_group_add); - -int libcfs_kkuc_group_rem(int uid, int group) -{ - struct kkuc_reg *reg, *next; - - if (kkuc_groups[group].next == NULL) - return 0; - - if (uid == 0) { - /* Broadcast a shutdown message */ - struct kuc_hdr lh; - - lh.kuc_magic = KUC_MAGIC; - lh.kuc_transport = KUC_TRANSPORT_GENERIC; - lh.kuc_msgtype = KUC_MSG_SHUTDOWN; - lh.kuc_msglen = sizeof(lh); - libcfs_kkuc_group_put(group, &lh); - } - - down_write(&kg_sem); - list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { - if ((uid == 0) || (uid == reg->kr_uid)) { - list_del(®->kr_chain); - CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", - reg->kr_uid, reg->kr_fp, group); - if (reg->kr_fp != NULL) - fput(reg->kr_fp); - kfree(reg); - } - } - up_write(&kg_sem); - - return 0; -} -EXPORT_SYMBOL(libcfs_kkuc_group_rem); - -int libcfs_kkuc_group_put(int group, void *payload) -{ - struct kkuc_reg *reg; - int rc = 0; - int one_success = 0; - - down_read(&kg_sem); - list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) { - rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); - if (rc == 0) - one_success = 1; - else if (rc == -EPIPE) { - fput(reg->kr_fp); - reg->kr_fp = NULL; - } - } - } - up_read(&kg_sem); - - /* don't return an error if the message has been delivered - * at least to one agent */ - if (one_success) - 
rc = 0; - - return rc; -} -EXPORT_SYMBOL(libcfs_kkuc_group_put); - -/** - * Calls a callback function for each link of the given kuc group. - * @param group the group to call the function on. - * @param cb_func the function to be called. - * @param cb_arg extra argument to be passed to the callback function. - */ -int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, - void *cb_arg) -{ - struct kkuc_reg *reg; - int rc = 0; - - if (group > KUC_GRP_MAX) { - CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); - return -EINVAL; - } - - /* no link for this group */ - if (kkuc_groups[group].next == NULL) - return 0; - - down_write(&kg_sem); - list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) - rc = cb_func(reg->kr_data, cb_arg); - } - up_write(&kg_sem); - - return rc; -} -EXPORT_SYMBOL(libcfs_kkuc_group_foreach); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c deleted file mode 100644 index 933525c73..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc.
- * - * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -/** Global CPU partition table */ -struct cfs_cpt_table *cfs_cpt_table __read_mostly; -EXPORT_SYMBOL(cfs_cpt_table); - -#ifndef HAVE_LIBCFS_CPT - -#define CFS_CPU_VERSION_MAGIC 0xbabecafe - -struct cfs_cpt_table * -cfs_cpt_table_alloc(unsigned int ncpt) -{ - struct cfs_cpt_table *cptab; - - if (ncpt != 1) { - CERROR("Can't support cpu partition number %d\n", ncpt); - return NULL; - } - - LIBCFS_ALLOC(cptab, sizeof(*cptab)); - if (cptab != NULL) { - cptab->ctb_version = CFS_CPU_VERSION_MAGIC; - cptab->ctb_nparts = ncpt; - } - - return cptab; -} -EXPORT_SYMBOL(cfs_cpt_table_alloc); - -void -cfs_cpt_table_free(struct cfs_cpt_table *cptab) -{ - LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC); - - LIBCFS_FREE(cptab, sizeof(*cptab)); -} -EXPORT_SYMBOL(cfs_cpt_table_free); - -#ifdef CONFIG_SMP -int -cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) -{ - int rc; - - rc = snprintf(buf, len, "%d\t: %d\n", 0, 0); - len -= rc; - if (len <= 0) - return -EFBIG; - - return rc; -} -EXPORT_SYMBOL(cfs_cpt_table_print); -#endif /* CONFIG_SMP */ - -int -cfs_cpt_number(struct cfs_cpt_table *cptab) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_number); - -int -cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_weight); - -int -cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_online); - -int -cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpu); - -void -cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_cpu); - -int -cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpumask); - -void -cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_cpumask); - -int -cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_node); - -void -cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_node); - -int -cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_nodemask); - -void -cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_nodemask); - -void -cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) -{ -} -EXPORT_SYMBOL(cfs_cpt_clear); - -int -cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_spread_node); - -int -cfs_cpu_ht_nsiblings(int cpu) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpu_ht_nsiblings); - -int -cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_current); - -int -cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_of_cpu); - -int -cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_bind); - -void -cfs_cpu_fini(void) -{ - if (cfs_cpt_table != NULL) { - cfs_cpt_table_free(cfs_cpt_table); - cfs_cpt_table = NULL; - } -} - -int -cfs_cpu_init(void) -{ - cfs_cpt_table = cfs_cpt_table_alloc(1); - - return cfs_cpt_table != NULL ? 
0 : -1; -} - -#endif /* HAVE_LIBCFS_CPT */ diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c deleted file mode 100644 index 15782d9e6..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c +++ /dev/null @@ -1,187 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -/** destroy cpu-partition lock, see libcfs_private.h for more detail */ -void -cfs_percpt_lock_free(struct cfs_percpt_lock *pcl) -{ - LASSERT(pcl->pcl_locks != NULL); - LASSERT(!pcl->pcl_locked); - - cfs_percpt_free(pcl->pcl_locks); - LIBCFS_FREE(pcl, sizeof(*pcl)); -} -EXPORT_SYMBOL(cfs_percpt_lock_free); - -/** - * create cpu-partition lock, see libcfs_private.h for more detail. - * - * cpu-partition lock is designed for large-scale SMP system, so we need to - * reduce cacheline conflict as possible as we can, that's the - * reason we always allocate cacheline-aligned memory block. 
- */ -struct cfs_percpt_lock * -cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) -{ - struct cfs_percpt_lock *pcl; - spinlock_t *lock; - int i; - - /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */ - LIBCFS_ALLOC(pcl, sizeof(*pcl)); - if (!pcl) - return NULL; - - pcl->pcl_cptab = cptab; - pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock)); - if (!pcl->pcl_locks) { - LIBCFS_FREE(pcl, sizeof(*pcl)); - return NULL; - } - - cfs_percpt_for_each(lock, i, pcl->pcl_locks) - spin_lock_init(lock); - - return pcl; -} -EXPORT_SYMBOL(cfs_percpt_lock_alloc); - -/** - * lock a CPU partition - * - * \a index != CFS_PERCPT_LOCK_EX - * hold private lock indexed by \a index - * - * \a index == CFS_PERCPT_LOCK_EX - * exclusively lock @pcl and nobody can take private lock - */ -void -cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) -{ - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; - - LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt); - - if (ncpt == 1) { - index = 0; - } else { /* serialize with exclusive lock */ - while (pcl->pcl_locked) - cpu_relax(); - } - - if (likely(index != CFS_PERCPT_LOCK_EX)) { - spin_lock(pcl->pcl_locks[index]); - return; - } - - /* exclusive lock request */ - for (i = 0; i < ncpt; i++) { - spin_lock(pcl->pcl_locks[i]); - if (i == 0) { - LASSERT(!pcl->pcl_locked); - /* nobody should take private lock after this - * so I wouldn't starve for too long time */ - pcl->pcl_locked = 1; - } - } -} -EXPORT_SYMBOL(cfs_percpt_lock); - -/** unlock a CPU partition */ -void -cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) -{ - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; - - index = ncpt == 1 ? 0 : index; - - if (likely(index != CFS_PERCPT_LOCK_EX)) { - spin_unlock(pcl->pcl_locks[index]); - return; - } - - for (i = ncpt - 1; i >= 0; i--) { - if (i == 0) { - LASSERT(pcl->pcl_locked); - pcl->pcl_locked = 0; - } - spin_unlock(pcl->pcl_locks[i]); - } -} -EXPORT_SYMBOL(cfs_percpt_unlock); - -/** free cpu-partition refcount */ -void -cfs_percpt_atomic_free(atomic_t **refs) -{ - cfs_percpt_free(refs); -} -EXPORT_SYMBOL(cfs_percpt_atomic_free); - -/** allocate cpu-partition refcount with initial value @init_val */ -atomic_t ** -cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val) -{ - atomic_t **refs; - atomic_t *ref; - int i; - - refs = cfs_percpt_alloc(cptab, sizeof(*ref)); - if (!refs) - return NULL; - - cfs_percpt_for_each(ref, i, refs) - atomic_set(ref, init_val); - return refs; -} -EXPORT_SYMBOL(cfs_percpt_atomic_alloc); - -/** return sum of cpu-partition refs */ -int -cfs_percpt_atomic_summary(atomic_t **refs) -{ - atomic_t *ref; - int i; - int val = 0; - - cfs_percpt_for_each(ref, i, refs) - val += atomic_read(ref); - - return val; -} -EXPORT_SYMBOL(cfs_percpt_atomic_summary); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c deleted file mode 100644 index 27cf86106..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
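
Editorial sketch, not part of the original patch: the locking discipline the
percpt-lock code above implements, private per-partition locks on the hot
path and CFS_PERCPT_LOCK_EX to take every private lock at once. All calls
are to functions shown in this file; percpt_lock_demo is an invented name.

static void percpt_lock_demo(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock *pcl;
	int cpt;

	pcl = cfs_percpt_lock_alloc(cptab);
	if (pcl == NULL)
		return;

	/* common path: take only the private lock of one partition */
	cpt = cfs_cpt_current(cptab, 1);
	cfs_percpt_lock(pcl, cpt);
	/* ... touch per-partition state for @cpt ... */
	cfs_percpt_unlock(pcl, cpt);

	/* rare path (e.g. teardown): hold every private lock at once */
	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
	/* ... walk the state of all partitions ... */
	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);

	cfs_percpt_lock_free(pcl);
}
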
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -struct cfs_var_array { - unsigned int va_count; /* # of buffers */ - unsigned int va_size; /* size of each var */ - struct cfs_cpt_table *va_cptab; /* cpu partition table */ - void *va_ptrs[0]; /* buffer addresses */ -}; - -/* - * free per-cpu data, see more detail in cfs_percpt_free - */ -void -cfs_percpt_free(void *vars) -{ - struct cfs_var_array *arr; - int i; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - for (i = 0; i < arr->va_count; i++) { - if (arr->va_ptrs[i] != NULL) - LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); - } - - LIBCFS_FREE(arr, offsetof(struct cfs_var_array, - va_ptrs[arr->va_count])); -} -EXPORT_SYMBOL(cfs_percpt_free); - -/* - * allocate per cpu-partition variables, returned value is an array of pointers, - * variable can be indexed by CPU partition ID, i.e: - * - * arr = cfs_percpt_alloc(cfs_cpu_pt, size); - * then caller can access memory block for CPU 0 by arr[0], - * memory block for CPU 1 by arr[1]... - * memory block for CPU N by arr[N]... - * - * cacheline aligned. 
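
Editorial sketch, not part of the original patch: the indexing scheme the
comment above describes, one cacheline-aligned block per CPU partition. This
mirrors cfs_percpt_atomic_alloc() shown earlier in libcfs_lock.c; the name
alloc_percpt_counters is invented for illustration.

static atomic_t **alloc_percpt_counters(struct cfs_cpt_table *cptab)
{
	atomic_t **counters;
	atomic_t *cnt;
	int i;

	/* one cacheline-aligned atomic_t per CPU partition */
	counters = cfs_percpt_alloc(cptab, sizeof(**counters));
	if (counters == NULL)
		return NULL;

	cfs_percpt_for_each(cnt, i, counters)
		atomic_set(cnt, 0);

	return counters;	/* release with cfs_percpt_free(counters) */
}
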
- */ -void * -cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) -{ - struct cfs_var_array *arr; - int count; - int i; - - count = cfs_cpt_number(cptab); - - LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); - if (!arr) - return NULL; - - arr->va_size = size = L1_CACHE_ALIGN(size); - arr->va_count = count; - arr->va_cptab = cptab; - - for (i = 0; i < count; i++) { - LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size); - if (!arr->va_ptrs[i]) { - cfs_percpt_free((void *)&arr->va_ptrs[0]); - return NULL; - } - } - - return (void *)&arr->va_ptrs[0]; -} -EXPORT_SYMBOL(cfs_percpt_alloc); - -/* - * return number of CPUs (or number of elements in per-cpu data) - * according to cptab of @vars - */ -int -cfs_percpt_number(void *vars) -{ - struct cfs_var_array *arr; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - return arr->va_count; -} -EXPORT_SYMBOL(cfs_percpt_number); - -/* - * return memory block shadowed from current CPU - */ -void * -cfs_percpt_current(void *vars) -{ - struct cfs_var_array *arr; - int cpt; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - cpt = cfs_cpt_current(arr->va_cptab, 0); - if (cpt < 0) - return NULL; - - return arr->va_ptrs[cpt]; -} - -void * -cfs_percpt_index(void *vars, int idx) -{ - struct cfs_var_array *arr; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - LASSERT(idx >= 0 && idx < arr->va_count); - return arr->va_ptrs[idx]; -} - -/* - * free variable array, see more detail in cfs_array_alloc - */ -void -cfs_array_free(void *vars) -{ - struct cfs_var_array *arr; - int i; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - for (i = 0; i < arr->va_count; i++) { - if (!arr->va_ptrs[i]) - continue; - - LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); - } - LIBCFS_FREE(arr, offsetof(struct cfs_var_array, - va_ptrs[arr->va_count])); -} -EXPORT_SYMBOL(cfs_array_free); - -/* - * allocate a variable array, returned value is an array of pointers. - * Caller can specify length of array by @count, @size is size of each - * memory block in array. - */ -void * -cfs_array_alloc(int count, unsigned int size) -{ - struct cfs_var_array *arr; - int i; - - LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); - if (!arr) - return NULL; - - arr->va_count = count; - arr->va_size = size; - - for (i = 0; i < count; i++) { - LIBCFS_ALLOC(arr->va_ptrs[i], size); - - if (!arr->va_ptrs[i]) { - cfs_array_free((void *)&arr->va_ptrs[0]); - return NULL; - } - } - - return (void *)&arr->va_ptrs[0]; -} -EXPORT_SYMBOL(cfs_array_alloc); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c deleted file mode 100644 index 205a3ed43..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c +++ /dev/null @@ -1,564 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, 2015 Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * String manipulation functions.
- *
- * libcfs/libcfs/libcfs_string.c
- *
- * Author: Nathan Rutman <nathan.rutman@sun.com>
- */
-
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* Convert a text string to a bitmask */
-int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
-		 int *oldmask, int minmask, int allmask)
-{
-	const char *debugstr;
-	char op = '\0';
-	int newmask = minmask, i, len, found = 0;
-
-	/* must be a list of tokens separated by whitespace
-	 * and optionally an operator ('+' or '-'). If an operator
-	 * appears first in <str>, '*oldmask' is used as the starting point
-	 * (relative), otherwise minmask is used (absolute). An operator
-	 * applies to all following tokens up to the next operator. */
-	while (*str != '\0') {
-		while (isspace(*str))
-			str++;
-		if (*str == '\0')
-			break;
-		if (*str == '+' || *str == '-') {
-			op = *str++;
-			if (!found)
-				/* only if first token is relative */
-				newmask = *oldmask;
-			while (isspace(*str))
-				str++;
-			if (*str == '\0')	/* trailing op */
-				return -EINVAL;
-		}
-
-		/* find token length */
-		len = 0;
-		while (str[len] != '\0' && !isspace(str[len]) &&
-		       str[len] != '+' && str[len] != '-')
-			len++;
-
-		/* match token */
-		found = 0;
-		for (i = 0; i < 32; i++) {
-			debugstr = bit2str(i);
-			if (debugstr != NULL &&
-			    strlen(debugstr) == len &&
-			    strncasecmp(str, debugstr, len) == 0) {
-				if (op == '-')
-					newmask &= ~(1 << i);
-				else
-					newmask |= (1 << i);
-				found = 1;
-				break;
-			}
-		}
-		if (!found && len == 3 &&
-		    (strncasecmp(str, "ALL", len) == 0)) {
-			if (op == '-')
-				newmask = minmask;
-			else
-				newmask = allmask;
-			found = 1;
-		}
-		if (!found) {
-			CWARN("unknown mask '%.*s'.\n"
-			      "mask usage: [+|-]<all|type> ...\n", len, str);
-			return -EINVAL;
-		}
-		str += len;
-	}
-
-	*oldmask = newmask;
-	return 0;
-}
-
-/* get the first string out of @str */
-char *cfs_firststr(char *str, size_t size)
-{
-	size_t i = 0;
-	char *end;
-
-	/* trim leading spaces */
-	while (i < size && *str && isspace(*str)) {
-		++i;
-		++str;
-	}
-
-	/* string with all spaces */
-	if (*str == '\0')
-		goto out;
-
-	end = str;
-	while (i < size && *end != '\0' && !isspace(*end)) {
-		++i;
-		++end;
-	}
-
-	*end = '\0';
-out:
-	return str;
-}
-EXPORT_SYMBOL(cfs_firststr);
-
-char *
-cfs_trimwhite(char *str)
-{
-	char *end;
-
-	while (isspace(*str))
-		str++;
-
-	end = str + strlen(str);
-	while (end > str) {
-		if (!isspace(end[-1]))
-			break;
-		end--;
-	}
-
-	*end = 0;
-	return str;
-}
-EXPORT_SYMBOL(cfs_trimwhite);
-
-/**
- * Extracts tokens from strings.
- *
- * Looks for \a delim in string \a next, sets \a res to point to
- * substring before the delimiter, sets \a next right after the found
- * delimiter.
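
Editorial sketch, not part of the original patch: how a caller might drive
cfs_str2mask() above. The token names understood by bit2str() come from the
caller (in the real tree, the debug-mask tables supply them); the string
"+info -trace", the mask values and the demo wrapper are illustrative
assumptions only.

static int demo_str2mask(const char *(*bit2str)(int bit))
{
	int mask = (1 << 1);	/* pretend bit 1 is currently set */

	/* a leading '+' makes the update relative to *oldmask: bits that
	 * bit2str() names "info"/"trace" are added/removed respectively */
	return cfs_str2mask("+info -trace", bit2str, &mask, 0, ~0);
}
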
- *
- * \retval 1 if \a res points to a string of non-whitespace characters
- * \retval 0 otherwise
- */
-int
-cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
-{
-	char *end;
-
-	if (next->ls_str == NULL)
-		return 0;
-
-	/* skip leading white spaces */
-	while (next->ls_len) {
-		if (!isspace(*next->ls_str))
-			break;
-		next->ls_str++;
-		next->ls_len--;
-	}
-
-	if (next->ls_len == 0) /* whitespaces only */
-		return 0;
-
-	if (*next->ls_str == delim) {
-		/* first non-whitespace is the delimiter */
-		return 0;
-	}
-
-	res->ls_str = next->ls_str;
-	end = memchr(next->ls_str, delim, next->ls_len);
-	if (end == NULL) {
-		/* there is no delimiter in the string */
-		end = next->ls_str + next->ls_len;
-		next->ls_str = NULL;
-	} else {
-		next->ls_str = end + 1;
-		next->ls_len -= (end - res->ls_str + 1);
-	}
-
-	/* skip ending whitespaces */
-	while (--end != res->ls_str) {
-		if (!isspace(*end))
-			break;
-	}
-
-	res->ls_len = end - res->ls_str + 1;
-	return 1;
-}
-EXPORT_SYMBOL(cfs_gettok);
-
-/**
- * Converts string to integer.
- *
- * Accepts decimal and hexadecimal number notations.
- *
- * \retval 1 if first \a nob chars of \a str convert to decimal or
- * hexadecimal integer in the range [\a min, \a max]
- * \retval 0 otherwise
- */
-int
-cfs_str2num_check(char *str, int nob, unsigned *num,
-		  unsigned min, unsigned max)
-{
-	char *endp;
-
-	str = cfs_trimwhite(str);
-	*num = simple_strtoul(str, &endp, 0);
-	if (endp == str)
-		return 0;
-
-	for (; endp < str + nob; endp++) {
-		if (!isspace(*endp))
-			return 0;
-	}
-
-	return (*num >= min && *num <= max);
-}
-EXPORT_SYMBOL(cfs_str2num_check);
-
-/**
- * Parses \<range_expr\> token of the syntax. If \a bracketed is false,
- * \a src should only have a single token which can be \<number\> or \*
- *
- * \retval 0, with \a *expr pointing to an allocated range_expr with
- * range_expr::re_lo, range_expr::re_hi and range_expr::re_stride
- * initialized, if \a src parses to
- * \<number\> |
- * \<number\> '-' \<number\> |
- * \<number\> '-' \<number\> '/' \<number\>
- * \retval -EINVAL or -ENOMEM otherwise
- */
-static int
-cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
-		     int bracketed, struct cfs_range_expr **expr)
-{
-	struct cfs_range_expr *re;
-	struct cfs_lstr tok;
-
-	LIBCFS_ALLOC(re, sizeof(*re));
-	if (re == NULL)
-		return -ENOMEM;
-
-	if (src->ls_len == 1 && src->ls_str[0] == '*') {
-		re->re_lo = min;
-		re->re_hi = max;
-		re->re_stride = 1;
-		goto out;
-	}
-
-	if (cfs_str2num_check(src->ls_str, src->ls_len,
-			      &re->re_lo, min, max)) {
-		/* <number> is parsed */
-		re->re_hi = re->re_lo;
-		re->re_stride = 1;
-		goto out;
-	}
-
-	if (!bracketed || !cfs_gettok(src, '-', &tok))
-		goto failed;
-
-	if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
-			       &re->re_lo, min, max))
-		goto failed;
-
-	/* <number> - */
-	if (cfs_str2num_check(src->ls_str, src->ls_len,
-			      &re->re_hi, min, max)) {
-		/* <number> - <number> is parsed */
-		re->re_stride = 1;
-		goto out;
-	}
-
-	/* go to check <number> '-' <number> '/' <number> */
-	if (cfs_gettok(src, '/', &tok)) {
-		if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
-				       &re->re_hi, min, max))
-			goto failed;
-
-		/* <number> - <number> / ... */
-		if (cfs_str2num_check(src->ls_str, src->ls_len,
-				      &re->re_stride, min, max)) {
-			/* <number> - <number> / <number> is parsed */
-			goto out;
-		}
-	}
-
- out:
-	*expr = re;
-	return 0;
-
- failed:
-	LIBCFS_FREE(re, sizeof(*re));
-	return -EINVAL;
-}
-
-/**
- * Print the range expression \a re into specified \a buffer.
- * If \a bracketed is true, expression does not need additional
- * brackets.
- * - * \retval number of characters written - */ -static int -cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr, - bool bracketed) -{ - int i; - char s[] = "["; - char e[] = "]"; - - if (bracketed) - s[0] = e[0] = '\0'; - - if (expr->re_lo == expr->re_hi) - i = scnprintf(buffer, count, "%u", expr->re_lo); - else if (expr->re_stride == 1) - i = scnprintf(buffer, count, "%s%u-%u%s", - s, expr->re_lo, expr->re_hi, e); - else - i = scnprintf(buffer, count, "%s%u-%u/%u%s", - s, expr->re_lo, expr->re_hi, - expr->re_stride, e); - return i; -} - -/** - * Print a list of range expressions (\a expr_list) into specified \a buffer. - * If the list contains several expressions, separate them with comma - * and surround the list with brackets. - * - * \retval number of characters written - */ -int -cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list) -{ - struct cfs_range_expr *expr; - int i = 0, j = 0; - int numexprs = 0; - - if (count <= 0) - return 0; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) - numexprs++; - - if (numexprs > 1) - i += scnprintf(buffer + i, count - i, "["); - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (j++ != 0) - i += scnprintf(buffer + i, count - i, ","); - i += cfs_range_expr_print(buffer + i, count - i, expr, - numexprs > 1); - } - - if (numexprs > 1) - i += scnprintf(buffer + i, count - i, "]"); - - return i; -} -EXPORT_SYMBOL(cfs_expr_list_print); - -/** - * Matches value (\a value) against ranges expression list \a expr_list. - * - * \retval 1 if \a value matches - * \retval 0 otherwise - */ -int -cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list) -{ - struct cfs_range_expr *expr; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (value >= expr->re_lo && value <= expr->re_hi && - ((value - expr->re_lo) % expr->re_stride) == 0) - return 1; - } - - return 0; -} -EXPORT_SYMBOL(cfs_expr_list_match); - -/** - * Convert express list (\a expr_list) to an array of all matched values - * - * \retval N N is total number of all matched values - * \retval 0 if expression list is empty - * \retval < 0 for failure - */ -int -cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) -{ - struct cfs_range_expr *expr; - __u32 *val; - int count = 0; - int i; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (((i - expr->re_lo) % expr->re_stride) == 0) - count++; - } - } - - if (count == 0) /* empty expression list */ - return 0; - - if (count > max) { - CERROR("Number of values %d exceeds max allowed %d\n", - max, count); - return -EINVAL; - } - - LIBCFS_ALLOC(val, sizeof(val[0]) * count); - if (val == NULL) - return -ENOMEM; - - count = 0; - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (((i - expr->re_lo) % expr->re_stride) == 0) - val[count++] = i; - } - } - - *valpp = val; - return count; -} -EXPORT_SYMBOL(cfs_expr_list_values); - -/** - * Frees cfs_range_expr structures of \a expr_list. 
- * - * \retval none - */ -void -cfs_expr_list_free(struct cfs_expr_list *expr_list) -{ - while (!list_empty(&expr_list->el_exprs)) { - struct cfs_range_expr *expr; - - expr = list_entry(expr_list->el_exprs.next, - struct cfs_range_expr, re_link); - list_del(&expr->re_link); - LIBCFS_FREE(expr, sizeof(*expr)); - } - - LIBCFS_FREE(expr_list, sizeof(*expr_list)); -} -EXPORT_SYMBOL(cfs_expr_list_free); - -/** - * Parses \ token of the syntax. - * - * \retval 0 if \a str parses to \ | \ - * \retval -errno otherwise - */ -int -cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, - struct cfs_expr_list **elpp) -{ - struct cfs_expr_list *expr_list; - struct cfs_range_expr *expr; - struct cfs_lstr src; - int rc; - - LIBCFS_ALLOC(expr_list, sizeof(*expr_list)); - if (expr_list == NULL) - return -ENOMEM; - - src.ls_str = str; - src.ls_len = len; - - INIT_LIST_HEAD(&expr_list->el_exprs); - - if (src.ls_str[0] == '[' && - src.ls_str[src.ls_len - 1] == ']') { - src.ls_str++; - src.ls_len -= 2; - - rc = -EINVAL; - while (src.ls_str != NULL) { - struct cfs_lstr tok; - - if (!cfs_gettok(&src, ',', &tok)) { - rc = -EINVAL; - break; - } - - rc = cfs_range_expr_parse(&tok, min, max, 1, &expr); - if (rc != 0) - break; - - list_add_tail(&expr->re_link, - &expr_list->el_exprs); - } - } else { - rc = cfs_range_expr_parse(&src, min, max, 0, &expr); - if (rc == 0) { - list_add_tail(&expr->re_link, - &expr_list->el_exprs); - } - } - - if (rc != 0) - cfs_expr_list_free(expr_list); - else - *elpp = expr_list; - - return rc; -} -EXPORT_SYMBOL(cfs_expr_list_parse); - -/** - * Frees cfs_expr_list structures of \a list. - * - * For each struct cfs_expr_list structure found on \a list it frees - * range_expr list attached to it and frees the cfs_expr_list itself. - * - * \retval none - */ -void -cfs_expr_list_free_list(struct list_head *list) -{ - struct cfs_expr_list *el; - - while (!list_empty(list)) { - el = list_entry(list->next, - struct cfs_expr_list, el_link); - list_del(&el->el_link); - cfs_expr_list_free(el); - } -} -EXPORT_SYMBOL(cfs_expr_list_free_list); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c deleted file mode 100644 index e52afe35e..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
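
Editorial sketch, not part of the original patch: a worked example of the
expression syntax handled by cfs_expr_list_parse(), cfs_expr_list_match()
and cfs_expr_list_free() shown above. The function name demo_expr_list and
the range bounds are invented for illustration.

static int demo_expr_list(void)
{
	struct cfs_expr_list *el;
	char str[] = "[0-7/2,9]";	/* matches 0, 2, 4, 6 and 9 */
	int rc;

	rc = cfs_expr_list_parse(str, sizeof(str) - 1, 0, 15, &el);
	if (rc != 0)
		return rc;

	LASSERT(cfs_expr_list_match(4, el));	/* in 0-7/2 */
	LASSERT(!cfs_expr_list_match(5, el));	/* excluded by stride 2 */

	cfs_expr_list_free(el);
	return 0;
}
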
- *
- * Author: liang@whamcloud.com
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include "../../../include/linux/libcfs/libcfs.h"
-
-#ifdef CONFIG_SMP
-
-/**
- * modparam for setting number of partitions
- *
- *  0 : estimate best value based on cores or NUMA nodes
- *  1 : disable multiple partitions
- * >1 : specify number of partitions
- */
-static int cpu_npartitions;
-module_param(cpu_npartitions, int, 0444);
-MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
-
-/**
- * modparam for setting CPU partition patterns:
- *
- * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is the CPU
- * partition ID, the numbers inside the brackets are processor IDs (core or HT)
- *
- * e.g. "N 0[0,1] 1[2,3]": the leading character 'N' means the numbers inside
- * the brackets are NUMA node IDs, the number before each bracket is the CPU
- * partition ID.
- *
- * NB: if the user specifies cpu_pattern, cpu_npartitions is ignored
- */
-static char *cpu_pattern = "";
-module_param(cpu_pattern, charp, 0444);
-MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
-
-struct cfs_cpt_data {
-	/* serialize hotplug etc */
-	spinlock_t		cpt_lock;
-	/* reserved for hotplug */
-	unsigned long		cpt_version;
-	/* mutex to protect cpt_cpumask */
-	struct mutex		cpt_mutex;
-	/* scratch buffer for set/unset_node */
-	cpumask_t		*cpt_cpumask;
-};
-
-static struct cfs_cpt_data	cpt_data;
-
-void
-cfs_cpt_table_free(struct cfs_cpt_table *cptab)
-{
-	int i;
-
-	if (cptab->ctb_cpu2cpt != NULL) {
-		LIBCFS_FREE(cptab->ctb_cpu2cpt,
-			    num_possible_cpus() *
-			    sizeof(cptab->ctb_cpu2cpt[0]));
-	}
-
-	for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
-		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
-
-		if (part->cpt_nodemask != NULL) {
-			LIBCFS_FREE(part->cpt_nodemask,
-				    sizeof(*part->cpt_nodemask));
-		}
-
-		if (part->cpt_cpumask != NULL)
-			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
-	}
-
-	if (cptab->ctb_parts != NULL) {
-		LIBCFS_FREE(cptab->ctb_parts,
-			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
-	}
-
-	if (cptab->ctb_nodemask != NULL)
-		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
-	if (cptab->ctb_cpumask != NULL)
-		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
-
-	LIBCFS_FREE(cptab, sizeof(*cptab));
-}
-EXPORT_SYMBOL(cfs_cpt_table_free);
-
-struct cfs_cpt_table *
-cfs_cpt_table_alloc(unsigned int ncpt)
-{
-	struct cfs_cpt_table *cptab;
-	int i;
-
-	LIBCFS_ALLOC(cptab, sizeof(*cptab));
-	if (cptab == NULL)
-		return NULL;
-
-	cptab->ctb_nparts = ncpt;
-
-	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
-	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
-
-	if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
-		goto failed;
-
-	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
-		     num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
-	if (cptab->ctb_cpu2cpt == NULL)
-		goto failed;
-
-	memset(cptab->ctb_cpu2cpt, -1,
-	       num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
-
-	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
-	if (cptab->ctb_parts == NULL)
-		goto failed;
-
-	for (i = 0; i < ncpt; i++) {
-		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
-
-		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
-		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
-		if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
-			goto failed;
-	}
-
-	spin_lock(&cpt_data.cpt_lock);
-	/* Reserved for hotplug */
-	cptab->ctb_version = cpt_data.cpt_version;
-	spin_unlock(&cpt_data.cpt_lock);
-
-	return cptab;
-
- failed:
-	cfs_cpt_table_free(cptab);
-	return NULL;
-}
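
Editorial sketch, not part of the original patch: how the module parameters
documented above would be supplied, and a minimal caller of the table
allocator. The modprobe lines and the name demo_two_cpts are illustrative
assumptions; cfs_cpt_set_cpu() (defined further down) returns 1 on success
and 0 on failure.

/*
 * e.g. at module load time (illustrative):
 *   modprobe libcfs cpu_npartitions=4
 *   modprobe libcfs cpu_pattern="0[0,2] 1[1,3]"   - explicit CPUs per CPT
 *   modprobe libcfs cpu_pattern="N 0[0] 1[1]"     - NUMA-node based pattern
 */
static struct cfs_cpt_table *demo_two_cpts(void)
{
	struct cfs_cpt_table *cptab;

	cptab = cfs_cpt_table_alloc(2);
	if (cptab == NULL)
		return NULL;

	/* place CPU 0 in partition 0 and CPU 1 in partition 1 */
	if (!cfs_cpt_set_cpu(cptab, 0, 0) || !cfs_cpt_set_cpu(cptab, 1, 1)) {
		cfs_cpt_table_free(cptab);
		return NULL;
	}
	return cptab;
}
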
-EXPORT_SYMBOL(cfs_cpt_table_alloc); - -int -cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) -{ - char *tmp = buf; - int rc = 0; - int i; - int j; - - for (i = 0; i < cptab->ctb_nparts; i++) { - if (len > 0) { - rc = snprintf(tmp, len, "%d\t: ", i); - len -= rc; - } - - if (len <= 0) { - rc = -EFBIG; - goto out; - } - - tmp += rc; - for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) { - rc = snprintf(tmp, len, "%d ", j); - len -= rc; - if (len <= 0) { - rc = -EFBIG; - goto out; - } - tmp += rc; - } - - *tmp = '\n'; - tmp++; - len--; - } - - out: - if (rc < 0) - return rc; - - return tmp - buf; -} -EXPORT_SYMBOL(cfs_cpt_table_print); - -int -cfs_cpt_number(struct cfs_cpt_table *cptab) -{ - return cptab->ctb_nparts; -} -EXPORT_SYMBOL(cfs_cpt_number); - -int -cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cpumask_weight(cptab->ctb_cpumask) : - cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask); -} -EXPORT_SYMBOL(cfs_cpt_weight); - -int -cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cpumask_any_and(cptab->ctb_cpumask, - cpu_online_mask) < nr_cpu_ids : - cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, - cpu_online_mask) < nr_cpu_ids; -} -EXPORT_SYMBOL(cfs_cpt_online); - -cpumask_t * -cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask; -} -EXPORT_SYMBOL(cfs_cpt_cpumask); - -nodemask_t * -cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? 
- cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask; -} -EXPORT_SYMBOL(cfs_cpt_nodemask); - -int -cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - int node; - - LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); - - if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { - CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); - return 0; - } - - if (cptab->ctb_cpu2cpt[cpu] != -1) { - CDEBUG(D_INFO, "CPU %d is already in partition %d\n", - cpu, cptab->ctb_cpu2cpt[cpu]); - return 0; - } - - cptab->ctb_cpu2cpt[cpu] = cpt; - - LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - - cpumask_set_cpu(cpu, cptab->ctb_cpumask); - cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); - - node = cpu_to_node(cpu); - - /* first CPU of @node in this CPT table */ - if (!node_isset(node, *cptab->ctb_nodemask)) - node_set(node, *cptab->ctb_nodemask); - - /* first CPU of @node in this partition */ - if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)) - node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask); - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpu); - -void -cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - int node; - int i; - - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - if (cpu < 0 || cpu >= nr_cpu_ids) { - CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); - return; - } - - if (cpt == CFS_CPT_ANY) { - /* caller doesn't know the partition ID */ - cpt = cptab->ctb_cpu2cpt[cpu]; - if (cpt < 0) { /* not set in this CPT-table */ - CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n", - cpt, cptab); - return; - } - - } else if (cpt != cptab->ctb_cpu2cpt[cpu]) { - CDEBUG(D_INFO, - "CPU %d is not in cpu-partition %d\n", cpu, cpt); - return; - } - - LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - - cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); - cpumask_clear_cpu(cpu, cptab->ctb_cpumask); - cptab->ctb_cpu2cpt[cpu] = -1; - - node = cpu_to_node(cpu); - - LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); - LASSERT(node_isset(node, *cptab->ctb_nodemask)); - - for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) { - /* this CPT has other CPU belonging to this node? */ - if (cpu_to_node(i) == node) - break; - } - - if (i >= nr_cpu_ids) - node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); - - for_each_cpu(i, cptab->ctb_cpumask) { - /* this CPT-table has other CPU belonging to this node? 
*/ - if (cpu_to_node(i) == node) - break; - } - - if (i >= nr_cpu_ids) - node_clear(node, *cptab->ctb_nodemask); - - return; -} -EXPORT_SYMBOL(cfs_cpt_unset_cpu); - -int -cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - int i; - - if (cpumask_weight(mask) == 0 || - cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { - CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", - cpt); - return 0; - } - - for_each_cpu(i, mask) { - if (!cfs_cpt_set_cpu(cptab, cpt, i)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpumask); - -void -cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - int i; - - for_each_cpu(i, mask) - cfs_cpt_unset_cpu(cptab, cpt, i); -} -EXPORT_SYMBOL(cfs_cpt_unset_cpumask); - -int -cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - cpumask_t *mask; - int rc; - - if (node < 0 || node >= MAX_NUMNODES) { - CDEBUG(D_INFO, - "Invalid NUMA id %d for CPU partition %d\n", node, cpt); - return 0; - } - - mutex_lock(&cpt_data.cpt_mutex); - - mask = cpt_data.cpt_cpumask; - cpumask_copy(mask, cpumask_of_node(node)); - - rc = cfs_cpt_set_cpumask(cptab, cpt, mask); - - mutex_unlock(&cpt_data.cpt_mutex); - - return rc; -} -EXPORT_SYMBOL(cfs_cpt_set_node); - -void -cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - cpumask_t *mask; - - if (node < 0 || node >= MAX_NUMNODES) { - CDEBUG(D_INFO, - "Invalid NUMA id %d for CPU partition %d\n", node, cpt); - return; - } - - mutex_lock(&cpt_data.cpt_mutex); - - mask = cpt_data.cpt_cpumask; - cpumask_copy(mask, cpumask_of_node(node)); - - cfs_cpt_unset_cpumask(cptab, cpt, mask); - - mutex_unlock(&cpt_data.cpt_mutex); -} -EXPORT_SYMBOL(cfs_cpt_unset_node); - -int -cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - int i; - - for_each_node_mask(i, *mask) { - if (!cfs_cpt_set_node(cptab, cpt, i)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_nodemask); - -void -cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - int i; - - for_each_node_mask(i, *mask) - cfs_cpt_unset_node(cptab, cpt, i); -} -EXPORT_SYMBOL(cfs_cpt_unset_nodemask); - -void -cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) -{ - int last; - int i; - - if (cpt == CFS_CPT_ANY) { - last = cptab->ctb_nparts - 1; - cpt = 0; - } else { - last = cpt; - } - - for (; cpt <= last; cpt++) { - for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) - cfs_cpt_unset_cpu(cptab, cpt, i); - } -} -EXPORT_SYMBOL(cfs_cpt_clear); - -int -cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) -{ - nodemask_t *mask; - int weight; - int rotor; - int node; - - /* convert CPU partition ID to HW node id */ - - if (cpt < 0 || cpt >= cptab->ctb_nparts) { - mask = cptab->ctb_nodemask; - rotor = cptab->ctb_spread_rotor++; - } else { - mask = cptab->ctb_parts[cpt].cpt_nodemask; - rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++; - } - - weight = nodes_weight(*mask); - LASSERT(weight > 0); - - rotor %= weight; - - for_each_node_mask(node, *mask) { - if (rotor-- == 0) - return node; - } - - LBUG(); - return 0; -} -EXPORT_SYMBOL(cfs_cpt_spread_node); - -int -cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) -{ - int cpu = smp_processor_id(); - int cpt = cptab->ctb_cpu2cpt[cpu]; - - if (cpt < 0) { - if (!remap) - return cpt; - - /* don't return negative value for safety of upper layer, - * instead we shadow the unknown cpu to a valid partition ID */ - cpt = cpu % cptab->ctb_nparts; - } - - return cpt; -} 
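
Editorial note, not part of the original patch: a worked example of the
remap rule in cfs_cpt_current() above, assuming ctb_nparts == 4 and a CPU
that was never added to the table (ctb_cpu2cpt[cpu] == -1):

/*
 *	cfs_cpt_current(cptab, 0);	returns -1 for the unmapped CPU
 *	cfs_cpt_current(cptab, 1);	returns cpu % 4, always a valid CPT,
 *					so upper layers never index with -1
 */
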
-EXPORT_SYMBOL(cfs_cpt_current); - -int -cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) -{ - LASSERT(cpu >= 0 && cpu < nr_cpu_ids); - - return cptab->ctb_cpu2cpt[cpu]; -} -EXPORT_SYMBOL(cfs_cpt_of_cpu); - -int -cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) -{ - cpumask_t *cpumask; - nodemask_t *nodemask; - int rc; - int i; - - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - if (cpt == CFS_CPT_ANY) { - cpumask = cptab->ctb_cpumask; - nodemask = cptab->ctb_nodemask; - } else { - cpumask = cptab->ctb_parts[cpt].cpt_cpumask; - nodemask = cptab->ctb_parts[cpt].cpt_nodemask; - } - - if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { - CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n", - cpt); - return -EINVAL; - } - - for_each_online_cpu(i) { - if (cpumask_test_cpu(i, cpumask)) - continue; - - rc = set_cpus_allowed_ptr(current, cpumask); - set_mems_allowed(*nodemask); - if (rc == 0) - schedule(); /* switch to allowed CPU */ - - return rc; - } - - /* don't need to set affinity because all online CPUs are covered */ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_bind); - -/** - * Choose max to \a number CPUs from \a node and set them in \a cpt. - * We always prefer to choose CPU in the same core/socket. - */ -static int -cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, - cpumask_t *node, int number) -{ - cpumask_t *socket = NULL; - cpumask_t *core = NULL; - int rc = 0; - int cpu; - - LASSERT(number > 0); - - if (number >= cpumask_weight(node)) { - while (!cpumask_empty(node)) { - cpu = cpumask_first(node); - - rc = cfs_cpt_set_cpu(cptab, cpt, cpu); - if (!rc) - return -EINVAL; - cpumask_clear_cpu(cpu, node); - } - return 0; - } - - /* allocate scratch buffer */ - LIBCFS_ALLOC(socket, cpumask_size()); - LIBCFS_ALLOC(core, cpumask_size()); - if (socket == NULL || core == NULL) { - rc = -ENOMEM; - goto out; - } - - while (!cpumask_empty(node)) { - cpu = cpumask_first(node); - - /* get cpumask for cores in the same socket */ - cpumask_copy(socket, topology_core_cpumask(cpu)); - cpumask_and(socket, socket, node); - - LASSERT(!cpumask_empty(socket)); - - while (!cpumask_empty(socket)) { - int i; - - /* get cpumask for hts in the same core */ - cpumask_copy(core, topology_sibling_cpumask(cpu)); - cpumask_and(core, core, node); - - LASSERT(!cpumask_empty(core)); - - for_each_cpu(i, core) { - cpumask_clear_cpu(i, socket); - cpumask_clear_cpu(i, node); - - rc = cfs_cpt_set_cpu(cptab, cpt, i); - if (!rc) { - rc = -EINVAL; - goto out; - } - - if (--number == 0) - goto out; - } - cpu = cpumask_first(socket); - } - } - - out: - if (socket != NULL) - LIBCFS_FREE(socket, cpumask_size()); - if (core != NULL) - LIBCFS_FREE(core, cpumask_size()); - return rc; -} - -#define CPT_WEIGHT_MIN 4u - -static unsigned int -cfs_cpt_num_estimate(void) -{ - unsigned nnode = num_online_nodes(); - unsigned ncpu = num_online_cpus(); - unsigned ncpt; - - if (ncpu <= CPT_WEIGHT_MIN) { - ncpt = 1; - goto out; - } - - /* generate reasonable number of CPU partitions based on total number - * of CPUs, Preferred N should be power2 and match this condition: - * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */ - for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) - ; - - if (ncpt <= nnode) { /* fat numa system */ - while (nnode > ncpt) - nnode >>= 1; - - } else { /* ncpt > nnode */ - while ((nnode << 1) <= ncpt) - nnode <<= 1; - } - - ncpt = nnode; - - out: -#if (BITS_PER_LONG == 32) - /* config 
many CPU partitions on 32-bit system could consume - * too much memory */ - ncpt = min(2U, ncpt); -#endif - while (ncpu % ncpt != 0) - ncpt--; /* worst case is 1 */ - - return ncpt; -} - -static struct cfs_cpt_table * -cfs_cpt_table_create(int ncpt) -{ - struct cfs_cpt_table *cptab = NULL; - cpumask_t *mask = NULL; - int cpt = 0; - int num; - int rc; - int i; - - rc = cfs_cpt_num_estimate(); - if (ncpt <= 0) - ncpt = rc; - - if (ncpt > num_online_cpus() || ncpt > 4 * rc) { - CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n", - ncpt, rc); - } - - if (num_online_cpus() % ncpt != 0) { - CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n", - (int)num_online_cpus(), ncpt); - goto failed; - } - - cptab = cfs_cpt_table_alloc(ncpt); - if (cptab == NULL) { - CERROR("Failed to allocate CPU map(%d)\n", ncpt); - goto failed; - } - - num = num_online_cpus() / ncpt; - if (num == 0) { - CERROR("CPU changed while setting CPU partition\n"); - goto failed; - } - - LIBCFS_ALLOC(mask, cpumask_size()); - if (mask == NULL) { - CERROR("Failed to allocate scratch cpumask\n"); - goto failed; - } - - for_each_online_node(i) { - cpumask_copy(mask, cpumask_of_node(i)); - - while (!cpumask_empty(mask)) { - struct cfs_cpu_partition *part; - int n; - - if (cpt >= ncpt) - goto failed; - - part = &cptab->ctb_parts[cpt]; - - n = num - cpumask_weight(part->cpt_cpumask); - LASSERT(n > 0); - - rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); - if (rc < 0) - goto failed; - - LASSERT(num >= cpumask_weight(part->cpt_cpumask)); - if (num == cpumask_weight(part->cpt_cpumask)) - cpt++; - } - } - - if (cpt != ncpt || - num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { - CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", - cptab->ctb_nparts, num, cpt, - cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)); - goto failed; - } - - LIBCFS_FREE(mask, cpumask_size()); - - return cptab; - - failed: - CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n", - ncpt, num_online_nodes(), num_online_cpus()); - - if (mask != NULL) - LIBCFS_FREE(mask, cpumask_size()); - - if (cptab != NULL) - cfs_cpt_table_free(cptab); - - return NULL; -} - -static struct cfs_cpt_table * -cfs_cpt_table_create_pattern(char *pattern) -{ - struct cfs_cpt_table *cptab; - char *str = pattern; - int node = 0; - int high; - int ncpt; - int c; - - for (ncpt = 0;; ncpt++) { /* quick scan bracket */ - str = strchr(str, '['); - if (str == NULL) - break; - str++; - } - - str = cfs_trimwhite(pattern); - if (*str == 'n' || *str == 'N') { - pattern = str + 1; - node = 1; - } - - if (ncpt == 0 || - (node && ncpt > num_online_nodes()) || - (!node && ncpt > num_online_cpus())) { - CERROR("Invalid pattern %s, or too many partitions %d\n", - pattern, ncpt); - return NULL; - } - - high = node ? 
MAX_NUMNODES - 1 : nr_cpu_ids - 1; - - cptab = cfs_cpt_table_alloc(ncpt); - if (cptab == NULL) { - CERROR("Failed to allocate cpu partition table\n"); - return NULL; - } - - for (str = cfs_trimwhite(pattern), c = 0;; c++) { - struct cfs_range_expr *range; - struct cfs_expr_list *el; - char *bracket = strchr(str, '['); - int cpt; - int rc; - int i; - int n; - - if (bracket == NULL) { - if (*str != 0) { - CERROR("Invalid pattern %s\n", str); - goto failed; - } else if (c != ncpt) { - CERROR("expect %d partitions but found %d\n", - ncpt, c); - goto failed; - } - break; - } - - if (sscanf(str, "%d%n", &cpt, &n) < 1) { - CERROR("Invalid cpu pattern %s\n", str); - goto failed; - } - - if (cpt < 0 || cpt >= ncpt) { - CERROR("Invalid partition id %d, total partitions %d\n", - cpt, ncpt); - goto failed; - } - - if (cfs_cpt_weight(cptab, cpt) != 0) { - CERROR("Partition %d has already been set.\n", cpt); - goto failed; - } - - str = cfs_trimwhite(str + n); - if (str != bracket) { - CERROR("Invalid pattern %s\n", str); - goto failed; - } - - bracket = strchr(str, ']'); - if (bracket == NULL) { - CERROR("missing right bracket for cpt %d, %s\n", - cpt, str); - goto failed; - } - - if (cfs_expr_list_parse(str, (bracket - str) + 1, - 0, high, &el) != 0) { - CERROR("Can't parse number range: %s\n", str); - goto failed; - } - - list_for_each_entry(range, &el->el_exprs, re_link) { - for (i = range->re_lo; i <= range->re_hi; i++) { - if ((i - range->re_lo) % range->re_stride != 0) - continue; - - rc = node ? cfs_cpt_set_node(cptab, cpt, i) : - cfs_cpt_set_cpu(cptab, cpt, i); - if (!rc) { - cfs_expr_list_free(el); - goto failed; - } - } - } - - cfs_expr_list_free(el); - - if (!cfs_cpt_online(cptab, cpt)) { - CERROR("No online CPU is found on partition %d\n", cpt); - goto failed; - } - - str = cfs_trimwhite(bracket + 1); - } - - return cptab; - - failed: - cfs_cpt_table_free(cptab); - return NULL; -} - -#ifdef CONFIG_HOTPLUG_CPU -static int -cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - bool warn; - - switch (action) { - case CPU_DEAD: - case CPU_DEAD_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - spin_lock(&cpt_data.cpt_lock); - cpt_data.cpt_version++; - spin_unlock(&cpt_data.cpt_lock); - default: - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) { - CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n", - cpu, action); - break; - } - - mutex_lock(&cpt_data.cpt_mutex); - /* if all HTs in a core are offline, it may break affinity */ - cpumask_copy(cpt_data.cpt_cpumask, - topology_sibling_cpumask(cpu)); - warn = cpumask_any_and(cpt_data.cpt_cpumask, - cpu_online_mask) >= nr_cpu_ids; - mutex_unlock(&cpt_data.cpt_mutex); - CDEBUG(warn ? 
D_WARNING : D_INFO, - "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", - cpu, action); - } - - return NOTIFY_OK; -} - -static struct notifier_block cfs_cpu_notifier = { - .notifier_call = cfs_cpu_notify, - .priority = 0 -}; - -#endif - -void -cfs_cpu_fini(void) -{ - if (cfs_cpt_table != NULL) - cfs_cpt_table_free(cfs_cpt_table); - -#ifdef CONFIG_HOTPLUG_CPU - unregister_hotcpu_notifier(&cfs_cpu_notifier); -#endif - if (cpt_data.cpt_cpumask != NULL) - LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size()); -} - -int -cfs_cpu_init(void) -{ - LASSERT(cfs_cpt_table == NULL); - - memset(&cpt_data, 0, sizeof(cpt_data)); - - LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size()); - if (cpt_data.cpt_cpumask == NULL) { - CERROR("Failed to allocate scratch buffer\n"); - return -1; - } - - spin_lock_init(&cpt_data.cpt_lock); - mutex_init(&cpt_data.cpt_mutex); - -#ifdef CONFIG_HOTPLUG_CPU - register_hotcpu_notifier(&cfs_cpu_notifier); -#endif - - if (*cpu_pattern != 0) { - cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); - if (cfs_cpt_table == NULL) { - CERROR("Failed to create cptab from pattern %s\n", - cpu_pattern); - goto failed; - } - - } else { - cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions); - if (cfs_cpt_table == NULL) { - CERROR("Failed to create ptable with npartitions %d\n", - cpu_npartitions); - goto failed; - } - } - - spin_lock(&cpt_data.cpt_lock); - if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) { - spin_unlock(&cpt_data.cpt_lock); - CERROR("CPU hotplug/unplug during setup\n"); - goto failed; - } - spin_unlock(&cpt_data.cpt_lock); - - LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n", - num_online_cpus(), cfs_cpt_number(cfs_cpt_table)); - return 0; - - failed: - cfs_cpu_fini(); - return -1; -} - -#endif diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c deleted file mode 100644 index db0572733..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c +++ /dev/null @@ -1,137 +0,0 @@ -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - */ - -/* - * This is crypto api shash wrappers to zlib_adler32. 
- */ - -#include -#include -#include -#include "linux-crypto.h" - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -static int adler32_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = 1; - - return 0; -} - -static int adler32_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - *mctx = *(u32 *)key; - return 0; -} - -static int adler32_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *cksump = shash_desc_ctx(desc); - - *cksump = *mctx; - - return 0; -} - -static int adler32_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *cksump = shash_desc_ctx(desc); - - *cksump = zlib_adler32(*cksump, data, len); - return 0; -} - -static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len, - u8 *out) -{ - *(u32 *)out = zlib_adler32(*cksump, data, len); - return 0; -} - -static int adler32_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __adler32_finup(shash_desc_ctx(desc), data, len, out); -} - -static int adler32_final(struct shash_desc *desc, u8 *out) -{ - u32 *cksump = shash_desc_ctx(desc); - - *(u32 *)out = *cksump; - return 0; -} - -static int adler32_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} - -static struct shash_alg alg = { - .setkey = adler32_setkey, - .init = adler32_init, - .update = adler32_update, - .final = adler32_final, - .finup = adler32_finup, - .digest = adler32_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "adler32", - .cra_driver_name = "adler32-zlib", - .cra_priority = 100, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = adler32_cra_init, - } -}; - -int cfs_crypto_adler32_register(void) -{ - return crypto_register_shash(&alg); -} - -void cfs_crypto_adler32_unregister(void) -{ - crypto_unregister_shash(&alg); -} diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c deleted file mode 100644 index 079d50ebf..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c +++ /dev/null @@ -1,290 +0,0 @@ -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - * - * Copyright (c) 2012, Intel Corporation. 
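
Editorial sketch, not part of the original patch: computing a checksum
through the "adler32" shash registered above, using the standard kernel
crypto shash API (crypto_alloc_shash(), SHASH_DESC_ON_STACK() and
crypto_shash_digest() from crypto/hash.h, as of this kernel generation).
The name demo_adler32 is invented for illustration.

#include <crypto/hash.h>

static int demo_adler32(const u8 *buf, unsigned int len, u32 *out)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = crypto_alloc_shash("adler32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* one-shot: init + update + final over the whole buffer */
		rc = crypto_shash_digest(desc, buf, len, (u8 *)out);
	}

	crypto_free_shash(tfm);
	return rc;
}
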
- */ - -#include -#include -#include "../../../include/linux/libcfs/libcfs.h" -#include "linux-crypto.h" -/** - * Array of hash algorithm speed in MByte per second - */ -static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX]; - -static int cfs_crypto_hash_alloc(unsigned char alg_id, - const struct cfs_crypto_hash_type **type, - struct hash_desc *desc, unsigned char *key, - unsigned int key_len) -{ - int err = 0; - - *type = cfs_crypto_hash_type(alg_id); - - if (*type == NULL) { - CWARN("Unsupported hash algorithm id = %d, max id is %d\n", - alg_id, CFS_HASH_ALG_MAX); - return -EINVAL; - } - desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0); - - if (desc->tfm == NULL) - return -EINVAL; - - if (IS_ERR(desc->tfm)) { - CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n", - (*type)->cht_name); - return PTR_ERR(desc->tfm); - } - - desc->flags = 0; - - /** Shash have different logic for initialization then digest - * shash: crypto_hash_setkey, crypto_hash_init - * digest: crypto_digest_init, crypto_digest_setkey - * Skip this function for digest, because we use shash logic at - * cfs_crypto_hash_alloc. - */ - if (key != NULL) - err = crypto_hash_setkey(desc->tfm, key, key_len); - else if ((*type)->cht_key != 0) - err = crypto_hash_setkey(desc->tfm, - (unsigned char *)&((*type)->cht_key), - (*type)->cht_size); - - if (err != 0) { - crypto_free_hash(desc->tfm); - return err; - } - - CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", - (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name, - (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name, - cfs_crypto_hash_speeds[alg_id]); - - return crypto_hash_init(desc); -} - -int cfs_crypto_hash_digest(unsigned char alg_id, - const void *buf, unsigned int buf_len, - unsigned char *key, unsigned int key_len, - unsigned char *hash, unsigned int *hash_len) -{ - struct scatterlist sl; - struct hash_desc hdesc; - int err; - const struct cfs_crypto_hash_type *type; - - if (buf == NULL || buf_len == 0 || hash_len == NULL) - return -EINVAL; - - err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len); - if (err != 0) - return err; - - if (hash == NULL || *hash_len < type->cht_size) { - *hash_len = type->cht_size; - crypto_free_hash(hdesc.tfm); - return -ENOSPC; - } - sg_init_one(&sl, buf, buf_len); - - hdesc.flags = 0; - err = crypto_hash_digest(&hdesc, &sl, sl.length, hash); - crypto_free_hash(hdesc.tfm); - - return err; -} -EXPORT_SYMBOL(cfs_crypto_hash_digest); - -struct cfs_crypto_hash_desc * - cfs_crypto_hash_init(unsigned char alg_id, - unsigned char *key, unsigned int key_len) -{ - - struct hash_desc *hdesc; - int err; - const struct cfs_crypto_hash_type *type; - - hdesc = kmalloc(sizeof(*hdesc), 0); - if (hdesc == NULL) - return ERR_PTR(-ENOMEM); - - err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len); - - if (err) { - kfree(hdesc); - return ERR_PTR(err); - } - return (struct cfs_crypto_hash_desc *)hdesc; -} -EXPORT_SYMBOL(cfs_crypto_hash_init); - -int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc, - struct page *page, unsigned int offset, - unsigned int len) -{ - struct scatterlist sl; - - sg_init_table(&sl, 1); - sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK); - - return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length); -} -EXPORT_SYMBOL(cfs_crypto_hash_update_page); - -int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc, - const void *buf, unsigned int buf_len) -{ - struct scatterlist sl; - - sg_init_one(&sl, buf, buf_len); - - return crypto_hash_update((struct hash_desc 
*)hdesc, &sl, sl.length); -} -EXPORT_SYMBOL(cfs_crypto_hash_update); - -/* If hash_len pointer is NULL - destroy descriptor. */ -int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, - unsigned char *hash, unsigned int *hash_len) -{ - int err; - int size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm); - - if (hash_len == NULL) { - crypto_free_hash(((struct hash_desc *)hdesc)->tfm); - kfree(hdesc); - return 0; - } - if (hash == NULL || *hash_len < size) { - *hash_len = size; - return -ENOSPC; - } - err = crypto_hash_final((struct hash_desc *) hdesc, hash); - - if (err < 0) { - /* May be caller can fix error */ - return err; - } - crypto_free_hash(((struct hash_desc *)hdesc)->tfm); - kfree(hdesc); - return err; -} -EXPORT_SYMBOL(cfs_crypto_hash_final); - -static void cfs_crypto_performance_test(unsigned char alg_id, - const unsigned char *buf, - unsigned int buf_len) -{ - unsigned long start, end; - int bcount, err = 0; - int sec = 1; /* do test only 1 sec */ - unsigned char hash[64]; - unsigned int hash_len = 64; - - for (start = jiffies, end = start + sec * HZ, bcount = 0; - time_before(jiffies, end); bcount++) { - err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, - hash, &hash_len); - if (err) - break; - - } - end = jiffies; - - if (err) { - cfs_crypto_hash_speeds[alg_id] = -1; - CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n", - cfs_crypto_hash_name(alg_id), err); - } else { - unsigned long tmp; - - tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * - 1000) / (1024 * 1024); - cfs_crypto_hash_speeds[alg_id] = (int)tmp; - } - CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n", - cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]); -} - -int cfs_crypto_hash_speed(unsigned char hash_alg) -{ - if (hash_alg < CFS_HASH_ALG_MAX) - return cfs_crypto_hash_speeds[hash_alg]; - else - return -1; -} -EXPORT_SYMBOL(cfs_crypto_hash_speed); - -/** - * Do performance test for all hash algorithms. - */ -static int cfs_crypto_test_hashes(void) -{ - unsigned char i; - unsigned char *data; - unsigned int j; - /* Data block size for testing hash. Maximum - * kmalloc size for 2.6.18 kernel is 128K */ - unsigned int data_len = 1 * 128 * 1024; - - data = kmalloc(data_len, 0); - if (data == NULL) { - CERROR("Failed to allocate mem\n"); - return -ENOMEM; - } - - for (j = 0; j < data_len; j++) - data[j] = j & 0xff; - - for (i = 0; i < CFS_HASH_ALG_MAX; i++) - cfs_crypto_performance_test(i, data, data_len); - - kfree(data); - return 0; -} - -static int adler32; - -int cfs_crypto_register(void) -{ - request_module("crc32c"); - - adler32 = cfs_crypto_adler32_register(); - - /* check all algorithms and do performance test */ - cfs_crypto_test_hashes(); - return 0; -} - -void cfs_crypto_unregister(void) -{ - if (adler32 == 0) - cfs_crypto_adler32_unregister(); - - return; -} diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h deleted file mode 100644 index 18e8cd4d8..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h +++ /dev/null @@ -1,29 +0,0 @@ - /* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
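
Editorial sketch, not part of the original patch: the init/update/final flow
implemented by the cfs_crypto_hash_* functions above. CFS_HASH_ALG_ADLER32
is assumed from libcfs_crypto.h; the name demo_hash_buf is invented. Passing
a NULL hash_len to cfs_crypto_hash_final() only destroys the descriptor, as
noted above.

static int demo_hash_buf(const void *buf, unsigned int len,
			 unsigned char *hash, unsigned int *hash_len)
{
	struct cfs_crypto_hash_desc *desc;
	int rc;

	desc = cfs_crypto_hash_init(CFS_HASH_ALG_ADLER32, NULL, 0);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	rc = cfs_crypto_hash_update(desc, buf, len);
	if (rc == 0)
		rc = cfs_crypto_hash_final(desc, hash, hash_len);
	else
		cfs_crypto_hash_final(desc, NULL, NULL);	/* just free */
	return rc;
}
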
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/** - * Functions for start/stop shash adler32 algorithm. - */ -int cfs_crypto_adler32_register(void); -void cfs_crypto_adler32_unregister(void); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c deleted file mode 100644 index 68515d913..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/linux/linux-curproc.c - * - * Lustre curproc API implementation for Linux kernel - * - * Author: Nikita Danilov - */ - -#include -#include - -#include -#include - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../../include/linux/libcfs/libcfs.h" - -/* - * Implementation of cfs_curproc API (see portals/include/libcfs/curproc.h) - * for Linux kernel. 
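The curproc helpers deleted below follow the standard prepare_creds()/commit_creds() pattern. A minimal sketch of how a caller might toggle a capability around a privileged operation; the capability chosen and the placeholder work are hypothetical.

#include <linux/capability.h>

/* Sketch: raise one capability for the duration of an operation,
 * restoring the previous state afterwards. */
static int example_with_capability(void)
{
	int was_raised = cfs_cap_raised(CAP_SYS_RESOURCE);

	if (!was_raised)
		cfs_cap_raise(CAP_SYS_RESOURCE);

	/* ... privileged work goes here ... */

	if (!was_raised)
		cfs_cap_lower(CAP_SYS_RESOURCE);
	return 0;
}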
- */ - -void cfs_cap_raise(cfs_cap_t cap) -{ - struct cred *cred; - - cred = prepare_creds(); - if (cred) { - cap_raise(cred->cap_effective, cap); - commit_creds(cred); - } -} - -void cfs_cap_lower(cfs_cap_t cap) -{ - struct cred *cred; - - cred = prepare_creds(); - if (cred) { - cap_lower(cred->cap_effective, cap); - commit_creds(cred); - } -} - -int cfs_cap_raised(cfs_cap_t cap) -{ - return cap_raised(current_cap(), cap); -} - -static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap) -{ - /* XXX lost high byte */ - *cap = kcap.cap[0]; -} - -cfs_cap_t cfs_curproc_cap_pack(void) -{ - cfs_cap_t cap; - - cfs_kernel_cap_pack(current_cap(), &cap); - return cap; -} - -EXPORT_SYMBOL(cfs_cap_raise); -EXPORT_SYMBOL(cfs_cap_lower); -EXPORT_SYMBOL(cfs_cap_raised); -EXPORT_SYMBOL(cfs_curproc_cap_pack); - -/* - * Local variables: - * c-indentation-style: "K&R" - * c-basic-offset: 8 - * tab-width: 8 - * fill-column: 80 - * scroll-step: 1 - * End: - */ diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c deleted file mode 100644 index 59c7bf3cb..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/linux/linux-debug.c - * - * Author: Phil Schwan - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -# define DEBUG_SUBSYSTEM S_LNET - -#include "../../../include/linux/libcfs/libcfs.h" - -#include "../tracefile.h" - -#include - -char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall"; -char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall"; - -/** - * Upcall function once a Lustre log has been dumped. 
- * - * \param file path of the dumped log - */ -void libcfs_run_debug_log_upcall(char *file) -{ - char *argv[3]; - int rc; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL}; - - argv[0] = lnet_debug_log_upcall; - - LASSERTF(file != NULL, "called on a null filename\n"); - argv[1] = file; /* only need to pass the path of the file */ - - argv[2] = NULL; - - rc = call_usermodehelper(argv[0], argv, envp, 1); - if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n", - rc, argv[0], argv[1]); - } else { - CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n", - argv[0], argv[1]); - } -} - -void libcfs_run_upcall(char **argv) -{ - int rc; - int argc; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL}; - - argv[0] = lnet_upcall; - argc = 1; - while (argv[argc] != NULL) - argc++; - - LASSERT(argc >= 2); - - rc = call_usermodehelper(argv[0], argv, envp, 1); - if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n", - rc, argv[0], argv[1], - argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], - argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], - argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], - argc < 6 ? "" : ",..."); - } else { - CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n", - argv[0], argv[1], - argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], - argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], - argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], - argc < 6 ? "" : ",..."); - } -} - -void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata) -{ - char *argv[6]; - char buf[32]; - - snprintf(buf, sizeof(buf), "%d", msgdata->msg_line); - - argv[1] = "LBUG"; - argv[2] = (char *)msgdata->msg_file; - argv[3] = (char *)msgdata->msg_fn; - argv[4] = buf; - argv[5] = NULL; - - libcfs_run_upcall (argv); -} - -/* coverity[+kill] */ -void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) -{ - libcfs_catastrophe = 1; - libcfs_debug_msg(msgdata, "LBUG\n"); - - if (in_interrupt()) { - panic("LBUG in interrupt.\n"); - /* not reached */ - } - - dump_stack(); - if (!libcfs_panic_on_lbug) - libcfs_debug_dumplog(); - libcfs_run_lbug_upcall(msgdata); - if (libcfs_panic_on_lbug) - panic("LBUG"); - set_task_state(current, TASK_UNINTERRUPTIBLE); - while (1) - schedule(); -} - -static int panic_notifier(struct notifier_block *self, unsigned long unused1, - void *unused2) -{ - if (libcfs_panic_in_progress) - return 0; - - libcfs_panic_in_progress = 1; - mb(); - - return 0; -} - -static struct notifier_block libcfs_panic_notifier = { - .notifier_call = panic_notifier, - .next = NULL, - .priority = 10000, -}; - -void libcfs_register_panic_notifier(void) -{ - atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier); -} - -void libcfs_unregister_panic_notifier(void) -{ - atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier); -} - -EXPORT_SYMBOL(libcfs_run_lbug_upcall); -EXPORT_SYMBOL(lbug_with_loc); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c deleted file mode 100644 index 025e2f002..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
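libcfs_run_upcall() above is built on call_usermodehelper(). A minimal standalone sketch of the same pattern, assuming process context; the helper path /usr/bin/logger is illustrative only.

#include <linux/kmod.h>

/* Sketch: synchronous usermode upcall in the style of
 * libcfs_run_upcall() above. UMH_WAIT_PROC waits for the helper
 * to exit before returning. */
static int example_upcall(const char *msg)
{
	char *argv[] = { "/usr/bin/logger", (char *)msg, NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
			 NULL };

	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}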
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - */ -/* - * This file creates a memory allocation primitive for Lustre, that - * allows to fallback to vmalloc allocations should regular kernel allocations - * fail due to size or system memory fragmentation. - * - * Author: Oleg Drokin - * - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate Technology. - */ -#include -#include - -#include "../../../include/linux/libcfs/libcfs.h" - -void *libcfs_kvzalloc(size_t size, gfp_t flags) -{ - void *ret; - - ret = kzalloc(size, flags | __GFP_NOWARN); - if (!ret) - ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); - return ret; -} -EXPORT_SYMBOL(libcfs_kvzalloc); - -void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size, - gfp_t flags) -{ - void *ret; - - ret = kzalloc_node(size, flags | __GFP_NOWARN, - cfs_cpt_spread_node(cptab, cpt)); - if (!ret) { - WARN_ON(!(flags & (__GFP_FS|__GFP_HIGH))); - ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); - } - - return ret; -} -EXPORT_SYMBOL(libcfs_kvzalloc_cpt); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c deleted file mode 100644 index 70a99cf01..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
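libcfs_kvzalloc() above implements a common fallback idiom: try a physically contiguous allocation first, then quietly fall back to vmalloc for large or fragmented requests. A self-contained sketch (the three-argument __vmalloc() matches this kernel generation):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Sketch: contiguous-first zeroed allocation with a vmalloc fallback,
 * mirroring libcfs_kvzalloc() above. Free the result with kvfree(). */
static void *example_kvzalloc(size_t size, gfp_t flags)
{
	void *ptr;

	/* __GFP_NOWARN: a kmalloc failure here is expected and handled */
	ptr = kzalloc(size, flags | __GFP_NOWARN);
	if (!ptr)
		ptr = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
	return ptr;
}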
- */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../../include/linux/libcfs/libcfs.h" - -#define LNET_MINOR 240 - -int libcfs_ioctl_getdata(char *buf, char *end, void *arg) -{ - struct libcfs_ioctl_hdr *hdr; - struct libcfs_ioctl_data *data; - int orig_len; - - hdr = (struct libcfs_ioctl_hdr *)buf; - data = (struct libcfs_ioctl_data *)buf; - - if (copy_from_user(buf, arg, sizeof(*hdr))) - return -EFAULT; - - if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) { - CERROR("PORTALS: version mismatch kernel vs application\n"); - return -EINVAL; - } - - if (hdr->ioc_len >= end - buf) { - CERROR("PORTALS: user buffer exceeds kernel buffer\n"); - return -EINVAL; - } - - if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) { - CERROR("PORTALS: user buffer too small for ioctl\n"); - return -EINVAL; - } - - orig_len = hdr->ioc_len; - if (copy_from_user(buf, arg, hdr->ioc_len)) - return -EFAULT; - if (orig_len != data->ioc_len) - return -EINVAL; - - if (libcfs_ioctl_is_invalid(data)) { - CERROR("PORTALS: ioctl not correctly formatted\n"); - return -EINVAL; - } - - if (data->ioc_inllen1) - data->ioc_inlbuf1 = &data->ioc_bulk[0]; - - if (data->ioc_inllen2) - data->ioc_inlbuf2 = &data->ioc_bulk[0] + - cfs_size_round(data->ioc_inllen1); - - return 0; -} - -int libcfs_ioctl_popdata(void *arg, void *data, int size) -{ - if (copy_to_user((char *)arg, data, size)) - return -EFAULT; - return 0; -} - -static int -libcfs_psdev_open(struct inode *inode, struct file *file) -{ - struct libcfs_device_userstate **pdu = NULL; - int rc = 0; - - if (!inode) - return -EINVAL; - pdu = (struct libcfs_device_userstate **)&file->private_data; - if (libcfs_psdev_ops.p_open != NULL) - rc = libcfs_psdev_ops.p_open(0, (void *)pdu); - else - return -EPERM; - return rc; -} - -/* called when closing /dev/device */ -static int -libcfs_psdev_release(struct inode *inode, struct file *file) -{ - struct libcfs_device_userstate *pdu; - int rc = 0; - - if (!inode) - return -EINVAL; - pdu = file->private_data; - if (libcfs_psdev_ops.p_close != NULL) - rc = libcfs_psdev_ops.p_close(0, (void *)pdu); - else - rc = -EPERM; - return rc; -} - -static long libcfs_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct cfs_psdev_file pfile; - int rc = 0; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE || - _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR || - _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) { - CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n", - _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd)); - return -EINVAL; - } - - /* Handle platform-dependent IOC requests */ - switch (cmd) { - case IOC_LIBCFS_PANIC: - if (!capable(CFS_CAP_SYS_BOOT)) - return -EPERM; - panic("debugctl-invoked panic"); - return 0; - case IOC_LIBCFS_MEMHOG: - if (!capable(CFS_CAP_SYS_ADMIN)) - return -EPERM; - /* go thought */ - } - - pfile.off = 0; - pfile.private_data = file->private_data; - if (libcfs_psdev_ops.p_ioctl != NULL) - rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg); - else - rc = -EPERM; - return rc; -} - -static const struct file_operations libcfs_fops = { - .unlocked_ioctl = libcfs_ioctl, - .open = libcfs_psdev_open, - .release = libcfs_psdev_release, -}; - -struct miscdevice libcfs_dev = { - .minor = LNET_MINOR, - .name = "lnet", - .fops = &libcfs_fops, -}; diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c deleted file mode 100644 index 890844602..000000000 --- 
a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#include
-#include
-#include
-#include
-
-#include "../../../include/linux/libcfs/libcfs.h"
-
-#if defined(CONFIG_KGDB)
-#include
-#endif
-
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be woken up again and again, even when only a few of them need to be
- * active most of the time. This is bad for performance because the cache
- * can be polluted by different threads.
- *
- * A LIFO list resolves this problem because it always wakes up the most
- * recently active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if add_wait_queue_exclusive_head is used.
- */ -void -add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link) -{ - unsigned long flags; - - spin_lock_irqsave(&waitq->lock, flags); - __add_wait_queue_exclusive(waitq, link); - spin_unlock_irqrestore(&waitq->lock, flags); -} -EXPORT_SYMBOL(add_wait_queue_exclusive_head); - -sigset_t -cfs_block_allsigs(void) -{ - unsigned long flags; - sigset_t old; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - old = current->blocked; - sigfillset(¤t->blocked); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - - return old; -} -EXPORT_SYMBOL(cfs_block_allsigs); - -sigset_t cfs_block_sigs(unsigned long sigs) -{ - unsigned long flags; - sigset_t old; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - old = current->blocked; - sigaddsetmask(¤t->blocked, sigs); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - return old; -} -EXPORT_SYMBOL(cfs_block_sigs); - -/* Block all signals except for the @sigs */ -sigset_t cfs_block_sigsinv(unsigned long sigs) -{ - unsigned long flags; - sigset_t old; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - old = current->blocked; - sigaddsetmask(¤t->blocked, ~sigs); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - - return old; -} -EXPORT_SYMBOL(cfs_block_sigsinv); - -void -cfs_restore_sigs(sigset_t old) -{ - unsigned long flags; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - current->blocked = old; - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); -} -EXPORT_SYMBOL(cfs_restore_sigs); - -int -cfs_signal_pending(void) -{ - return signal_pending(current); -} -EXPORT_SYMBOL(cfs_signal_pending); - -void -cfs_clear_sigpending(void) -{ - unsigned long flags; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - clear_tsk_thread_flag(current, TIF_SIGPENDING); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); -} -EXPORT_SYMBOL(cfs_clear_sigpending); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c deleted file mode 100644 index 64a136cd5..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. 
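A sketch of the usual calling pattern for the signal helpers above: block everything except fatal signals, do the work, then restore the caller's original mask. SIGKILL is only an example of a signal left unblocked.

#include <linux/signal.h>

/* Sketch: block all ordinary signals across a critical section. */
static void example_blocked_section(void)
{
	sigset_t old;

	old = cfs_block_sigsinv(sigmask(SIGKILL));

	/* ... work that must not be interrupted by ordinary signals ... */

	cfs_restore_sigs(old);
}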
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-#define LUSTRE_TRACEFILE_PRIVATE
-
-#include "../../../include/linux/libcfs/libcfs.h"
-#include "../tracefile.h"
-
-/* percentage of the total debug memory to use for each buffer type */
-static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
-	80,	/* 80% pages for CFS_TCD_TYPE_PROC */
-	10,	/* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
-	10	/* 10% pages for CFS_TCD_TYPE_IRQ */
-};
-
-char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-
-static DECLARE_RWSEM(cfs_tracefile_sem);
-
-int cfs_tracefile_init_arch(void)
-{
-	int i;
-	int j;
-	struct cfs_trace_cpu_data *tcd;
-
-	/* initialize trace_data */
-	memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
-	for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
-		cfs_trace_data[i] =
-			kmalloc(sizeof(union cfs_trace_data_union) *
-				num_possible_cpus(), GFP_KERNEL);
-		if (cfs_trace_data[i] == NULL)
-			goto out;
-
-	}
-
-	/* arch related info initialized */
-	cfs_tcd_for_each(tcd, i, j) {
-		spin_lock_init(&tcd->tcd_lock);
-		tcd->tcd_pages_factor = pages_factor[i];
-		tcd->tcd_type = i;
-		tcd->tcd_cpu = j;
-	}
-
-	for (i = 0; i < num_possible_cpus(); i++)
-		for (j = 0; j < 3; j++) {
-			cfs_trace_console_buffers[i][j] =
				kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
-					GFP_KERNEL);
-
-			if (cfs_trace_console_buffers[i][j] == NULL)
-				goto out;
-		}
-
-	return 0;
-
-out:
-	cfs_tracefile_fini_arch();
-	printk(KERN_ERR "lnet: Not enough memory\n");
-	return -ENOMEM;
-}
-
-void cfs_tracefile_fini_arch(void)
-{
-	int i;
-	int j;
-
-	for (i = 0; i < num_possible_cpus(); i++)
-		for (j = 0; j < 3; j++) {
-			kfree(cfs_trace_console_buffers[i][j]);
-			cfs_trace_console_buffers[i][j] = NULL;
-		}
-
-	for (i = 0; cfs_trace_data[i] != NULL; i++) {
-		kfree(cfs_trace_data[i]);
-		cfs_trace_data[i] = NULL;
-	}
-}
-
-void cfs_tracefile_read_lock(void)
-{
-	down_read(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_read_unlock(void)
-{
-	up_read(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_write_lock(void)
-{
-	down_write(&cfs_tracefile_sem);
-}
-
-void cfs_tracefile_write_unlock(void)
-{
-	up_write(&cfs_tracefile_sem);
-}
-
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
-{
-	if (in_irq())
-		return CFS_TCD_TYPE_IRQ;
-	else if (in_softirq())
-		return CFS_TCD_TYPE_SOFTIRQ;
-	else
-		return CFS_TCD_TYPE_PROC;
-}
-
-/*
- * The walking argument indicates that the locking comes from the iterator
- * over all tcd types, in which case we must also disable local irqs to
- * avoid deadlocks with other interrupt locks that might be taken. See
- * LU-1311 for details.
- */ -int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking) - __acquires(&tcd->tc_lock) -{ - __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); - if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) - spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags); - else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) - spin_lock_bh(&tcd->tcd_lock); - else if (unlikely(walking)) - spin_lock_irq(&tcd->tcd_lock); - else - spin_lock(&tcd->tcd_lock); - return 1; -} - -void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) - __releases(&tcd->tcd_lock) -{ - __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); - if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) - spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags); - else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) - spin_unlock_bh(&tcd->tcd_lock); - else if (unlikely(walking)) - spin_unlock_irq(&tcd->tcd_lock); - else - spin_unlock(&tcd->tcd_lock); -} - -int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, - struct cfs_trace_page *tage) -{ - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - return tcd->tcd_cpu == tage->cpu; -} - -void -cfs_set_ptldebug_header(struct ptldebug_header *header, - struct libcfs_debug_msg_data *msgdata, - unsigned long stack) -{ - struct timespec64 ts; - - ktime_get_real_ts64(&ts); - - header->ph_subsys = msgdata->msg_subsys; - header->ph_mask = msgdata->msg_mask; - header->ph_cpu_id = smp_processor_id(); - header->ph_type = cfs_trace_buf_idx_get(); - /* y2038 safe since all user space treats this as unsigned, but - * will overflow in 2106 */ - header->ph_sec = (u32)ts.tv_sec; - header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; - header->ph_stack = stack; - header->ph_pid = current->pid; - header->ph_line_num = msgdata->msg_line; - header->ph_extern_pid = 0; - return; -} - -static char * -dbghdr_to_err_string(struct ptldebug_header *hdr) -{ - switch (hdr->ph_subsys) { - case S_LND: - case S_LNET: - return "LNetError"; - default: - return "LustreError"; - } -} - -static char * -dbghdr_to_info_string(struct ptldebug_header *hdr) -{ - switch (hdr->ph_subsys) { - case S_LND: - case S_LNET: - return "LNet"; - default: - return "Lustre"; - } -} - -void cfs_print_to_console(struct ptldebug_header *hdr, int mask, - const char *buf, int len, const char *file, - const char *fn) -{ - char *prefix = "Lustre", *ptype = NULL; - - if ((mask & D_EMERG) != 0) { - prefix = dbghdr_to_err_string(hdr); - ptype = KERN_EMERG; - } else if ((mask & D_ERROR) != 0) { - prefix = dbghdr_to_err_string(hdr); - ptype = KERN_ERR; - } else if ((mask & D_WARNING) != 0) { - prefix = dbghdr_to_info_string(hdr); - ptype = KERN_WARNING; - } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) { - prefix = dbghdr_to_info_string(hdr); - ptype = KERN_INFO; - } - - if ((mask & D_CONSOLE) != 0) { - printk("%s%s: %.*s", ptype, prefix, len, buf); - } else { - printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, - hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num, - fn, len, buf); - } - return; -} - -int cfs_trace_max_debug_mb(void) -{ - int total_mb = (totalram_pages >> (20 - PAGE_SHIFT)); - - return max(512, (total_mb * 80)/100); -} diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c deleted file mode 100644 index 329d78ce2..000000000 --- a/drivers/staging/lustre/lustre/libcfs/module.c +++ /dev/null @@ -1,765 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include -#include - -# define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" -#include - -#include "../../include/linux/libcfs/libcfs_crypto.h" -#include "../../include/linux/lnet/lib-lnet.h" -#include "../../include/linux/lnet/lnet.h" -#include "tracefile.h" - -MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Portals v3.1"); -MODULE_LICENSE("GPL"); - -static struct dentry *lnet_debugfs_root; - -static void kportal_memhog_free(struct libcfs_device_userstate *ldu) -{ - struct page **level0p = &ldu->ldu_memhog_root_page; - struct page **level1p; - struct page **level2p; - int count1; - int count2; - - if (*level0p != NULL) { - - level1p = (struct page **)page_address(*level0p); - count1 = 0; - - while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) && - *level1p != NULL) { - - level2p = (struct page **)page_address(*level1p); - count2 = 0; - - while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) && - *level2p != NULL) { - - __free_page(*level2p); - ldu->ldu_memhog_pages--; - level2p++; - count2++; - } - - __free_page(*level1p); - ldu->ldu_memhog_pages--; - level1p++; - count1++; - } - - __free_page(*level0p); - ldu->ldu_memhog_pages--; - - *level0p = NULL; - } - - LASSERT(ldu->ldu_memhog_pages == 0); -} - -static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, - gfp_t flags) -{ - struct page **level0p; - struct page **level1p; - struct page **level2p; - int count1; - int count2; - - LASSERT(ldu->ldu_memhog_pages == 0); - LASSERT(ldu->ldu_memhog_root_page == NULL); - - if (npages < 0) - return -EINVAL; - - if (npages == 0) - return 0; - - level0p = &ldu->ldu_memhog_root_page; - *level0p = alloc_page(flags); - if (*level0p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level1p = (struct page **)page_address(*level0p); - count1 = 0; - memset(level1p, 0, PAGE_CACHE_SIZE); - - while (ldu->ldu_memhog_pages < npages && - count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - - if (cfs_signal_pending()) - return -EINTR; - - *level1p = alloc_page(flags); - if (*level1p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level2p = (struct page **)page_address(*level1p); - count2 = 0; - 
memset(level2p, 0, PAGE_CACHE_SIZE); - - while (ldu->ldu_memhog_pages < npages && - count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - - if (cfs_signal_pending()) - return -EINTR; - - *level2p = alloc_page(flags); - if (*level2p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level2p++; - count2++; - } - - level1p++; - count1++; - } - - return 0; -} - -/* called when opening /dev/device */ -static int libcfs_psdev_open(unsigned long flags, void *args) -{ - struct libcfs_device_userstate *ldu; - - try_module_get(THIS_MODULE); - - LIBCFS_ALLOC(ldu, sizeof(*ldu)); - if (ldu != NULL) { - ldu->ldu_memhog_pages = 0; - ldu->ldu_memhog_root_page = NULL; - } - *(struct libcfs_device_userstate **)args = ldu; - - return 0; -} - -/* called when closing /dev/device */ -static int libcfs_psdev_release(unsigned long flags, void *args) -{ - struct libcfs_device_userstate *ldu; - - ldu = (struct libcfs_device_userstate *)args; - if (ldu != NULL) { - kportal_memhog_free(ldu); - LIBCFS_FREE(ldu, sizeof(*ldu)); - } - - module_put(THIS_MODULE); - return 0; -} - -static DECLARE_RWSEM(ioctl_list_sem); -static LIST_HEAD(ioctl_list); - -int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand) -{ - int rc = 0; - - down_write(&ioctl_list_sem); - if (!list_empty(&hand->item)) - rc = -EBUSY; - else - list_add_tail(&hand->item, &ioctl_list); - up_write(&ioctl_list_sem); - - return rc; -} -EXPORT_SYMBOL(libcfs_register_ioctl); - -int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand) -{ - int rc = 0; - - down_write(&ioctl_list_sem); - if (list_empty(&hand->item)) - rc = -ENOENT; - else - list_del_init(&hand->item); - up_write(&ioctl_list_sem); - - return rc; -} -EXPORT_SYMBOL(libcfs_deregister_ioctl); - -static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, - void *arg, struct libcfs_ioctl_data *data) -{ - int err = -EINVAL; - - switch (cmd) { - case IOC_LIBCFS_CLEAR_DEBUG: - libcfs_debug_clear_buffer(); - return 0; - /* - * case IOC_LIBCFS_PANIC: - * Handled in arch/cfs_module.c - */ - case IOC_LIBCFS_MARK_DEBUG: - if (data->ioc_inlbuf1 == NULL || - data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') - return -EINVAL; - libcfs_debug_mark_buffer(data->ioc_inlbuf1); - return 0; - case IOC_LIBCFS_MEMHOG: - if (pfile->private_data == NULL) { - err = -EINVAL; - } else { - kportal_memhog_free(pfile->private_data); - /* XXX The ioc_flags is not GFP flags now, need to be fixed */ - err = kportal_memhog_alloc(pfile->private_data, - data->ioc_count, - data->ioc_flags); - if (err != 0) - kportal_memhog_free(pfile->private_data); - } - break; - - default: { - struct libcfs_ioctl_handler *hand; - - err = -EINVAL; - down_read(&ioctl_list_sem); - list_for_each_entry(hand, &ioctl_list, item) { - err = hand->handle_ioctl(cmd, data); - if (err != -EINVAL) { - if (err == 0) - err = libcfs_ioctl_popdata(arg, - data, sizeof(*data)); - break; - } - } - up_read(&ioctl_list_sem); - break; - } - } - - return err; -} - -static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg) -{ - char *buf; - struct libcfs_ioctl_data *data; - int err = 0; - - LIBCFS_ALLOC_GFP(buf, 1024, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - /* 'cmd' and permissions get checked in our arch-specific caller */ - if (libcfs_ioctl_getdata(buf, buf + 800, arg)) { - CERROR("PORTALS ioctl: data error\n"); - err = -EINVAL; - goto out; - } - data = (struct libcfs_ioctl_data *)buf; - - err = libcfs_ioctl_int(pfile, cmd, arg, data); - -out: - LIBCFS_FREE(buf, 1024); - return err; -} - -struct 
cfs_psdev_ops libcfs_psdev_ops = { - libcfs_psdev_open, - libcfs_psdev_release, - NULL, - NULL, - libcfs_ioctl -}; - -static int proc_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, - loff_t pos, void __user *buffer, int len)) -{ - int rc = handler(data, write, *ppos, buffer, *lenp); - - if (rc < 0) - return rc; - - if (write) { - *ppos += *lenp; - } else { - *lenp = rc; - *ppos += rc; - } - return 0; -} - -static int __proc_dobitmasks(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - const int tmpstrlen = 512; - char *tmpstr; - int rc; - unsigned int *mask = data; - int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; - int is_printk = (mask == &libcfs_printk) ? 1 : 0; - - rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen); - if (rc < 0) - return rc; - - if (!write) { - libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys); - rc = strlen(tmpstr); - - if (pos >= rc) { - rc = 0; - } else { - rc = cfs_trace_copyout_string(buffer, nob, - tmpstr + pos, "\n"); - } - } else { - rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob); - if (rc < 0) { - kfree(tmpstr); - return rc; - } - - rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys); - /* Always print LBUG/LASSERT to console, so keep this mask */ - if (is_printk) - *mask |= D_EMERG; - } - - kfree(tmpstr); - return rc; -} - -static int proc_dobitmasks(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dobitmasks); -} - -static int __proc_dump_kernel(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - if (!write) - return 0; - - return cfs_trace_dump_debug_buffer_usrstr(buffer, nob); -} - -static int proc_dump_kernel(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dump_kernel); -} - -static int __proc_daemon_file(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - if (!write) { - int len = strlen(cfs_tracefile); - - if (pos >= len) - return 0; - - return cfs_trace_copyout_string(buffer, nob, - cfs_tracefile + pos, "\n"); - } - - return cfs_trace_daemon_command_usrstr(buffer, nob); -} - -static int proc_daemon_file(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_daemon_file); -} - -static int libcfs_force_lbug(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - if (write) - LBUG(); - return 0; -} - -static int proc_fail_loc(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int rc; - long old_fail_loc = cfs_fail_loc; - - rc = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); - if (old_fail_loc != cfs_fail_loc) - wake_up(&cfs_race_waitq); - return rc; -} - -static int __proc_cpt_table(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - char *buf = NULL; - int len = 4096; - int rc = 0; - - if (write) - return -EPERM; - - LASSERT(cfs_cpt_table != NULL); - - while (1) { - LIBCFS_ALLOC(buf, len); - if (buf == NULL) - return -ENOMEM; - - rc = cfs_cpt_table_print(cfs_cpt_table, buf, len); - if (rc >= 0) - break; - - if (rc == -EFBIG) { - LIBCFS_FREE(buf, len); - len <<= 1; - continue; - } - goto out; - } - - if (pos >= rc) { - rc = 0; 
- goto out; - } - - rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL); - out: - if (buf != NULL) - LIBCFS_FREE(buf, len); - return rc; -} - -static int proc_cpt_table(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_cpt_table); -} - -static struct ctl_table lnet_table[] = { - /* - * NB No .strategy entries have been provided since sysctl(8) prefers - * to go via /proc for portability. - */ - { - .procname = "debug", - .data = &libcfs_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "subsystem_debug", - .data = &libcfs_subsystem_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "printk", - .data = &libcfs_printk, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "cpu_partition_table", - .maxlen = 128, - .mode = 0444, - .proc_handler = &proc_cpt_table, - }, - - { - .procname = "upcall", - .data = lnet_upcall, - .maxlen = sizeof(lnet_upcall), - .mode = 0644, - .proc_handler = &proc_dostring, - }, - { - .procname = "debug_log_upcall", - .data = lnet_debug_log_upcall, - .maxlen = sizeof(lnet_debug_log_upcall), - .mode = 0644, - .proc_handler = &proc_dostring, - }, - { - .procname = "catastrophe", - .data = &libcfs_catastrophe, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .procname = "dump_kernel", - .maxlen = 256, - .mode = 0200, - .proc_handler = &proc_dump_kernel, - }, - { - .procname = "daemon_file", - .mode = 0644, - .maxlen = 256, - .proc_handler = &proc_daemon_file, - }, - { - .procname = "force_lbug", - .data = NULL, - .maxlen = 0, - .mode = 0200, - .proc_handler = &libcfs_force_lbug - }, - { - .procname = "fail_loc", - .data = &cfs_fail_loc, - .maxlen = sizeof(cfs_fail_loc), - .mode = 0644, - .proc_handler = &proc_fail_loc - }, - { - .procname = "fail_val", - .data = &cfs_fail_val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - } -}; - -static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = { - { "console_ratelimit", - "/sys/module/libcfs/parameters/libcfs_console_ratelimit"}, - { "debug_path", - "/sys/module/libcfs/parameters/libcfs_debug_file_path"}, - { "panic_on_lbug", - "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"}, - { "libcfs_console_backoff", - "/sys/module/libcfs/parameters/libcfs_console_backoff"}, - { "debug_mb", - "/sys/module/libcfs/parameters/libcfs_debug_mb"}, - { "console_min_delay_centisecs", - "/sys/module/libcfs/parameters/libcfs_console_min_delay"}, - { "console_max_delay_centisecs", - "/sys/module/libcfs/parameters/libcfs_console_max_delay"}, - {}, -}; - -static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf, - size_t count, loff_t *ppos) -{ - struct ctl_table *table = filp->private_data; - int error; - - error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos); - if (!error) - error = count; - - return error; -} - -static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct ctl_table *table = filp->private_data; - int error; - - error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos); - if (!error) - error = count; - - return error; -} - -static const struct file_operations lnet_debugfs_file_operations = { - .open = simple_open, - .read = lnet_debugfs_read, - .write = 
lnet_debugfs_write, - .llseek = default_llseek, -}; - -void lustre_insert_debugfs(struct ctl_table *table, - const struct lnet_debugfs_symlink_def *symlinks) -{ - struct dentry *entry; - - if (lnet_debugfs_root == NULL) - lnet_debugfs_root = debugfs_create_dir("lnet", NULL); - - /* Even if we cannot create, just ignore it altogether) */ - if (IS_ERR_OR_NULL(lnet_debugfs_root)) - return; - - for (; table->procname; table++) - entry = debugfs_create_file(table->procname, table->mode, - lnet_debugfs_root, table, - &lnet_debugfs_file_operations); - - for (; symlinks && symlinks->name; symlinks++) - entry = debugfs_create_symlink(symlinks->name, - lnet_debugfs_root, - symlinks->target); - -} -EXPORT_SYMBOL_GPL(lustre_insert_debugfs); - -static void lustre_remove_debugfs(void) -{ - if (lnet_debugfs_root != NULL) - debugfs_remove_recursive(lnet_debugfs_root); - - lnet_debugfs_root = NULL; -} - -static int init_libcfs_module(void) -{ - int rc; - - rc = libcfs_debug_init(5 * 1024 * 1024); - if (rc < 0) { - pr_err("LustreError: libcfs_debug_init: %d\n", rc); - return rc; - } - - rc = cfs_cpu_init(); - if (rc != 0) - goto cleanup_debug; - - rc = misc_register(&libcfs_dev); - if (rc) { - CERROR("misc_register: error %d\n", rc); - goto cleanup_cpu; - } - - rc = cfs_wi_startup(); - if (rc) { - CERROR("initialize workitem: error %d\n", rc); - goto cleanup_deregister; - } - - /* max to 4 threads, should be enough for rehash */ - rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4); - rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY, - rc, &cfs_sched_rehash); - if (rc != 0) { - CERROR("Startup workitem scheduler: error: %d\n", rc); - goto cleanup_deregister; - } - - rc = cfs_crypto_register(); - if (rc) { - CERROR("cfs_crypto_register: error %d\n", rc); - goto cleanup_wi; - } - - lustre_insert_debugfs(lnet_table, lnet_debugfs_symlinks); - - CDEBUG(D_OTHER, "portals setup OK\n"); - return 0; - cleanup_wi: - cfs_wi_shutdown(); - cleanup_deregister: - misc_deregister(&libcfs_dev); -cleanup_cpu: - cfs_cpu_fini(); - cleanup_debug: - libcfs_debug_cleanup(); - return rc; -} - -static void exit_libcfs_module(void) -{ - int rc; - - lustre_remove_debugfs(); - - if (cfs_sched_rehash) { - cfs_wi_sched_destroy(cfs_sched_rehash); - cfs_sched_rehash = NULL; - } - - cfs_crypto_unregister(); - cfs_wi_shutdown(); - - misc_deregister(&libcfs_dev); - - cfs_cpu_fini(); - - rc = libcfs_debug_cleanup(); - if (rc) - pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc); -} - -MODULE_VERSION("1.0.0"); - -module_init(init_libcfs_module); -module_exit(exit_libcfs_module); diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lustre/libcfs/prng.c deleted file mode 100644 index 4147664ff..000000000 --- a/drivers/staging/lustre/lustre/libcfs/prng.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
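init_libcfs_module() above unwinds failures with a reverse-order goto ladder, tearing down exactly the subsystems that were already brought up. A minimal sketch of the idiom; subsys_a/b/c_init() and the matching *_fini() functions are hypothetical.

/* Sketch: reverse-order error unwinding as in init_libcfs_module(). */
static int example_init(void)
{
	int rc;

	rc = subsys_a_init();
	if (rc)
		return rc;

	rc = subsys_b_init();
	if (rc)
		goto cleanup_a;

	rc = subsys_c_init();
	if (rc)
		goto cleanup_b;

	return 0;

cleanup_b:
	subsys_b_fini();
cleanup_a:
	subsys_a_fini();
	return rc;
}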
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/prng.c - * - * concatenation of following two 16-bit multiply with carry generators - * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16, - * number and carry packed within the same 32 bit integer. - * algorithm recommended by Marsaglia -*/ - -#include "../../include/linux/libcfs/libcfs.h" - -/* -From: George Marsaglia -Newsgroups: sci.math -Subject: Re: A RANDOM NUMBER GENERATOR FOR C -Date: Tue, 30 Sep 1997 05:29:35 -0700 - - * You may replace the two constants 36969 and 18000 by any - * pair of distinct constants from this list: - * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584 - * 19599 19950 20088 20508 20544 20664 20814 20970 21153 21243 - * 21423 21723 21954 22125 22188 22293 22860 22938 22965 22974 - * 23109 23124 23163 23208 23508 23520 23553 23658 23865 24114 - * 24219 24660 24699 24864 24948 25023 25308 25443 26004 26088 - * 26154 26550 26679 26838 27183 27258 27753 27795 27810 27834 - * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013 - * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083 - * (or any other 16-bit constants k for which both k*2^16-1 - * and k*2^15-1 are prime) */ - -#define RANDOM_CONST_A 18030 -#define RANDOM_CONST_B 29013 - -static unsigned int seed_x = 521288629; -static unsigned int seed_y = 362436069; - -/** - * cfs_rand - creates new seeds - * - * First it creates new seeds from the previous seeds. Then it generates a - * new pseudo random number for use. - * - * Returns a pseudo-random 32-bit integer - */ -unsigned int cfs_rand(void) -{ - seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16); - seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16); - - return ((seed_x << 16) + (seed_y & 65535)); -} -EXPORT_SYMBOL(cfs_rand); - -/** - * cfs_srand - sets the initial seed - * @seed1 : (seed_x) should have the most entropy in the low bits of the word - * @seed2 : (seed_y) should have the most entropy in the high bits of the word - * - * Replaces the original seeds with new values. Used to generate a new pseudo - * random numbers. 
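cfs_rand() above is exactly the Marsaglia recurrence described in the header comment: two 16-bit multiply-with-carry generators whose state and carry share one 32-bit word. A standalone userspace version with the same constants and default seeds, for experimenting with the generator:

#include <stdio.h>
#include <stdint.h>

#define RANDOM_CONST_A 18030
#define RANDOM_CONST_B 29013

static uint32_t seed_x = 521288629;
static uint32_t seed_y = 362436069;

/* Low 16 bits hold the state, high 16 bits hold the carry. */
static uint32_t mwc_rand(void)
{
	seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16);
	seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16);
	return (seed_x << 16) + (seed_y & 65535);
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("%08x\n", mwc_rand());
	return 0;
}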
- */ -void cfs_srand(unsigned int seed1, unsigned int seed2) -{ - if (seed1) - seed_x = seed1; /* use default seeds if parameter is 0 */ - if (seed2) - seed_y = seed2; -} -EXPORT_SYMBOL(cfs_srand); - -/** - * cfs_get_random_bytes - generate a bunch of random numbers - * @buf : buffer to fill with random numbers - * @size: size of passed in buffer - * - * Fills a buffer with random bytes - */ -void cfs_get_random_bytes(void *buf, int size) -{ - int *p = buf; - int rem, tmp; - - LASSERT(size >= 0); - - rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size); - if (rem) { - get_random_bytes(&tmp, sizeof(tmp)); - tmp ^= cfs_rand(); - memcpy(buf, &tmp, rem); - p = buf + rem; - size -= rem; - } - - while (size >= sizeof(int)) { - get_random_bytes(&tmp, sizeof(tmp)); - *p = cfs_rand() ^ tmp; - size -= sizeof(int); - p++; - } - buf = p; - if (size) { - get_random_bytes(&tmp, sizeof(tmp)); - tmp ^= cfs_rand(); - memcpy(buf, &tmp, size); - } -} -EXPORT_SYMBOL(cfs_get_random_bytes); diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c deleted file mode 100644 index 65c4f1ab0..000000000 --- a/drivers/staging/lustre/lustre/libcfs/tracefile.c +++ /dev/null @@ -1,1165 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
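cfs_get_random_bytes() above copes with buffers of any alignment and size by handling the unaligned head and tail separately, so a typical caller just hands it raw storage. The 13-byte cookie below is deliberately neither int-aligned nor int-sized.

/* Sketch: a typical caller of cfs_get_random_bytes() above. */
static void example_random_cookie(void)
{
	unsigned char cookie[13];

	cfs_get_random_bytes(cookie, sizeof(cookie));
	/* cookie[] now mixes get_random_bytes() output with the
	 * cfs_rand() stream */
}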
- * - * libcfs/libcfs/tracefile.c - * - * Author: Zach Brown - * Author: Phil Schwan - */ - -#define DEBUG_SUBSYSTEM S_LNET -#define LUSTRE_TRACEFILE_PRIVATE -#include "tracefile.h" - -#include "../../include/linux/libcfs/libcfs.h" - -/* XXX move things up to the top, comment */ -union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned; - -char cfs_tracefile[TRACEFILE_NAME_SIZE]; -long long cfs_tracefile_size = CFS_TRACEFILE_SIZE; -static struct tracefiled_ctl trace_tctl; -static DEFINE_MUTEX(cfs_trace_thread_mutex); -static int thread_running; - -static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); - -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd); - -static inline struct cfs_trace_page * -cfs_tage_from_list(struct list_head *list) -{ - return list_entry(list, struct cfs_trace_page, linkage); -} - -static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) -{ - struct page *page; - struct cfs_trace_page *tage; - - /* My caller is trying to free memory */ - if (!in_interrupt() && memory_pressure_get()) - return NULL; - - /* - * Don't spam console with allocation failures: they will be reported - * by upper layer anyway. - */ - gfp |= __GFP_NOWARN; - page = alloc_page(gfp); - if (page == NULL) - return NULL; - - tage = kmalloc(sizeof(*tage), gfp); - if (tage == NULL) { - __free_page(page); - return NULL; - } - - tage->page = page; - atomic_inc(&cfs_tage_allocated); - return tage; -} - -static void cfs_tage_free(struct cfs_trace_page *tage) -{ - __LASSERT(tage != NULL); - __LASSERT(tage->page != NULL); - - __free_page(tage->page); - kfree(tage); - atomic_dec(&cfs_tage_allocated); -} - -static void cfs_tage_to_tail(struct cfs_trace_page *tage, - struct list_head *queue) -{ - __LASSERT(tage != NULL); - __LASSERT(queue != NULL); - - list_move_tail(&tage->linkage, queue); -} - -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, - struct list_head *stock) -{ - int i; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - - for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) { - struct cfs_trace_page *tage; - - tage = cfs_tage_alloc(gfp); - if (tage == NULL) - break; - list_add_tail(&tage->linkage, stock); - } - return i; -} - -/* return a page that has 'len' bytes left at the end */ -static struct cfs_trace_page * -cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) -{ - struct cfs_trace_page *tage; - - if (tcd->tcd_cur_pages > 0) { - __LASSERT(!list_empty(&tcd->tcd_pages)); - tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= PAGE_CACHE_SIZE) - return tage; - } - - if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { - if (tcd->tcd_cur_stock_pages > 0) { - tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); - --tcd->tcd_cur_stock_pages; - list_del_init(&tage->linkage); - } else { - tage = cfs_tage_alloc(GFP_ATOMIC); - if (unlikely(tage == NULL)) { - if ((!memory_pressure_get() || - in_interrupt()) && printk_ratelimit()) - printk(KERN_WARNING - "cannot allocate a tage (%ld)\n", - tcd->tcd_cur_pages); - return NULL; - } - } - - tage->used = 0; - tage->cpu = smp_processor_id(); - tage->type = tcd->tcd_type; - list_add_tail(&tage->linkage, &tcd->tcd_pages); - tcd->tcd_cur_pages++; - - if (tcd->tcd_cur_pages > 8 && thread_running) { - struct tracefiled_ctl *tctl = &trace_tctl; - /* - * wake up tracefiled to process some pages. 
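cfs_trace_refill_stock() above batches trace-page allocations onto a caller-supplied list. A sketch of how a refill might be driven from process context; the splice back into the tcd and the tcd_cur_stock_pages accounting are abridged assumptions, not the deleted code's actual caller.

/* Sketch: topping up a tcd's private stock of trace pages. */
static void example_refill_stock(struct cfs_trace_cpu_data *tcd)
{
	LIST_HEAD(stock);
	int added;

	/* GFP_KERNEL assumes process context; the logging hot path
	 * above uses GFP_ATOMIC instead */
	added = cfs_trace_refill_stock(tcd, GFP_KERNEL, &stock);

	list_splice_tail(&stock, &tcd->tcd_stock_pages);
	tcd->tcd_cur_stock_pages += added;
}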
- */
-			wake_up(&tctl->tctl_waitq);
-		}
-		return tage;
-	}
-	return NULL;
-}
-
-static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
-{
-	int pgcount = tcd->tcd_cur_pages / 10;
-	struct page_collection pc;
-	struct cfs_trace_page *tage;
-	struct cfs_trace_page *tmp;
-
-	/*
-	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
-	 * from here: this will lead to infinite recursion.
-	 */
-
-	if (printk_ratelimit())
-		printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
-		       pgcount + 1, tcd->tcd_cur_pages);
-
-	INIT_LIST_HEAD(&pc.pc_pages);
-
-	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
-		if (pgcount-- == 0)
-			break;
-
-		list_move_tail(&tage->linkage, &pc.pc_pages);
-		tcd->tcd_cur_pages--;
-	}
-	put_pages_on_tcd_daemon_list(&pc, tcd);
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
-						 unsigned long len)
-{
-	struct cfs_trace_page *tage;
-
-	/*
-	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
-	 * from here: this will lead to infinite recursion.
-	 */
-
-	if (len > PAGE_CACHE_SIZE) {
-		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
-		return NULL;
-	}
-
-	tage = cfs_trace_get_tage_try(tcd, len);
-	if (tage != NULL)
-		return tage;
-	if (thread_running)
-		cfs_tcd_shrink(tcd);
-	if (tcd->tcd_cur_pages > 0) {
-		tage = cfs_tage_from_list(tcd->tcd_pages.next);
-		tage->used = 0;
-		cfs_tage_to_tail(tage, &tcd->tcd_pages);
-	}
-	return tage;
-}
-
-int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
-		     const char *format, ...)
-{
-	va_list args;
-	int rc;
-
-	va_start(args, format);
-	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
-	va_end(args);
-
-	return rc;
-}
-EXPORT_SYMBOL(libcfs_debug_msg);
-
-int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
-		       const char *format1, va_list args,
-		       const char *format2, ...)
-{
-	struct cfs_trace_cpu_data *tcd = NULL;
-	struct ptldebug_header header = {0};
-	struct cfs_trace_page *tage;
-	/* string_buf is used only if tcd != NULL, and is always set then */
-	char *string_buf = NULL;
-	char *debug_buf;
-	int known_size;
-	int needed = 85;	/* average message length */
-	int max_nob;
-	va_list ap;
-	int depth;
-	int i;
-	int remain;
-	int mask = msgdata->msg_mask;
-	const char *file = kbasename(msgdata->msg_file);
-	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
-
-	tcd = cfs_trace_get_tcd();
-
-	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
-	 * pins us to a particular CPU. This avoids an smp_processor_id()
-	 * warning on Linux when debugging is enabled. */
-	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
-
-	if (tcd == NULL)		/* arch may not log in IRQ context */
-		goto console;
-
-	if (tcd->tcd_cur_pages == 0)
-		header.ph_flags |= PH_FLAG_FIRST_RECORD;
-
-	if (tcd->tcd_shutting_down) {
-		cfs_trace_put_tcd(tcd);
-		tcd = NULL;
-		goto console;
-	}
-
-	depth = __current_nesting_level();
-	known_size = strlen(file) + 1 + depth;
-	if (msgdata->msg_fn)
-		known_size += strlen(msgdata->msg_fn) + 1;
-
-	if (libcfs_debug_binary)
-		known_size += sizeof(header);
-
-	/*
-	 * '2' is used because vsnprintf returns the real size required for
-	 * the output _without_ the terminating NUL, so a second pass is
-	 * needed when the first 'needed' estimate is too small for this
-	 * format.
- */ - for (i = 0; i < 2; i++) { - tage = cfs_trace_get_tage(tcd, needed + known_size + 1); - if (tage == NULL) { - if (needed + known_size > PAGE_CACHE_SIZE) - mask |= D_ERROR; - - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - string_buf = (char *)page_address(tage->page) + - tage->used + known_size; - - max_nob = PAGE_CACHE_SIZE - tage->used - known_size; - if (max_nob <= 0) { - printk(KERN_EMERG "negative max_nob: %d\n", - max_nob); - mask |= D_ERROR; - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - needed = 0; - if (format1) { - va_copy(ap, args); - needed = vsnprintf(string_buf, max_nob, format1, ap); - va_end(ap); - } - - if (format2) { - remain = max_nob - needed; - if (remain < 0) - remain = 0; - - va_start(ap, format2); - needed += vsnprintf(string_buf + needed, remain, - format2, ap); - va_end(ap); - } - - if (needed < max_nob) /* well. printing ok.. */ - break; - } - - if (*(string_buf+needed-1) != '\n') - printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n", - file, msgdata->msg_line, msgdata->msg_fn); - - header.ph_len = known_size + needed; - debug_buf = (char *)page_address(tage->page) + tage->used; - - if (libcfs_debug_binary) { - memcpy(debug_buf, &header, sizeof(header)); - tage->used += sizeof(header); - debug_buf += sizeof(header); - } - - /* indent message according to the nesting level */ - while (depth-- > 0) { - *(debug_buf++) = '.'; - ++tage->used; - } - - strcpy(debug_buf, file); - tage->used += strlen(file) + 1; - debug_buf += strlen(file) + 1; - - if (msgdata->msg_fn) { - strcpy(debug_buf, msgdata->msg_fn); - tage->used += strlen(msgdata->msg_fn) + 1; - debug_buf += strlen(msgdata->msg_fn) + 1; - } - - __LASSERT(debug_buf == string_buf); - - tage->used += needed; - __LASSERT (tage->used <= PAGE_CACHE_SIZE); - -console: - if ((mask & libcfs_printk) == 0) { - /* no console output requested */ - if (tcd != NULL) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cdls != NULL) { - if (libcfs_console_ratelimit && - cdls->cdls_next != 0 && /* not first time ever */ - !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { - /* skipping a console message */ - cdls->cdls_count++; - if (tcd != NULL) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cfs_time_after(cfs_time_current(), cdls->cdls_next + - libcfs_console_max_delay - + cfs_time_seconds(10))) { - /* last timeout was a long time ago */ - cdls->cdls_delay /= libcfs_console_backoff * 4; - } else { - cdls->cdls_delay *= libcfs_console_backoff; - } - - if (cdls->cdls_delay < libcfs_console_min_delay) - cdls->cdls_delay = libcfs_console_min_delay; - else if (cdls->cdls_delay > libcfs_console_max_delay) - cdls->cdls_delay = libcfs_console_max_delay; - - /* ensure cdls_next is never zero after it's been seen */ - cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; - } - - if (tcd != NULL) { - cfs_print_to_console(&header, mask, string_buf, needed, file, - msgdata->msg_fn); - cfs_trace_put_tcd(tcd); - } else { - string_buf = cfs_trace_get_console_buffer(); - - needed = 0; - if (format1 != NULL) { - va_copy(ap, args); - needed = vsnprintf(string_buf, - CFS_TRACE_CONSOLE_BUFFER_SIZE, - format1, ap); - va_end(ap); - } - if (format2 != NULL) { - remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; - if (remain > 0) { - va_start(ap, format2); - needed += vsnprintf(string_buf+needed, remain, - format2, ap); - va_end(ap); - } - } - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - put_cpu(); - } - - if (cdls != NULL && cdls->cdls_count != 0) { 
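The retry loop above leans on a C99 guarantee: vsnprintf() returns the length the formatted string would need, excluding the terminating NUL, even when the buffer was too small, so a second pass with the reported size always succeeds. A user-space sketch of the same protocol (format_record() is a hypothetical helper, not part of libcfs):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns 0 when fully written, 1 when truncated (with *avail updated
 * to the size a retry needs), -1 on a formatting error. */
static int format_record(char *buf, size_t *avail, const char *fmt, ...)
{
	va_list ap;
	int needed;

	va_start(ap, fmt);
	needed = vsnprintf(buf, *avail, fmt, ap);
	va_end(ap);

	if (needed < 0)
		return -1;			/* encoding error */
	if ((size_t)needed >= *avail) {
		*avail = (size_t)needed + 1;	/* room for the NUL */
		return 1;
	}
	return 0;
}

int main(void)
{
	char small[8];
	size_t avail = sizeof(small);

	if (format_record(small, &avail, "pid=%d msg=%s", 1234, "hello") > 0) {
		char *big = malloc(avail);	/* second pass, exact size */

		if (big && format_record(big, &avail, "pid=%d msg=%s",
					 1234, "hello") == 0)
			printf("second pass: '%s' (%zu bytes)\n", big, avail);
		free(big);
	}
	return 0;
}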
- string_buf = cfs_trace_get_console_buffer(); - - needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, - "Skipped %d previous similar message%s\n", - cdls->cdls_count, - (cdls->cdls_count > 1) ? "s" : ""); - - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - put_cpu(); - cdls->cdls_count = 0; - } - - return 0; -} -EXPORT_SYMBOL(libcfs_debug_vmsg2); - -void -cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *msgdata) -{ - struct ptldebug_header hdr; - - libcfs_panic_in_progress = 1; - libcfs_catastrophe = 1; - mb(); - - cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); - - cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), - msgdata->msg_file, msgdata->msg_fn); - - panic("Lustre debug assertion failure\n"); - - /* not reached */ -} - -static void -panic_collect_pages(struct page_collection *pc) -{ - /* Do the collect_pages job on a single CPU: assumes that all other - * CPUs have been stopped during a panic. If this isn't true for some - * arch, this will have to be implemented separately in each arch. */ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - - INIT_LIST_HEAD(&pc->pc_pages); - - cfs_tcd_for_each(tcd, i, j) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } -} - -static void collect_pages_on_all_cpus(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } - } -} - -static void collect_pages(struct page_collection *pc) -{ - INIT_LIST_HEAD(&pc->pc_pages); - - if (libcfs_panic_in_progress) - panic_collect_pages(pc); - else - collect_pages_on_all_cpus(pc); -} - -static void put_pages_back_on_all_cpus(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - struct list_head *cur_head; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - cur_head = tcd->tcd_pages.next; - - list_for_each_entry_safe(tage, tmp, &pc->pc_pages, - linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != cpu || tage->type != i) - continue; - - cfs_tage_to_tail(tage, cur_head); - tcd->tcd_cur_pages++; - } - } - } -} - -static void put_pages_back(struct page_collection *pc) -{ - if (!libcfs_panic_in_progress) - put_pages_back_on_all_cpus(pc); -} - -/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that - * we have a good amount of data at all times for dumping during an LBUG, even - * if we have been steadily writing (and otherwise discarding) pages via the - * debug daemon. 
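The cdls throttle above (cdls_next, cdls_delay and cdls_count in libcfs_debug_vmsg2()) backs off exponentially while messages keep bursting and relaxes again after a quiet spell. An illustrative model with abstract ticks instead of jiffies; min_delay, max_delay and backoff are stand-ins for the libcfs_console_* tunables:

#include <stdio.h>

struct cdls {
	long next;	/* earliest tick for the next console message */
	long delay;	/* current inter-message delay */
	int  count;	/* messages suppressed since the last one shown */
};

static const long min_delay = 1, max_delay = 600, backoff = 2;

/* Returns 1 when the message may go to the console, 0 when suppressed. */
static int cdls_allow(struct cdls *c, long now)
{
	if (c->next && now <= c->next) {
		c->count++;			/* inside the quiet window */
		return 0;
	}
	if (now > c->next + max_delay + 10)
		c->delay /= backoff * 4;	/* long quiet period: relax */
	else
		c->delay *= backoff;		/* bursty: back off */
	if (c->delay < min_delay)
		c->delay = min_delay;
	else if (c->delay > max_delay)
		c->delay = max_delay;
	c->next = (now + c->delay) | 1;		/* "| 1" keeps next nonzero */
	return 1;
}

int main(void)
{
	struct cdls c = { 0, min_delay, 0 };
	long t;

	for (t = 0; t < 20; t++) {
		if (cdls_allow(&c, t)) {
			printf("t=%ld: shown, %d skipped before it\n",
			       t, c.count);
			c.count = 0;	/* kernel resets after "Skipped N" */
		}
	}
	return 0;
}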
*/ -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd) -{ - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) - continue; - - cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); - tcd->tcd_cur_daemon_pages++; - - if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { - struct cfs_trace_page *victim; - - __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); - victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); - - __LASSERT_TAGE_INVARIANT(victim); - - list_del(&victim->linkage); - cfs_tage_free(victim); - tcd->tcd_cur_daemon_pages--; - } - } -} - -static void put_pages_on_daemon_list(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) - put_pages_on_tcd_daemon_list(pc, tcd); - } -} - -void cfs_trace_debug_print(void) -{ - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - char *p, *file, *fn; - struct page *page; - - __LASSERT_TAGE_INVARIANT(tage); - - page = tage->page; - p = page_address(page); - while (p < ((char *)page_address(page) + tage->used)) { - struct ptldebug_header *hdr; - int len; - - hdr = (void *)p; - p += sizeof(*hdr); - file = p; - p += strlen(file) + 1; - fn = p; - p += strlen(fn) + 1; - len = hdr->ph_len - (int)(p - (char *)hdr); - - cfs_print_to_console(hdr, D_EMERG, p, len, file, fn); - - p += len; - } - - list_del(&tage->linkage); - cfs_tage_free(tage); - } -} - -int cfs_tracefile_dump_all_pages(char *filename) -{ - struct page_collection pc; - struct file *filp; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - char *buf; - int rc; - - DECL_MMSPACE; - - cfs_tracefile_write_lock(); - - filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600); - if (IS_ERR(filp)) { - rc = PTR_ERR(filp); - filp = NULL; - pr_err("LustreError: can't open %s for dump: rc %d\n", - filename, rc); - goto out; - } - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) { - rc = 0; - goto close; - } - - /* ok, for now, just write the pages. 
in the future we'll be building
- * iobufs with the pages and calling generic_direct_IO */
-	MMSPACE_OPEN;
-	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
-		__LASSERT_TAGE_INVARIANT(tage);
-
-		buf = kmap(tage->page);
-		rc = vfs_write(filp, (__force const char __user *)buf,
-			       tage->used, &filp->f_pos);
-		kunmap(tage->page);
-
-		if (rc != (int)tage->used) {
-			printk(KERN_WARNING "wanted to write %u but wrote %d\n",
-			       tage->used, rc);
-			put_pages_back(&pc);
-			__LASSERT(list_empty(&pc.pc_pages));
-			break;
-		}
-		list_del(&tage->linkage);
-		cfs_tage_free(tage);
-	}
-	MMSPACE_CLOSE;
-	rc = vfs_fsync(filp, 1);
-	if (rc)
-		pr_err("sync returns %d\n", rc);
-close:
-	filp_close(filp, NULL);
-out:
-	cfs_tracefile_write_unlock();
-	return rc;
-}
-
-void cfs_trace_flush_pages(void)
-{
-	struct page_collection pc;
-	struct cfs_trace_page *tage;
-	struct cfs_trace_page *tmp;
-
-	pc.pc_want_daemon_pages = 1;
-	collect_pages(&pc);
-	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
-		__LASSERT_TAGE_INVARIANT(tage);
-
-		list_del(&tage->linkage);
-		cfs_tage_free(tage);
-	}
-}
-
-int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
-			    const char __user *usr_buffer, int usr_buffer_nob)
-{
-	int nob;
-
-	if (usr_buffer_nob > knl_buffer_nob)
-		return -EOVERFLOW;
-
-	if (copy_from_user((void *)knl_buffer,
-			   usr_buffer, usr_buffer_nob))
-		return -EFAULT;
-
-	nob = strnlen(knl_buffer, usr_buffer_nob);
-	while (nob-- >= 0)		/* strip trailing whitespace */
-		if (!isspace(knl_buffer[nob]))
-			break;
-
-	if (nob < 0)			/* empty string */
-		return -EINVAL;
-
-	if (nob == knl_buffer_nob)	/* no space to terminate */
-		return -EOVERFLOW;
-
-	knl_buffer[nob + 1] = 0;	/* terminate */
-	return 0;
-}
-EXPORT_SYMBOL(cfs_trace_copyin_string);
-
-int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
-			     const char *knl_buffer, char *append)
-{
-	/* NB if 'append' != NULL, it's a single character to append to the
-	 * copied out string - usually "\n", for /proc entries and "" (i.e.
a - * terminating zero byte) for sysctl entries */ - int nob = strlen(knl_buffer); - - if (nob > usr_buffer_nob) - nob = usr_buffer_nob; - - if (copy_to_user(usr_buffer, knl_buffer, nob)) - return -EFAULT; - - if (append != NULL && nob < usr_buffer_nob) { - if (copy_to_user(usr_buffer + nob, append, 1)) - return -EFAULT; - - nob++; - } - - return nob; -} -EXPORT_SYMBOL(cfs_trace_copyout_string); - -int cfs_trace_allocate_string_buffer(char **str, int nob) -{ - if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ - return -EINVAL; - - *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); - if (*str == NULL) - return -ENOMEM; - - return 0; -} - -int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) -{ - char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc != 0) - goto out; - - if (str[0] != '/') { - rc = -EINVAL; - goto out; - } - rc = cfs_tracefile_dump_all_pages(str); -out: - kfree(str); - return rc; -} - -int cfs_trace_daemon_command(char *str) -{ - int rc = 0; - - cfs_tracefile_write_lock(); - - if (strcmp(str, "stop") == 0) { - cfs_tracefile_write_unlock(); - cfs_trace_stop_thread(); - cfs_tracefile_write_lock(); - memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); - - } else if (strncmp(str, "size=", 5) == 0) { - cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0); - if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480) - cfs_tracefile_size = CFS_TRACEFILE_SIZE; - else - cfs_tracefile_size <<= 20; - - } else if (strlen(str) >= sizeof(cfs_tracefile)) { - rc = -ENAMETOOLONG; - } else if (str[0] != '/') { - rc = -EINVAL; - } else { - strcpy(cfs_tracefile, str); - - printk(KERN_INFO - "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n", - cfs_tracefile, - (long)(cfs_tracefile_size >> 10)); - - cfs_trace_start_thread(); - } - - cfs_tracefile_write_unlock(); - return rc; -} - -int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) -{ - char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc == 0) - rc = cfs_trace_daemon_command(str); - - kfree(str); - return rc; -} - -int cfs_trace_set_debug_mb(int mb) -{ - int i; - int j; - int pages; - int limit = cfs_trace_max_debug_mb(); - struct cfs_trace_cpu_data *tcd; - - if (mb < num_possible_cpus()) { - printk(KERN_WARNING - "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n", - mb, num_possible_cpus()); - mb = num_possible_cpus(); - } - - if (mb > limit) { - printk(KERN_WARNING - "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n", - mb, limit); - mb = limit; - } - - mb /= num_possible_cpus(); - pages = mb << (20 - PAGE_CACHE_SHIFT); - - cfs_tracefile_write_lock(); - - cfs_tcd_for_each(tcd, i, j) - tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; - - cfs_tracefile_write_unlock(); - - return 0; -} - -int cfs_trace_get_debug_mb(void) -{ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - int total_pages = 0; - - cfs_tracefile_read_lock(); - - cfs_tcd_for_each(tcd, i, j) - total_pages += tcd->tcd_max_pages; - - cfs_tracefile_read_unlock(); - - return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; -} - -static int tracefiled(void *arg) -{ - struct page_collection pc; - struct tracefiled_ctl *tctl = arg; - struct cfs_trace_page 
*tage; - struct cfs_trace_page *tmp; - struct file *filp; - char *buf; - int last_loop = 0; - int rc; - - DECL_MMSPACE; - - /* we're started late enough that we pick up init's fs context */ - /* this is so broken in uml? what on earth is going on? */ - - complete(&tctl->tctl_start); - - while (1) { - wait_queue_t __wait; - - pc.pc_want_daemon_pages = 0; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) - goto end_loop; - - filp = NULL; - cfs_tracefile_read_lock(); - if (cfs_tracefile[0] != 0) { - filp = filp_open(cfs_tracefile, - O_CREAT | O_RDWR | O_LARGEFILE, - 0600); - if (IS_ERR(filp)) { - rc = PTR_ERR(filp); - filp = NULL; - printk(KERN_WARNING "couldn't open %s: %d\n", - cfs_tracefile, rc); - } - } - cfs_tracefile_read_unlock(); - if (filp == NULL) { - put_pages_on_daemon_list(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - goto end_loop; - } - - MMSPACE_OPEN; - - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) { - static loff_t f_pos; - - __LASSERT_TAGE_INVARIANT(tage); - - if (f_pos >= (off_t)cfs_tracefile_size) - f_pos = 0; - else if (f_pos > i_size_read(file_inode(filp))) - f_pos = i_size_read(file_inode(filp)); - - buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &f_pos); - kunmap(tage->page); - - if (rc != (int)tage->used) { - printk(KERN_WARNING "wanted to write %u but wrote %d\n", - tage->used, rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - break; - } - } - MMSPACE_CLOSE; - - filp_close(filp, NULL); - put_pages_on_daemon_list(&pc); - if (!list_empty(&pc.pc_pages)) { - int i; - - printk(KERN_ALERT "Lustre: trace pages aren't empty\n"); - pr_err("total cpus(%d): ", - num_possible_cpus()); - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_online(i)) - pr_cont("%d(on) ", i); - else - pr_cont("%d(off) ", i); - pr_cont("\n"); - - i = 0; - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) - pr_err("page %d belongs to cpu %d\n", - ++i, tage->cpu); - pr_err("There are %d pages unwritten\n", i); - } - __LASSERT(list_empty(&pc.pc_pages)); -end_loop: - if (atomic_read(&tctl->tctl_shutdown)) { - if (last_loop == 0) { - last_loop = 1; - continue; - } else { - break; - } - } - init_waitqueue_entry(&__wait, current); - add_wait_queue(&tctl->tctl_waitq, &__wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); - remove_wait_queue(&tctl->tctl_waitq, &__wait); - } - complete(&tctl->tctl_stop); - return 0; -} - -int cfs_trace_start_thread(void) -{ - struct tracefiled_ctl *tctl = &trace_tctl; - int rc = 0; - - mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) - goto out; - - init_completion(&tctl->tctl_start); - init_completion(&tctl->tctl_stop); - init_waitqueue_head(&tctl->tctl_waitq); - atomic_set(&tctl->tctl_shutdown, 0); - - if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) { - rc = -ECHILD; - goto out; - } - - wait_for_completion(&tctl->tctl_start); - thread_running = 1; -out: - mutex_unlock(&cfs_trace_thread_mutex); - return rc; -} - -void cfs_trace_stop_thread(void) -{ - struct tracefiled_ctl *tctl = &trace_tctl; - - mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) { - printk(KERN_INFO - "Lustre: shutting down debug daemon thread...\n"); - atomic_set(&tctl->tctl_shutdown, 1); - wait_for_completion(&tctl->tctl_stop); - thread_running = 0; - } - mutex_unlock(&cfs_trace_thread_mutex); -} - -int cfs_tracefile_init(int max_pages) -{ - struct cfs_trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; - - rc = 
cfs_tracefile_init_arch(); - if (rc != 0) - return rc; - - cfs_tcd_for_each(tcd, i, j) { - /* tcd_pages_factor is initialized int tracefile_init_arch. */ - factor = tcd->tcd_pages_factor; - INIT_LIST_HEAD(&tcd->tcd_pages); - INIT_LIST_HEAD(&tcd->tcd_stock_pages); - INIT_LIST_HEAD(&tcd->tcd_daemon_pages); - tcd->tcd_cur_pages = 0; - tcd->tcd_cur_stock_pages = 0; - tcd->tcd_cur_daemon_pages = 0; - tcd->tcd_max_pages = (max_pages * factor) / 100; - LASSERT(tcd->tcd_max_pages > 0); - tcd->tcd_shutting_down = 0; - } - - return 0; -} - -static void trace_cleanup_on_all_cpus(void) -{ - struct cfs_trace_cpu_data *tcd; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - tcd->tcd_shutting_down = 1; - - list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - cfs_tage_free(tage); - } - - tcd->tcd_cur_pages = 0; - } - } -} - -static void cfs_trace_cleanup(void) -{ - struct page_collection pc; - - INIT_LIST_HEAD(&pc.pc_pages); - - trace_cleanup_on_all_cpus(); - - cfs_tracefile_fini_arch(); -} - -void cfs_tracefile_exit(void) -{ - cfs_trace_stop_thread(); - cfs_trace_cleanup(); -} diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h deleted file mode 100644 index 7bf1471a5..000000000 --- a/drivers/staging/lustre/lustre/libcfs/tracefile.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
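Back in tracefiled() above, the static f_pos wraps to zero once it reaches cfs_tracefile_size, so the debug file behaves as a circular log that always holds the most recent window of trace data. A user-space sketch of that wrap (path, cap and record format are examples, and the cap is kept small so the example actually wraps; the kernel version also clamps the offset to the file's current size before writing):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define TRACEFILE_CAP 1024	/* tiny stand-in for cfs_tracefile_size */

static off_t log_pos;

static int log_write(int fd, const char *rec, size_t len)
{
	ssize_t rc;

	if (log_pos >= TRACEFILE_CAP)
		log_pos = 0;			/* wrap, as tracefiled does */

	rc = pwrite(fd, rec, len, log_pos);
	if (rc != (ssize_t)len)
		return -1;			/* short write: caller re-queues */
	log_pos += rc;
	return 0;
}

int main(void)
{
	int fd = open("/tmp/trace.log", O_CREAT | O_RDWR, 0600);
	char rec[64];
	int i;

	if (fd < 0)
		return 1;
	for (i = 0; i < 200; i++) {		/* enough to wrap twice */
		int len = snprintf(rec, sizeof(rec), "record %03d\n", i);

		if (log_write(fd, rec, (size_t)len))
			break;
	}
	close(fd);
	return 0;
}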
- */ - -#ifndef __LIBCFS_TRACEFILE_H__ -#define __LIBCFS_TRACEFILE_H__ - -#include "../../include/linux/libcfs/libcfs.h" - -typedef enum { - CFS_TCD_TYPE_PROC = 0, - CFS_TCD_TYPE_SOFTIRQ, - CFS_TCD_TYPE_IRQ, - CFS_TCD_TYPE_MAX -} cfs_trace_buf_type_t; - -/* trace file lock routines */ - -#define TRACEFILE_NAME_SIZE 1024 -extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; -extern long long cfs_tracefile_size; - -void libcfs_run_debug_log_upcall(char *file); - -int cfs_tracefile_init_arch(void); -void cfs_tracefile_fini_arch(void); - -void cfs_tracefile_read_lock(void); -void cfs_tracefile_read_unlock(void); -void cfs_tracefile_write_lock(void); -void cfs_tracefile_write_unlock(void); - -int cfs_tracefile_dump_all_pages(char *filename); -void cfs_trace_debug_print(void); -void cfs_trace_flush_pages(void); -int cfs_trace_start_thread(void); -void cfs_trace_stop_thread(void); -int cfs_tracefile_init(int max_pages); -void cfs_tracefile_exit(void); - -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char __user *usr_buffer, int usr_buffer_nob); -int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, - const char *knl_str, char *append); -int cfs_trace_allocate_string_buffer(char **str, int nob); -int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob); -int cfs_trace_daemon_command(char *str); -int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob); -int cfs_trace_set_debug_mb(int mb); -int cfs_trace_get_debug_mb(void); - -void libcfs_debug_dumplog_internal(void *arg); -void libcfs_register_panic_notifier(void); -void libcfs_unregister_panic_notifier(void); -extern int libcfs_panic_in_progress; -int cfs_trace_max_debug_mb(void); - -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) -#define TCD_STOCK_PAGES (TCD_MAX_PAGES) -#define CFS_TRACEFILE_SIZE (500 << 20) - -#ifdef LUSTRE_TRACEFILE_PRIVATE - -/* - * Private declare for tracefile - */ -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) -#define TCD_STOCK_PAGES (TCD_MAX_PAGES) - -#define CFS_TRACEFILE_SIZE (500 << 20) - -/* Size of a buffer for sprinting console messages if we can't get a page - * from system */ -#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024 - -union cfs_trace_data_union { - struct cfs_trace_cpu_data { - /* - * Even though this structure is meant to be per-CPU, locking - * is needed because in some places the data may be accessed - * from other CPUs. This lock is directly used in trace_get_tcd - * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and - * tcd_for_each_type_lock - */ - spinlock_t tcd_lock; - unsigned long tcd_lock_flags; - - /* - * pages with trace records not yet processed by tracefiled. - */ - struct list_head tcd_pages; - /* number of pages on ->tcd_pages */ - unsigned long tcd_cur_pages; - - /* - * pages with trace records already processed by - * tracefiled. These pages are kept in memory, so that some - * portion of log can be written in the event of LBUG. This - * list is maintained in LRU order. - * - * Pages are moved to ->tcd_daemon_pages by tracefiled() - * (put_pages_on_daemon_list()). LRU pages from this list are - * discarded when list grows too large. - */ - struct list_head tcd_daemon_pages; - /* number of pages on ->tcd_daemon_pages */ - unsigned long tcd_cur_daemon_pages; - - /* - * Maximal number of pages allowed on ->tcd_pages and - * ->tcd_daemon_pages each. - * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current - * implementation. 
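The constants above encode megabytes as page counts: 5 << (20 - PAGE_CACHE_SHIFT) is simply 5 MB worth of pages. cfs_trace_set_debug_mb() applies the same shift after dividing the user's budget across CPUs, and tcd_pages_factor then splits each CPU's share across contexts. A small calculator, assuming 4 KB pages; the 80/10/10 split is illustrative, not taken from this patch:

#include <stdio.h>

#define PAGE_SHIFT 12		/* 4 KB pages assumed */

static unsigned long mb_to_pages(unsigned long mb)
{
	return mb << (20 - PAGE_SHIFT);
}

int main(void)
{
	unsigned long total_mb = 64, ncpus = 8;
	unsigned long per_cpu_pages = mb_to_pages(total_mb / ncpus);
	/* hypothetical per-context split, in the role of tcd_pages_factor */
	unsigned int factor[3] = { 80, 10, 10 };	/* proc, softirq, irq */
	int i;

	printf("TCD_MAX_PAGES(5MB) = %lu pages\n", mb_to_pages(5));
	for (i = 0; i < 3; i++)
		printf("context %d: %lu pages\n", i,
		       per_cpu_pages * factor[i] / 100);
	return 0;
}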
- */ - unsigned long tcd_max_pages; - - /* - * preallocated pages to write trace records into. Pages from - * ->tcd_stock_pages are moved to ->tcd_pages by - * portals_debug_msg(). - * - * This list is necessary, because on some platforms it's - * impossible to perform efficient atomic page allocation in a - * non-blockable context. - * - * Such platforms fill ->tcd_stock_pages "on occasion", when - * tracing code is entered in blockable context. - * - * trace_get_tage_try() tries to get a page from - * ->tcd_stock_pages first and resorts to atomic page - * allocation only if this queue is empty. ->tcd_stock_pages - * is replenished when tracing code is entered in blocking - * context (darwin-tracefile.c:trace_get_tcd()). We try to - * maintain TCD_STOCK_PAGES (40 by default) pages in this - * queue. Atomic allocation is only required if more than - * TCD_STOCK_PAGES pagesful are consumed by trace records all - * emitted in non-blocking contexts. Which is quite unlikely. - */ - struct list_head tcd_stock_pages; - /* number of pages on ->tcd_stock_pages */ - unsigned long tcd_cur_stock_pages; - - unsigned short tcd_shutting_down; - unsigned short tcd_cpu; - unsigned short tcd_type; - /* The factors to share debug memory. */ - unsigned short tcd_pages_factor; - } tcd; - char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))]; -}; - -#define TCD_MAX_TYPES 8 -extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS]; - -#define cfs_tcd_for_each(tcd, i, j) \ - for (i = 0; cfs_trace_data[i] != NULL; i++) \ - for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ - j < num_possible_cpus(); \ - j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) - -#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ - for (i = 0; cfs_trace_data[i] && \ - (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ - cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) - -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct page_collection { - struct list_head pc_pages; - /* - * if this flag is set, collect_pages() will spill both - * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, - * only ->tcd_pages are spilled. - */ - int pc_want_daemon_pages; -}; - -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct tracefiled_ctl { - struct completion tctl_start; - struct completion tctl_stop; - wait_queue_head_t tctl_waitq; - pid_t tctl_pid; - atomic_t tctl_shutdown; -}; - -/* - * small data-structure for each page owned by tracefiled. 
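The cfs_tcd_for_each() macros above walk what is logically a two-dimensional array of trace contexts, one row per buffer type and one column per CPU, with the locking variant taking and dropping each cell's lock as it goes. A plain-function rendering of the traversal (the dimensions and the struct are example stand-ins):

#include <stdio.h>

#define NTYPES 3
#define NCPUS  4

struct tcd {
	int type, cpu;
	unsigned long cur_pages;
};

static struct tcd tcds[NTYPES][NCPUS];

static void for_each_tcd(void (*fn)(struct tcd *))
{
	int i, j;

	for (i = 0; i < NTYPES; i++)		/* outer loop: context type */
		for (j = 0; j < NCPUS; j++)	/* inner loop: CPU */
			fn(&tcds[i][j]);
}

static void init_tcd(struct tcd *t)
{
	static int n;

	t->type = n / NCPUS;	/* row-major order matches the loops */
	t->cpu = n % NCPUS;
	t->cur_pages = 0;
	n++;
}

int main(void)
{
	for_each_tcd(init_tcd);
	printf("initialized %d contexts\n", NTYPES * NCPUS);
	return 0;
}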
- */ -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct cfs_trace_page { - /* - * page itself - */ - struct page *page; - /* - * linkage into one of the lists in trace_data_union or - * page_collection - */ - struct list_head linkage; - /* - * number of bytes used within this page - */ - unsigned int used; - /* - * cpu that owns this page - */ - unsigned short cpu; - /* - * type(context) of this page - */ - unsigned short type; -}; - -void cfs_set_ptldebug_header(struct ptldebug_header *header, - struct libcfs_debug_msg_data *m, - unsigned long stack); -void cfs_print_to_console(struct ptldebug_header *hdr, int mask, - const char *buf, int len, const char *file, - const char *fn); - -int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking); -void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking); - -extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; -cfs_trace_buf_type_t cfs_trace_buf_idx_get(void); - -static inline char * -cfs_trace_get_console_buffer(void) -{ - unsigned int i = get_cpu(); - unsigned int j = cfs_trace_buf_idx_get(); - - return cfs_trace_console_buffers[i][j]; -} - -static inline struct cfs_trace_cpu_data * -cfs_trace_get_tcd(void) -{ - struct cfs_trace_cpu_data *tcd = - &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd; - - cfs_trace_lock_tcd(tcd, 0); - - return tcd; -} - -static inline void -cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd) -{ - cfs_trace_unlock_tcd(tcd, 0); - - put_cpu(); -} - -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, - struct list_head *stock); - -int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, - struct cfs_trace_page *tage); - -void cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *m); - -/* ASSERTION that is safe to use within the debug system */ -#define __LASSERT(cond) \ -do { \ - if (unlikely(!(cond))) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \ - cfs_trace_assertion_failed("ASSERTION("#cond") failed", \ - &msgdata); \ - } \ -} while (0) - -#define __LASSERT_TAGE_INVARIANT(tage) \ -do { \ - __LASSERT(tage != NULL); \ - __LASSERT(tage->page != NULL); \ - __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ - __LASSERT(page_count(tage->page) > 0); \ -} while (0) - -#endif /* LUSTRE_TRACEFILE_PRIVATE */ - -#endif /* __LIBCFS_TRACEFILE_H__ */ diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c deleted file mode 100644 index 60bb88a00..000000000 --- a/drivers/staging/lustre/lustre/libcfs/workitem.c +++ /dev/null @@ -1,465 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
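__LASSERT() above exists because a failed check inside the tracing code cannot report through CDEBUG() without recursing into the very machinery that just failed, so it funnels into cfs_trace_assertion_failed() instead. The same shape in portable C (MY_LASSERT and assertion_failed are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/* Dead-end handler: plain fprintf, no call back into any tracing
 * machinery, so a failure here cannot recurse. */
static void assertion_failed(const char *expr, const char *file, int line)
{
	fprintf(stderr, "ASSERTION(%s) failed at %s:%d\n", expr, file, line);
	abort();
}

#define MY_LASSERT(cond)					\
do {								\
	if (!(cond))						\
		assertion_failed(#cond, __FILE__, __LINE__);	\
} while (0)

int main(void)
{
	int used = 10, page_size = 4096;

	MY_LASSERT(used <= page_size);	/* passes silently */
	printf("invariant holds\n");
	return 0;
}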
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/workitem.c - * - * Author: Isaac Huang - * Liang Zhen - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -#define CFS_WS_NAME_LEN 16 - -struct cfs_wi_sched { - struct list_head ws_list; /* chain on global list */ - /** serialised workitems */ - spinlock_t ws_lock; - /** where schedulers sleep */ - wait_queue_head_t ws_waitq; - /** concurrent workitems */ - struct list_head ws_runq; - /** rescheduled running-workitems, a workitem can be rescheduled - * while running in wi_action(), but we don't to execute it again - * unless it returns from wi_action(), so we put it on ws_rerunq - * while rescheduling, and move it to runq after it returns - * from wi_action() */ - struct list_head ws_rerunq; - /** CPT-table for this scheduler */ - struct cfs_cpt_table *ws_cptab; - /** CPT id for affinity */ - int ws_cpt; - /** number of scheduled workitems */ - int ws_nscheduled; - /** started scheduler thread, protected by cfs_wi_data::wi_glock */ - unsigned int ws_nthreads:30; - /** shutting down, protected by cfs_wi_data::wi_glock */ - unsigned int ws_stopping:1; - /** serialize starting thread, protected by cfs_wi_data::wi_glock */ - unsigned int ws_starting:1; - /** scheduler name */ - char ws_name[CFS_WS_NAME_LEN]; -}; - -static struct cfs_workitem_data { - /** serialize */ - spinlock_t wi_glock; - /** list of all schedulers */ - struct list_head wi_scheds; - /** WI module is initialized */ - int wi_init; - /** shutting down the whole WI module */ - int wi_stopping; -} cfs_wi_data; - -static inline int -cfs_wi_sched_cansleep(struct cfs_wi_sched *sched) -{ - spin_lock(&sched->ws_lock); - if (sched->ws_stopping) { - spin_unlock(&sched->ws_lock); - return 0; - } - - if (!list_empty(&sched->ws_runq)) { - spin_unlock(&sched->ws_lock); - return 0; - } - spin_unlock(&sched->ws_lock); - return 1; -} - -/* XXX: - * 0. it only works when called from wi->wi_action. - * 1. when it returns no one shall try to schedule the workitem. 
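cfs_wi_sched_cansleep() above makes the sleep decision under ws_lock: a worker may block only if it sees neither a stop request nor queued work, because an unlocked check could miss a concurrent enqueue and sleep on a non-empty queue. A pthread rendering of the same test (struct sched is an illustrative reduction):

#include <pthread.h>
#include <stdio.h>

struct sched {
	pthread_mutex_t lock;
	int stopping;
	int runq_len;
};

static int can_sleep(struct sched *s)
{
	int ok;

	pthread_mutex_lock(&s->lock);	/* decide under the queue lock */
	ok = !s->stopping && s->runq_len == 0;
	pthread_mutex_unlock(&s->lock);
	return ok;
}

int main(void)
{
	struct sched s = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	printf("can sleep: %d\n", can_sleep(&s));	/* 1: idle, not stopping */
	s.runq_len = 1;
	printf("can sleep: %d\n", can_sleep(&s));	/* 0: work pending */
	return 0;
}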
- */ -void -cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) -{ - LASSERT(!in_interrupt()); /* because we use plain spinlock */ - LASSERT(!sched->ws_stopping); - - spin_lock(&sched->ws_lock); - - LASSERT(wi->wi_running); - if (wi->wi_scheduled) { /* cancel pending schedules */ - LASSERT(!list_empty(&wi->wi_list)); - list_del_init(&wi->wi_list); - - LASSERT(sched->ws_nscheduled > 0); - sched->ws_nscheduled--; - } - - LASSERT(list_empty(&wi->wi_list)); - - wi->wi_scheduled = 1; /* LBUG future schedule attempts */ - spin_unlock(&sched->ws_lock); - - return; -} -EXPORT_SYMBOL(cfs_wi_exit); - -/** - * cancel schedule request of workitem \a wi - */ -int -cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) -{ - int rc; - - LASSERT(!in_interrupt()); /* because we use plain spinlock */ - LASSERT(!sched->ws_stopping); - - /* - * return 0 if it's running already, otherwise return 1, which - * means the workitem will not be scheduled and will not have - * any race with wi_action. - */ - spin_lock(&sched->ws_lock); - - rc = !(wi->wi_running); - - if (wi->wi_scheduled) { /* cancel pending schedules */ - LASSERT(!list_empty(&wi->wi_list)); - list_del_init(&wi->wi_list); - - LASSERT(sched->ws_nscheduled > 0); - sched->ws_nscheduled--; - - wi->wi_scheduled = 0; - } - - LASSERT (list_empty(&wi->wi_list)); - - spin_unlock(&sched->ws_lock); - return rc; -} -EXPORT_SYMBOL(cfs_wi_deschedule); - -/* - * Workitem scheduled with (serial == 1) is strictly serialised not only with - * itself, but also with others scheduled this way. - * - * Now there's only one static serialised queue, but in the future more might - * be added, and even dynamic creation of serialised queues might be supported. - */ -void -cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) -{ - LASSERT(!in_interrupt()); /* because we use plain spinlock */ - LASSERT(!sched->ws_stopping); - - spin_lock(&sched->ws_lock); - - if (!wi->wi_scheduled) { - LASSERT (list_empty(&wi->wi_list)); - - wi->wi_scheduled = 1; - sched->ws_nscheduled++; - if (!wi->wi_running) { - list_add_tail(&wi->wi_list, &sched->ws_runq); - wake_up(&sched->ws_waitq); - } else { - list_add(&wi->wi_list, &sched->ws_rerunq); - } - } - - LASSERT (!list_empty(&wi->wi_list)); - spin_unlock(&sched->ws_lock); - return; -} -EXPORT_SYMBOL(cfs_wi_schedule); - -static int -cfs_wi_scheduler (void *arg) -{ - struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg; - - cfs_block_allsigs(); - - /* CPT affinity scheduler? */ - if (sched->ws_cptab != NULL) - if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0) - CWARN("Failed to bind %s on CPT %d\n", - sched->ws_name, sched->ws_cpt); - - spin_lock(&cfs_wi_data.wi_glock); - - LASSERT(sched->ws_starting == 1); - sched->ws_starting--; - sched->ws_nthreads++; - - spin_unlock(&cfs_wi_data.wi_glock); - - spin_lock(&sched->ws_lock); - - while (!sched->ws_stopping) { - int nloops = 0; - int rc; - cfs_workitem_t *wi; - - while (!list_empty(&sched->ws_runq) && - nloops < CFS_WI_RESCHED) { - wi = list_entry(sched->ws_runq.next, - cfs_workitem_t, wi_list); - LASSERT(wi->wi_scheduled && !wi->wi_running); - - list_del_init(&wi->wi_list); - - LASSERT(sched->ws_nscheduled > 0); - sched->ws_nscheduled--; - - wi->wi_running = 1; - wi->wi_scheduled = 0; - - spin_unlock(&sched->ws_lock); - nloops++; - - rc = (*wi->wi_action) (wi); - - spin_lock(&sched->ws_lock); - if (rc != 0) /* WI should be dead, even be freed! 
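The wi_running/wi_scheduled pairing above is the subtle part of cfs_wi_schedule(): rescheduling an item whose action is still executing must not requeue it immediately, so it parks on ws_rerunq and moves back to the run queue only after wi_action() returns. A single-threaded model of those state transitions (flag names follow the kernel's; the queues are reduced to booleans):

#include <stdio.h>

struct workitem {
	int scheduled;		/* queued for execution */
	int running;		/* action currently executing */
	int on_rerunq;		/* rescheduled while running */
};

static void wi_schedule(struct workitem *wi)
{
	if (wi->scheduled)
		return;			/* already queued: nothing to do */
	wi->scheduled = 1;
	if (wi->running) {
		wi->on_rerunq = 1;	/* defer until the action returns */
		printf("parked on rerunq\n");
	}
}

static void wi_run(struct workitem *wi)
{
	wi->running = 1;
	wi->scheduled = 0;
	/* ... the action runs here and may reschedule itself ... */
	wi_schedule(wi);		/* simulate a self-reschedule */
	wi->running = 0;
	if (wi->on_rerunq) {		/* now it may become runnable */
		wi->on_rerunq = 0;
		printf("moved from rerunq back to runq\n");
	}
}

int main(void)
{
	struct workitem wi = { 0, 0, 0 };

	wi_schedule(&wi);
	wi_run(&wi);
	return 0;
}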
*/ - continue; - - wi->wi_running = 0; - if (list_empty(&wi->wi_list)) - continue; - - LASSERT(wi->wi_scheduled); - /* wi is rescheduled, should be on rerunq now, we - * move it to runq so it can run action now */ - list_move_tail(&wi->wi_list, &sched->ws_runq); - } - - if (!list_empty(&sched->ws_runq)) { - spin_unlock(&sched->ws_lock); - /* don't sleep because some workitems still - * expect me to come back soon */ - cond_resched(); - spin_lock(&sched->ws_lock); - continue; - } - - spin_unlock(&sched->ws_lock); - rc = wait_event_interruptible_exclusive(sched->ws_waitq, - !cfs_wi_sched_cansleep(sched)); - spin_lock(&sched->ws_lock); - } - - spin_unlock(&sched->ws_lock); - - spin_lock(&cfs_wi_data.wi_glock); - sched->ws_nthreads--; - spin_unlock(&cfs_wi_data.wi_glock); - - return 0; -} - -void -cfs_wi_sched_destroy(struct cfs_wi_sched *sched) -{ - int i; - - LASSERT(cfs_wi_data.wi_init); - LASSERT(!cfs_wi_data.wi_stopping); - - spin_lock(&cfs_wi_data.wi_glock); - if (sched->ws_stopping) { - CDEBUG(D_INFO, "%s is in progress of stopping\n", - sched->ws_name); - spin_unlock(&cfs_wi_data.wi_glock); - return; - } - - LASSERT(!list_empty(&sched->ws_list)); - sched->ws_stopping = 1; - - spin_unlock(&cfs_wi_data.wi_glock); - - i = 2; - wake_up_all(&sched->ws_waitq); - - spin_lock(&cfs_wi_data.wi_glock); - while (sched->ws_nthreads > 0) { - CDEBUG(is_power_of_2(++i) ? D_WARNING : D_NET, - "waiting for %d threads of WI sched[%s] to terminate\n", - sched->ws_nthreads, sched->ws_name); - - spin_unlock(&cfs_wi_data.wi_glock); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 20); - spin_lock(&cfs_wi_data.wi_glock); - } - - list_del(&sched->ws_list); - - spin_unlock(&cfs_wi_data.wi_glock); - LASSERT(sched->ws_nscheduled == 0); - - LIBCFS_FREE(sched, sizeof(*sched)); -} -EXPORT_SYMBOL(cfs_wi_sched_destroy); - -int -cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, - int cpt, int nthrs, struct cfs_wi_sched **sched_pp) -{ - struct cfs_wi_sched *sched; - int rc; - - LASSERT(cfs_wi_data.wi_init); - LASSERT(!cfs_wi_data.wi_stopping); - LASSERT(cptab == NULL || cpt == CFS_CPT_ANY || - (cpt >= 0 && cpt < cfs_cpt_number(cptab))); - - LIBCFS_ALLOC(sched, sizeof(*sched)); - if (sched == NULL) - return -ENOMEM; - - strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN); - - sched->ws_cptab = cptab; - sched->ws_cpt = cpt; - - spin_lock_init(&sched->ws_lock); - init_waitqueue_head(&sched->ws_waitq); - INIT_LIST_HEAD(&sched->ws_runq); - INIT_LIST_HEAD(&sched->ws_rerunq); - INIT_LIST_HEAD(&sched->ws_list); - - rc = 0; - while (nthrs > 0) { - char name[16]; - struct task_struct *task; - - spin_lock(&cfs_wi_data.wi_glock); - while (sched->ws_starting > 0) { - spin_unlock(&cfs_wi_data.wi_glock); - schedule(); - spin_lock(&cfs_wi_data.wi_glock); - } - - sched->ws_starting++; - spin_unlock(&cfs_wi_data.wi_glock); - - if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) { - snprintf(name, sizeof(name), "%s_%02d_%02u", - sched->ws_name, sched->ws_cpt, - sched->ws_nthreads); - } else { - snprintf(name, sizeof(name), "%s_%02u", - sched->ws_name, sched->ws_nthreads); - } - - task = kthread_run(cfs_wi_scheduler, sched, "%s", name); - if (!IS_ERR(task)) { - nthrs--; - continue; - } - rc = PTR_ERR(task); - - CERROR("Failed to create thread for WI scheduler %s: %d\n", - name, rc); - - spin_lock(&cfs_wi_data.wi_glock); - - /* make up for cfs_wi_sched_destroy */ - list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); - sched->ws_starting--; - - spin_unlock(&cfs_wi_data.wi_glock); - - 
cfs_wi_sched_destroy(sched); - return rc; - } - spin_lock(&cfs_wi_data.wi_glock); - list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); - spin_unlock(&cfs_wi_data.wi_glock); - - *sched_pp = sched; - return 0; -} -EXPORT_SYMBOL(cfs_wi_sched_create); - -int -cfs_wi_startup(void) -{ - memset(&cfs_wi_data, 0, sizeof(cfs_wi_data)); - - spin_lock_init(&cfs_wi_data.wi_glock); - INIT_LIST_HEAD(&cfs_wi_data.wi_scheds); - cfs_wi_data.wi_init = 1; - - return 0; -} - -void -cfs_wi_shutdown(void) -{ - struct cfs_wi_sched *sched; - - spin_lock(&cfs_wi_data.wi_glock); - cfs_wi_data.wi_stopping = 1; - spin_unlock(&cfs_wi_data.wi_glock); - - /* nobody should contend on this list */ - list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { - sched->ws_stopping = 1; - wake_up_all(&sched->ws_waitq); - } - - list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { - spin_lock(&cfs_wi_data.wi_glock); - - while (sched->ws_nthreads != 0) { - spin_unlock(&cfs_wi_data.wi_glock); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 20); - spin_lock(&cfs_wi_data.wi_glock); - } - spin_unlock(&cfs_wi_data.wi_glock); - } - while (!list_empty(&cfs_wi_data.wi_scheds)) { - sched = list_entry(cfs_wi_data.wi_scheds.next, - struct cfs_wi_sched, ws_list); - list_del(&sched->ws_list); - LIBCFS_FREE(sched, sizeof(*sched)); - } - - cfs_wi_data.wi_stopping = 0; - cfs_wi_data.wi_init = 0; -} diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c index 3d6745e63..dd1c82701 100644 --- a/drivers/staging/lustre/lustre/llite/dcache.c +++ b/drivers/staging/lustre/lustre/llite/dcache.c @@ -60,9 +60,9 @@ static void ll_release(struct dentry *de) { struct ll_dentry_data *lld; - LASSERT(de != NULL); + LASSERT(de); lld = ll_d2d(de); - if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */ + if (!lld) /* NFS copies the de->d_op methods (bug 4655) */ return; if (lld->lld_it) { @@ -80,7 +80,8 @@ static void ll_release(struct dentry *de) * This avoids a race where ll_lookup_it() instantiates a dentry, but we get * an AST before calling d_revalidate_it(). The dentry still exists (marked * INVALID) so d_lookup() matches it, but we have no lock on it (so - * lock_match() fails) and we spin around real_lookup(). */ + * lock_match() fails) and we spin around real_lookup(). + */ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) @@ -117,7 +118,8 @@ static inline int return_if_equal(struct ldlm_lock *lock, void *data) /* find any ldlm lock of the inode in mdc and lov * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int find_cbdata(struct inode *inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -131,7 +133,7 @@ static int find_cbdata(struct inode *inode) return rc; lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) + if (!lsm) return rc; rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL); @@ -163,10 +165,12 @@ static int ll_ddelete(const struct dentry *de) /* Disable this piece of code temporarily because this is called * inside dcache_lock so it's not appropriate to do lots of work * here. ATTENTION: Before this piece of code enabling, LU-2487 must be - * resolved. */ + * resolved. 
+ */ #if 0 /* if not ldlm lock for this inode, set i_nlink to 0 so that - * this inode can be recycled later b=20433 */ + * this inode can be recycled later b=20433 + */ if (d_really_is_positive(de) && !find_cbdata(d_inode(de))) clear_nlink(d_inode(de)); #endif @@ -178,19 +182,16 @@ static int ll_ddelete(const struct dentry *de) int ll_d_init(struct dentry *de) { - LASSERT(de != NULL); - CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n", - de, de, de->d_parent, d_inode(de), - d_count(de)); + de, de, de->d_parent, d_inode(de), d_count(de)); - if (de->d_fsdata == NULL) { + if (!de->d_fsdata) { struct ll_dentry_data *lld; lld = kzalloc(sizeof(*lld), GFP_NOFS); if (likely(lld)) { spin_lock(&de->d_lock); - if (likely(de->d_fsdata == NULL)) { + if (likely(!de->d_fsdata)) { de->d_fsdata = lld; __d_lustre_invalidate(de); } else { @@ -218,7 +219,8 @@ void ll_intent_drop_lock(struct lookup_intent *it) ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode); /* bug 494: intent_release may be called multiple times, from - * this thread and we don't want to double-decref this lock */ + * this thread and we don't want to double-decref this lock + */ it->d.lustre.it_lock_mode = 0; if (it->d.lustre.it_remote_lock_mode != 0) { handle.cookie = it->d.lustre.it_remote_lock_handle; @@ -251,8 +253,6 @@ void ll_invalidate_aliases(struct inode *inode) { struct dentry *dentry; - LASSERT(inode != NULL); - CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n", inode->i_ino, inode->i_generation, inode); @@ -286,9 +286,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request, void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) { - LASSERT(it != NULL); - - if (it->d.lustre.it_lock_mode && inode != NULL) { + if (it->d.lustre.it_lock_mode && inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", @@ -300,7 +298,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) { /* on 2.6 there are situation when several lookups and * revalidations may be requested during single operation. - * therefore, we don't release intent here -bzzz */ + * therefore, we don't release intent here -bzzz + */ ll_intent_drop_lock(it); } } @@ -328,7 +327,7 @@ static int ll_revalidate_dentry(struct dentry *dentry, if (lookup_flags & LOOKUP_RCU) return -ECHILD; - do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL); + do_statahead_enter(dir, &dentry, !d_inode(dentry)); ll_statahead_mark(dir, dentry); return 1; } diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 8982f7d1b..e4c82883e 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -55,6 +55,7 @@ #include "../include/lustre_lite.h" #include "../include/lustre_dlm.h" #include "../include/lustre_fid.h" +#include "../include/lustre_kernelcomm.h" #include "llite_internal.h" /* @@ -133,9 +134,8 @@ * a header lu_dirpage which describes the start/end hash, and whether this * page is empty (contains no dir entry) or hash collide with next page. * After client receives reply, several pages will be integrated into dir page - * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the - * lu_dirpage for this integrated page will be adjusted. See - * lmv_adjust_dirpages(). 
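The comment rewrite here tracks the 4.6 removal of the PAGE_CACHE_* aliases: PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT were always equal to PAGE_SIZE and PAGE_SHIFT, and the ll_dir_filler() hunk just below recomputes nrdpgs with the usual round-up-to-pages idiom. The idiom in isolation, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT: pages needed to hold nob bytes */
static unsigned long bytes_to_pages(unsigned long nob)
{
	return (nob + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", bytes_to_pages(1));	/* 1 */
	printf("%lu\n", bytes_to_pages(4096));	/* 1 */
	printf("%lu\n", bytes_to_pages(4097));	/* 2 */
	return 0;
}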
+ * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage + * for this integrated page will be adjusted. See lmv_adjust_dirpages(). * */ @@ -152,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) struct page **page_pool; struct page *page; struct lu_dirpage *dp; - int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; + int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT; int nrdpgs = 0; /* number of pages read actually */ int npages; int i; @@ -189,13 +189,11 @@ static int ll_dir_filler(void *_hash, struct page *page0) } else if (rc == 0) { body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); /* Checked by mdc_readpage() */ - LASSERT(body != NULL); - if (body->valid & OBD_MD_FLSIZE) cl_isize_write(inode, body->size); - nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1) - >> PAGE_CACHE_SHIFT; + nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1) + >> PAGE_SHIFT; SetPageUptodate(page0); } unlock_page(page0); @@ -210,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) page = page_pool[i]; if (rc < 0 || i >= nrdpgs) { - page_cache_release(page); + put_page(page); continue; } @@ -231,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n", offset, ret); } - page_cache_release(page); + put_page(page); } if (page_pool != &page0) @@ -244,11 +242,11 @@ void ll_release_page(struct page *page, int remove) kunmap(page); if (remove) { lock_page(page); - if (likely(page->mapping != NULL)) + if (likely(page->mapping)) truncate_complete_page(page->mapping, page); unlock_page(page); } - page_cache_release(page); + put_page(page); } /* @@ -274,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, if (found > 0 && !radix_tree_exceptional_entry(page)) { struct lu_dirpage *dp; - page_cache_get(page); + get_page(page); spin_unlock_irq(&mapping->tree_lock); /* * In contrast to find_lock_page() we are sure that directory @@ -314,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, page = NULL; } } else { - page_cache_release(page); + put_page(page); page = ERR_PTR(-EIO); } @@ -333,7 +331,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, struct lustre_handle lockh; struct lu_dirpage *dp; struct page *page; - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; __u64 start = 0; __u64 end = 0; @@ -356,7 +354,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, struct md_op_data *op_data; op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); + LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) return (void *)op_data; @@ -369,8 +367,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, if (request) ptlrpc_req_finished(request); if (rc < 0) { - CERROR("lock enqueue: "DFID" at %llu: rc %d\n", - PFID(ll_inode2fid(dir)), hash, rc); + CERROR("lock enqueue: " DFID " at %llu: rc %d\n", + PFID(ll_inode2fid(dir)), hash, rc); return ERR_PTR(rc); } @@ -380,7 +378,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, &it.d.lustre.it_lock_handle, dir, NULL); } else { /* for cross-ref object, l_ast_data of the lock may not be set, - * we reset it here */ + * we reset it here + */ md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie, dir, NULL); } @@ -392,7 +391,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, CERROR("dir page locate: "DFID" at %llu: rc %ld\n", PFID(ll_inode2fid(dir)), lhash, 
PTR_ERR(page)); goto out_unlock; - } else if (page != NULL) { + } else if (page) { /* * XXX nikita: not entirely correct handling of a corner case: * suppose hash chain of entries with hash value HASH crosses @@ -498,7 +497,7 @@ int ll_dir_read(struct inode *inode, struct dir_context *ctx) __u64 next; dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL && !done; + for (ent = lu_dirent_start(dp); ent && !done; ent = lu_dirent_next(ent)) { __u16 type; int namelen; @@ -688,7 +687,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, struct obd_device *mgc = lsi->lsi_mgc; int lum_size; - if (lump != NULL) { + if (lump) { /* * This is coming from userspace, so should be in * local endian. But the MDS would like it in little @@ -724,7 +723,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, if (IS_ERR(op_data)) return PTR_ERR(op_data); - if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC)) + if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC)) op_data->op_cli_flags |= CLI_SET_MEA; /* swabbing is done in lov_setstripe() on server side */ @@ -738,8 +737,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, } /* In the following we use the fact that LOV_USER_MAGIC_V1 and - LOV_USER_MAGIC_V3 have the same initial fields so we do not - need to make the distinction between the 2 versions */ + * LOV_USER_MAGIC_V3 have the same initial fields so we do not + * need to make the distinction between the 2 versions + */ if (set_default && mgc->u.cli.cl_mgc_mgsexp) { char *param = NULL; char *buf; @@ -811,7 +811,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); lmmsize = body->eadatasize; @@ -823,7 +822,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); /* * This is coming from the MDS, so is probably in @@ -879,7 +877,7 @@ int ll_get_mdt_idx(struct inode *inode) /** * Generic handler to do any pre-copy work. * - * It send a first hsm_progress (with extent length == 0) to coordinator as a + * It sends a first hsm_progress (with extent length == 0) to coordinator as a * first information for it that real work has started. * * Moreover, for a ARCHIVE request, it will sample the file data version and @@ -931,8 +929,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy) goto progress; } - /* Store it the hsm_copy for later copytool use. - * Always modified even if no lsm. */ + /* Store in the hsm_copy for later copytool use. + * Always modified even if no lsm. + */ copy->hc_data_version = data_version; } @@ -1008,12 +1007,14 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) goto progress; } - /* Store it the hsm_copy for later copytool use. - * Always modified even if no lsm. */ + /* Store in the hsm_copy for later copytool use. + * Always modified even if no lsm. + */ hpk.hpk_data_version = data_version; /* File could have been stripped during archiving, so we need - * to check anyway. */ + * to check anyway. + */ if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) && (copy->hc_data_version != data_version)) { CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " @@ -1025,7 +1026,8 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) * the cdt will loop on retried archive requests. 
* The policy engine will ask for a new archive later * when the file will not be modified for some tunable - * time */ + * time + */ /* we do not notify caller */ hpk.hpk_flags &= ~HP_FLAG_RETRY; /* hpk_errval must be >= 0 */ @@ -1153,7 +1155,8 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) return rc; } /* If QIF_SPACE is not set, client should collect the - * space usage from OSSs by itself */ + * space usage from OSSs by itself + */ if (cmd == Q_GETQUOTA && !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) && !oqctl->qc_dqblk.dqb_curspace) { @@ -1204,7 +1207,8 @@ out: /* This function tries to get a single name component, * to send to the server. No actual path traversal involved, - * so we limit to NAME_MAX */ + * so we limit to NAME_MAX + */ static char *ll_getname(const char __user *filename) { int ret = 0, len; @@ -1252,7 +1256,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ll_iocontrol(inode, file, cmd, arg); case FSFILT_IOC_GETVERSION_OLD: case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int *)arg); + return put_user(inode->i_generation, (int __user *)arg); /* We need to special case any other ioctls we want to handle, * to send them to the MDS/OST as appropriate and to properly * network encode the arg field. @@ -1266,7 +1270,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (mdtidx < 0) return mdtidx; - if (put_user((int)mdtidx, (int *)arg)) + if (put_user((int)mdtidx, (int __user *)arg)) return -EFAULT; return 0; @@ -1278,7 +1282,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) char *filename; struct md_op_data *op_data; - rc = obd_ioctl_getdata(&buf, &len, (void *)arg); + rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); if (rc) return rc; data = (void *)buf; @@ -1320,12 +1324,12 @@ out_free: int len; int rc; - rc = obd_ioctl_getdata(&buf, &len, (void *)arg); + rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); if (rc) return rc; data = (void *)buf; - if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL || + if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) { rc = -EINVAL; goto lmv_out_free; @@ -1363,8 +1367,8 @@ lmv_out_free: case LL_IOC_LOV_SETSTRIPE: { struct lov_user_md_v3 lumv3; struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg; - struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg; + struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; + struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; int set_default = 0; @@ -1389,7 +1393,7 @@ lmv_out_free: return rc; } case LL_IOC_LMV_GETSTRIPE: { - struct lmv_user_md *lump = (struct lmv_user_md *)arg; + struct lmv_user_md __user *lump = (void __user *)arg; struct lmv_user_md lum; struct lmv_user_md *tmp; int lum_size; @@ -1422,7 +1426,7 @@ lmv_out_free: tmp->lum_objects[0].lum_mds = mdtindex; memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode), sizeof(struct lu_fid)); - if (copy_to_user((void *)arg, tmp, lum_size)) { + if (copy_to_user((void __user *)arg, tmp, lum_size)) { rc = -EFAULT; goto free_lmv; } @@ -1433,13 +1437,13 @@ free_lmv: case LL_IOC_LOV_SWAP_LAYOUTS: return -EPERM; case LL_IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void *)arg); + return ll_obd_statfs(inode, (void __user *)arg); case LL_IOC_LOV_GETSTRIPE: case LL_IOC_MDC_GETINFO: case IOC_MDC_GETFILEINFO: case IOC_MDC_GETFILESTRIPE: { 
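ll_getname() above copies a single directory-entry name from user space, so it bounds the copy at NAME_MAX rather than PATH_MAX and fails rather than truncates. A user-space analogue (strndup stands in for the kernel's user-copy helpers; the '/' rejection is an illustrative extra, the kernel helper only enforces the length):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_ 255	/* stand-in for the kernel's NAME_MAX */

static char *getname_component(const char *user_name)
{
	char *name = strndup(user_name, NAME_MAX_ + 1);

	if (!name)
		return NULL;			/* out of memory */
	if (strlen(name) > NAME_MAX_) {		/* no NUL within the bound */
		free(name);
		errno = ENAMETOOLONG;
		return NULL;
	}
	if (strchr(name, '/')) {		/* single component only */
		free(name);
		errno = EINVAL;
		return NULL;
	}
	return name;
}

int main(void)
{
	char *n = getname_component("stripe_dir");

	if (n) {
		printf("component '%s'\n", n);
		free(n);
	}
	return 0;
}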
struct ptlrpc_request *request = NULL; - struct lov_user_md *lump; + struct lov_user_md __user *lump; struct lov_mds_md *lmm = NULL; struct mdt_body *body; char *filename = NULL; @@ -1447,7 +1451,7 @@ free_lmv: if (cmd == IOC_MDC_GETFILEINFO || cmd == IOC_MDC_GETFILESTRIPE) { - filename = ll_getname((const char *)arg); + filename = ll_getname((const char __user *)arg); if (IS_ERR(filename)) return PTR_ERR(filename); @@ -1460,7 +1464,7 @@ free_lmv: if (request) { body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); + LASSERT(body); } else { goto out_req; } @@ -1476,11 +1480,11 @@ free_lmv: if (cmd == IOC_MDC_GETFILESTRIPE || cmd == LL_IOC_LOV_GETSTRIPE) { - lump = (struct lov_user_md *)arg; + lump = (struct lov_user_md __user *)arg; } else { - struct lov_user_mds_data *lmdp; + struct lov_user_mds_data __user *lmdp; - lmdp = (struct lov_user_mds_data *)arg; + lmdp = (struct lov_user_mds_data __user *)arg; lump = &lmdp->lmd_lmm; } if (copy_to_user(lump, lmm, lmmsize)) { @@ -1492,7 +1496,7 @@ free_lmv: } skip_lmm: if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) { - struct lov_user_mds_data *lmdp; + struct lov_user_mds_data __user *lmdp; lstat_t st = { 0 }; st.st_dev = inode->i_sb->s_dev; @@ -1502,14 +1506,14 @@ skip_lmm: st.st_gid = body->gid; st.st_rdev = body->rdev; st.st_size = body->size; - st.st_blksize = PAGE_CACHE_SIZE; + st.st_blksize = PAGE_SIZE; st.st_blocks = body->blocks; st.st_atime = body->atime; st.st_mtime = body->mtime; st.st_ctime = body->ctime; st.st_ino = inode->i_ino; - lmdp = (struct lov_user_mds_data *)arg; + lmdp = (struct lov_user_mds_data __user *)arg; if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) { rc = -EFAULT; goto out_req; @@ -1523,14 +1527,14 @@ out_req: return rc; } case IOC_LOV_GETINFO: { - struct lov_user_mds_data *lumd; + struct lov_user_mds_data __user *lumd; struct lov_stripe_md *lsm; - struct lov_user_md *lum; + struct lov_user_md __user *lum; struct lov_mds_md *lmm; int lmmsize; lstat_t st; - lumd = (struct lov_user_mds_data *)arg; + lumd = (struct lov_user_mds_data __user *)arg; lum = &lumd->lmd_lmm; rc = ll_get_max_mdsize(sbi, &lmmsize); @@ -1538,7 +1542,7 @@ out_req: return rc; lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (lmm == NULL) + if (!lmm) return -ENOMEM; if (copy_from_user(lmm, lum, lmmsize)) { rc = -EFAULT; @@ -1636,8 +1640,8 @@ free_lmm: NULL); if (rc) { CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void *)arg, check, - sizeof(*check))) + if (copy_to_user((void __user *)arg, check, + sizeof(*check))) CDEBUG(D_QUOTA, "copy_to_user failed\n"); goto out_poll; } @@ -1646,8 +1650,8 @@ free_lmm: NULL); if (rc) { CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void *)arg, check, - sizeof(*check))) + if (copy_to_user((void __user *)arg, check, + sizeof(*check))) CDEBUG(D_QUOTA, "copy_to_user failed\n"); goto out_poll; } @@ -1662,14 +1666,15 @@ out_poll: if (!qctl) return -ENOMEM; - if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) { + if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) { rc = -EFAULT; goto out_quotactl; } rc = quotactl_ioctl(sbi, qctl); - if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl))) + if (rc == 0 && copy_to_user((void __user *)arg, qctl, + sizeof(*qctl))) rc = -EFAULT; out_quotactl: @@ -1686,7 +1691,6 @@ out_quotactl: if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); rc = rct_add(&sbi->ll_rct, 
current_pid(), arg); if (!rc) fd->fd_flags |= LL_FILE_RMTACL; @@ -1699,7 +1703,7 @@ out_quotactl: int count, vallen; struct obd_export *exp; - if (copy_from_user(&count, (int *)arg, sizeof(int))) + if (copy_from_user(&count, (int __user *)arg, sizeof(int))) return -EFAULT; /* get ost count when count is zero, get mdt count otherwise */ @@ -1712,34 +1716,35 @@ out_quotactl: return rc; } - if (copy_to_user((int *)arg, &count, sizeof(int))) + if (copy_to_user((int __user *)arg, &count, sizeof(int))) return -EFAULT; return 0; } case LL_IOC_PATH2FID: - if (copy_to_user((void *)arg, ll_inode2fid(inode), - sizeof(struct lu_fid))) + if (copy_to_user((void __user *)arg, ll_inode2fid(inode), + sizeof(struct lu_fid))) return -EFAULT; return 0; case LL_IOC_GET_CONNECT_FLAGS: { - return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg); + return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, + (void __user *)arg); } case OBD_IOC_CHANGELOG_SEND: case OBD_IOC_CHANGELOG_CLEAR: if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg, + rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, sizeof(struct ioc_changelog)); return rc; case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void *)arg); + return ll_fid2path(inode, (void __user *)arg); case LL_IOC_HSM_REQUEST: { struct hsm_user_request *hur; ssize_t totalsize; - hur = memdup_user((void *)arg, sizeof(*hur)); + hur = memdup_user((void __user *)arg, sizeof(*hur)); if (IS_ERR(hur)) return PTR_ERR(hur); @@ -1754,11 +1759,11 @@ out_quotactl: return -E2BIG; hur = libcfs_kvzalloc(totalsize, GFP_NOFS); - if (hur == NULL) + if (!hur) return -ENOMEM; /* Copy the whole struct */ - if (copy_from_user(hur, (void *)arg, totalsize)) { + if (copy_from_user(hur, (void __user *)arg, totalsize)) { kvfree(hur); return -EFAULT; } @@ -1794,7 +1799,7 @@ out_quotactl: struct hsm_progress_kernel hpk; struct hsm_progress hp; - if (copy_from_user(&hp, (void *)arg, sizeof(hp))) + if (copy_from_user(&hp, (void __user *)arg, sizeof(hp))) return -EFAULT; hpk.hpk_fid = hp.hp_fid; @@ -1805,13 +1810,14 @@ out_quotactl: hpk.hpk_data_version = 0; /* File may not exist in Lustre; all progress - * reported to Lustre root */ + * reported to Lustre root + */ rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk, NULL); return rc; } case LL_IOC_HSM_CT_START: - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg, + rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, sizeof(struct lustre_kernelcomm)); return rc; @@ -1819,12 +1825,12 @@ out_quotactl: struct hsm_copy *copy; int rc; - copy = memdup_user((char *)arg, sizeof(*copy)); + copy = memdup_user((char __user *)arg, sizeof(*copy)); if (IS_ERR(copy)) return PTR_ERR(copy); rc = ll_ioc_copy_start(inode->i_sb, copy); - if (copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) rc = -EFAULT; kfree(copy); @@ -1834,19 +1840,20 @@ out_quotactl: struct hsm_copy *copy; int rc; - copy = memdup_user((char *)arg, sizeof(*copy)); + copy = memdup_user((char __user *)arg, sizeof(*copy)); if (IS_ERR(copy)) return PTR_ERR(copy); rc = ll_ioc_copy_end(inode->i_sb, copy); - if (copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) rc = -EFAULT; kfree(copy); return rc; } default: - return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg); + return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, + (void __user *)arg); } } diff --git a/drivers/staging/lustre/lustre/llite/file.c 
b/drivers/staging/lustre/lustre/llite/file.c index 39e2ffd5f..cf619af3c 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -64,8 +64,8 @@ static struct ll_file_data *ll_file_data_get(void) { struct ll_file_data *fd; - fd = kmem_cache_alloc(ll_file_data_slab, GFP_NOFS | __GFP_ZERO); - if (fd == NULL) + fd = kmem_cache_zalloc(ll_file_data_slab, GFP_NOFS); + if (!fd) return NULL; fd->fd_write_failed = false; return fd; @@ -73,7 +73,7 @@ static struct ll_file_data *ll_file_data_get(void) static void ll_file_data_put(struct ll_file_data *fd) { - if (fd != NULL) + if (fd) kmem_cache_free(ll_file_data_slab, fd); } @@ -134,7 +134,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, int epoch_close = 1; int rc; - if (obd == NULL) { + if (!obd) { /* * XXX: in case of LMV, is this correct to access * ->exp_handle? @@ -153,7 +153,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } ll_prepare_close(inode, op_data, och); - if (data_version != NULL) { + if (data_version) { /* Pass in data_version implies release. */ op_data->op_bias |= MDS_HSM_RELEASE; op_data->op_data_version = *data_version; @@ -166,7 +166,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, /* This close must have the epoch closed. */ LASSERT(epoch_close); /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. */ + * OSTs and send setattr to back to MDS. + */ rc = ll_som_update(inode, op_data); if (rc) { CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n", @@ -179,7 +180,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } /* DATA_MODIFIED flag was successfully sent on close, cancel data - * modification flag. */ + * modification flag. + */ if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { struct ll_inode_info *lli = ll_i2info(inode); @@ -242,7 +244,8 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) mutex_lock(&lli->lli_och_mutex); if (*och_usecount > 0) { /* There are still users of this handle, so skip - * freeing it. */ + * freeing it. + */ mutex_unlock(&lli->lli_och_mutex); return 0; } @@ -251,9 +254,10 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) *och_p = NULL; mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { /* There might be a race and this handle may already - be closed. */ + * be closed. + */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, NULL); } @@ -276,26 +280,29 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode, if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { bool lease_broken; /* Usually the lease is not released when the - * application crashed, we need to release here. */ + * application crashed, we need to release here. + */ rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken); - CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n", - PFID(&lli->lli_fid), rc, lease_broken); + CDEBUG(rc ? 
D_ERROR : D_INODE, + "Clean up lease " DFID " %d/%d\n", + PFID(&lli->lli_fid), rc, lease_broken); fd->fd_lease_och = NULL; } - if (fd->fd_och != NULL) { + if (fd->fd_och) { rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL); fd->fd_och = NULL; goto out; } /* Let's see if we have good enough OPEN lock on the file and if - we can skip talking to MDS */ + * we can skip talking to MDS + */ mutex_lock(&lli->lli_och_mutex); if (fd->fd_omode & FMODE_WRITE) { @@ -343,7 +350,6 @@ int ll_file_release(struct inode *inode, struct file *file) if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) { fd->fd_flags &= ~LL_FILE_RMTACL; rct_del(&sbi->ll_rct, current_pid()); @@ -355,11 +361,12 @@ int ll_file_release(struct inode *inode, struct file *file) if (!is_root_inode(inode)) ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); + LASSERT(fd); - /* The last ref on @file, maybe not the owner pid of statahead. + /* The last ref on @file, maybe not be the owner pid of statahead. * Different processes can open the same dir, "ll_opendir_key" means: - * it is me that should stop the statahead thread. */ + * it is me that should stop the statahead thread. + */ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0) ll_stop_statahead(inode, lli->lli_opendir_key); @@ -396,16 +403,16 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm, __u32 opc = LUSTRE_OPC_ANY; int rc; - /* Usually we come here only for NFSD, and we want open lock. - But we can also get here with pre 2.6.15 patchless kernels, and in - that case that lock is also ok */ + /* Usually we come here only for NFSD, and we want open lock. */ /* We can also get here if there was cached open handle in revalidate_it * but it disappeared while we were getting from there to ll_file_open. * But this means this file was closed and immediately opened which - * makes a good candidate for using OPEN lock */ + * makes a good candidate for using OPEN lock + */ /* If lmmsize & lmm are not 0, we are just setting stripe info - * parameters. No need for the open lock */ - if (lmm == NULL && lmmsize == 0) { + * parameters. No need for the open lock + */ + if (!lmm && lmmsize == 0) { itp->it_flags |= MDS_OPEN_LOCK; if (itp->it_flags & FMODE_WRITE) opc = LUSTRE_OPC_CREATE; @@ -426,7 +433,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm, * with messages with -ESTALE errors. 
*/ if (!it_disposition(itp, DISP_OPEN_OPEN) || - it_open_error(DISP_OPEN_OPEN, itp)) + it_open_error(DISP_OPEN_OPEN, itp)) goto out; ll_release_openhandle(inode, itp); goto out; @@ -492,7 +499,7 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, LASSERT(!LUSTRE_FPRIVATE(file)); - LASSERT(fd != NULL); + LASSERT(fd); if (och) { struct ptlrpc_request *req = it->d.lustre.it_data; @@ -543,7 +550,7 @@ int ll_file_open(struct inode *inode, struct file *file) file->private_data = NULL; /* prevent ll_local_open assertion */ fd = ll_file_data_get(); - if (fd == NULL) { + if (!fd) { rc = -ENOMEM; goto out_openerr; } @@ -551,7 +558,7 @@ int ll_file_open(struct inode *inode, struct file *file) fd->fd_file = file; if (S_ISDIR(inode->i_mode)) { spin_lock(&lli->lli_sa_lock); - if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL && + if (!lli->lli_opendir_key && !lli->lli_sai && lli->lli_opendir_pid == 0) { lli->lli_opendir_key = fd; lli->lli_opendir_pid = current_pid(); @@ -568,7 +575,8 @@ int ll_file_open(struct inode *inode, struct file *file) if (!it || !it->d.lustre.it_disposition) { /* Convert f_flags into access mode. We cannot use file->f_mode, * because everything but O_ACCMODE mask was stripped from - * there */ + * there + */ if ((oit.it_flags + 1) & O_ACCMODE) oit.it_flags++; if (file->f_flags & O_TRUNC) @@ -577,17 +585,20 @@ int ll_file_open(struct inode *inode, struct file *file) /* kernel only call f_op->open in dentry_open. filp_open calls * dentry_open after call to open_namei that checks permissions. * Only nfsd_open call dentry_open directly without checking - * permissions and because of that this code below is safe. */ + * permissions and because of that this code below is safe. + */ if (oit.it_flags & (FMODE_WRITE | FMODE_READ)) oit.it_flags |= MDS_OPEN_OWNEROVERRIDE; /* We do not want O_EXCL here, presumably we opened the file - * already? XXX - NFS implications? */ + * already? XXX - NFS implications? + */ oit.it_flags &= ~O_EXCL; /* bug20584, if "it_flags" contains O_CREAT, the file will be * created if necessary, then "IT_CREAT" should be set to keep - * consistent with it */ + * consistent with it + */ if (oit.it_flags & O_CREAT) oit.it_op |= IT_CREAT; @@ -611,7 +622,8 @@ restart: if (*och_p) { /* Open handle is present */ if (it_disposition(it, DISP_OPEN_OPEN)) { /* Well, there's extra open request that we do not need, - let's close it somehow. This will decref request. */ + * let's close it somehow. This will decref request. + */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc) { mutex_unlock(&lli->lli_och_mutex); @@ -632,10 +644,11 @@ restart: LASSERT(*och_usecount == 0); if (!it->d.lustre.it_disposition) { /* We cannot just request lock handle now, new ELC code - means that one of other OPEN locks for this file - could be cancelled, and since blocking ast handler - would attempt to grab och_mutex as well, that would - result in a deadlock */ + * means that one of other OPEN locks for this file + * could be cancelled, and since blocking ast handler + * would attempt to grab och_mutex as well, that would + * result in a deadlock + */ mutex_unlock(&lli->lli_och_mutex); it->it_create_mode |= M_CHECK_STALE; rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it); @@ -655,9 +668,11 @@ restart: /* md_intent_lock() didn't get a request ref if there was an * open error, so don't do cleanup on the request here - * (bug 3430) */ + * (bug 3430) + */ /* XXX (green): Should not we bail out on any error here, not - * just open error? */ + * just open error? 
+ */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc) goto out_och_free; @@ -672,8 +687,9 @@ restart: fd = NULL; /* Must do this outside lli_och_mutex lock to prevent deadlock where - different kind of OPEN lock for this same inode gets cancelled - by ldlm_cancel_lru */ + * different kind of OPEN lock for this same inode gets cancelled + * by ldlm_cancel_lru + */ if (!S_ISREG(inode->i_mode)) goto out_och_free; @@ -712,7 +728,8 @@ out_openerr: } static int ll_md_blocking_lease_ast(struct ldlm_lock *lock, - struct ldlm_lock_desc *desc, void *data, int flag) + struct ldlm_lock_desc *desc, + void *data, int flag) { int rc; struct lustre_handle lockh; @@ -752,7 +769,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, if (fmode != FMODE_WRITE && fmode != FMODE_READ) return ERR_PTR(-EINVAL); - if (file != NULL) { + if (file) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct obd_client_handle **och_p; @@ -764,18 +781,18 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, /* Get the openhandle of the file */ rc = -EBUSY; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { mutex_unlock(&lli->lli_och_mutex); return ERR_PTR(rc); } - if (fd->fd_och == NULL) { + if (!fd->fd_och) { if (file->f_mode & FMODE_WRITE) { - LASSERT(lli->lli_mds_write_och != NULL); + LASSERT(lli->lli_mds_write_och); och_p = &lli->lli_mds_write_och; och_usecount = &lli->lli_open_fd_write_count; } else { - LASSERT(lli->lli_mds_read_och != NULL); + LASSERT(lli->lli_mds_read_och); och_p = &lli->lli_mds_read_och; och_usecount = &lli->lli_open_fd_read_count; } @@ -790,7 +807,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, if (rc < 0) /* more than 1 opener */ return ERR_PTR(rc); - LASSERT(fd->fd_och != NULL); + LASSERT(fd->fd_och); old_handle = fd->fd_och->och_fh; } @@ -799,7 +816,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, return ERR_PTR(-ENOMEM); op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); + LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { rc = PTR_ERR(op_data); goto out; @@ -811,13 +828,14 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.it_flags = fmode | open_flags; it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE; rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req, - ll_md_blocking_lease_ast, + ll_md_blocking_lease_ast, /* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise * it can be cancelled which may mislead applications that the lease is * broken; * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast - * doesn't deal with openhandle, so normal openhandle will be leaked. */ + * doesn't deal with openhandle, so normal openhandle will be leaked. 
+ */ LDLM_FL_NO_LRU | LDLM_FL_EXCL); ll_finish_md_op_data(op_data); ptlrpc_req_finished(req); @@ -847,8 +865,8 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) { /* open lock must return for lease */ CERROR(DFID "lease granted but no open lock, %d/%llu.\n", - PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode, - it.d.lustre.it_lock_bits); + PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode, + it.d.lustre.it_lock_bits); rc = -EPROTO; goto out_close; } @@ -864,7 +882,7 @@ out_close: /* cancel open lock */ if (it.d.lustre.it_lock_mode != 0) { ldlm_lock_decref_and_cancel(&och->och_lease_handle, - it.d.lustre.it_lock_mode); + it.d.lustre.it_lock_mode); it.d.lustre.it_lock_mode = 0; } out_release_it: @@ -886,19 +904,19 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode, int rc; lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); cancelled = ldlm_is_cancel(lock); unlock_res_and_lock(lock); ldlm_lock_put(lock); } - CDEBUG(D_INODE, "lease for "DFID" broken? %d\n", - PFID(&ll_i2info(inode)->lli_fid), cancelled); + CDEBUG(D_INODE, "lease for " DFID " broken? %d\n", + PFID(&ll_i2info(inode)->lli_fid), cancelled); if (!cancelled) ldlm_cli_cancel(&och->och_lease_handle, 0); - if (lease_broken != NULL) + if (lease_broken) *lease_broken = cancelled; rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, @@ -914,7 +932,7 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, struct obd_info oinfo = { }; int rc; - LASSERT(lsm != NULL); + LASSERT(lsm); oinfo.oi_md = lsm; oinfo.oi_oa = obdo; @@ -933,8 +951,8 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, } set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("can't allocate ptlrpc set\n"); + if (!set) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); rc = -ENOMEM; } else { rc = obd_getattr_async(exp, &oinfo, set); @@ -986,7 +1004,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode) ll_inode_size_lock(inode); /* merge timestamps the most recently obtained from mds with - timestamps obtained from osts */ + * timestamps obtained from osts + */ LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime; LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime; LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime; @@ -1009,8 +1028,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode) if (lvb.lvb_mtime < attr->cat_mtime) lvb.lvb_mtime = attr->cat_mtime; - CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n", - PFID(&lli->lli_fid), attr->cat_size); + CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n", + PFID(&lli->lli_fid), attr->cat_size); cl_isize_write_nolock(inode, attr->cat_size); inode->i_blocks = attr->cat_blocks; @@ -1155,12 +1174,13 @@ restart: out: cl_io_fini(env, io); /* If any bit been read/written (result != 0), we just return - * short read/write instead of restart io. */ + * short read/write instead of restart io. + */ if ((result == 0 || result == -ENODATA) && io->ci_need_restart) { CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n", iot == CIT_READ ? 
"read" : "write", file, *ppos, count); - LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob); + LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob); goto restart; } @@ -1221,7 +1241,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) args->u.normal.via_iocb = iocb; result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE, - &iocb->ki_pos, iov_iter_count(from)); + &iocb->ki_pos, iov_iter_count(from)); cl_env_put(env, &refcheck); return result; } @@ -1260,8 +1280,8 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx) int rc = 0; struct lov_stripe_md *lsm = NULL, *lsm2; - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) return -ENOMEM; lsm = ccc_inode_lsm_get(inode); @@ -1274,7 +1294,7 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx) (lsm->lsm_stripe_count)); lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS); - if (lsm2 == NULL) { + if (!lsm2) { rc = -ENOMEM; goto out; } @@ -1307,7 +1327,7 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg) if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg, + if (copy_from_user(&ucreat, (struct ll_recreate_obj __user *)arg, sizeof(ucreat))) return -EFAULT; @@ -1325,7 +1345,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg) if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid))) + if (copy_from_user(&fid, (struct lu_fid __user *)arg, sizeof(fid))) return -EFAULT; fid_to_ostid(&fid, &oi); @@ -1341,7 +1361,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, int rc = 0; lsm = ccc_inode_lsm_get(inode); - if (lsm != NULL) { + if (lsm) { ccc_inode_lsm_put(inode, lsm); CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n", inode->i_ino); @@ -1401,18 +1421,16 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); /* checked by mdc_getattr_name */ lmmsize = body->eadatasize; if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) || - lmmsize == 0) { + lmmsize == 0) { rc = -ENODATA; goto out; } lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) && (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) { @@ -1433,7 +1451,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, stripe_count = 0; /* if function called for directory - we should - * avoid swab not existent lsm objects */ + * avoid swab not existent lsm objects + */ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) { lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm); if (S_ISREG(body->mode)) @@ -1457,7 +1476,7 @@ out: } static int ll_lov_setea(struct inode *inode, struct file *file, - unsigned long arg) + unsigned long arg) { int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE; struct lov_user_md *lump; @@ -1469,16 +1488,16 @@ static int ll_lov_setea(struct inode *inode, struct file *file, return -EPERM; lump = libcfs_kvzalloc(lum_size, GFP_NOFS); - if (lump == NULL) + if (!lump) return -ENOMEM; - if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) { + if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) { kvfree(lump); return -EFAULT; } rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump, - lum_size); + 
lum_size); cl_lov_delay_create_clear(&file->f_flags); kvfree(lump); @@ -1488,12 +1507,12 @@ static int ll_lov_setea(struct inode *inode, struct file *file, static int ll_lov_setstripe(struct inode *inode, struct file *file, unsigned long arg) { - struct lov_user_md_v3 lumv3; - struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg; - struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg; - int lum_size, rc; - int flags = FMODE_WRITE; + struct lov_user_md_v3 lumv3; + struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; + struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; + struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; + int lum_size, rc; + int flags = FMODE_WRITE; /* first try with v1 which is smaller than v3 */ lum_size = sizeof(struct lov_user_md_v1); @@ -1518,7 +1537,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file, ll_layout_refresh(inode, &gen); lsm = ccc_inode_lsm_get(inode); rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), - 0, lsm, (void *)arg); + 0, lsm, (void __user *)arg); ccc_inode_lsm_put(inode, lsm); } return rc; @@ -1530,9 +1549,9 @@ static int ll_lov_getstripe(struct inode *inode, unsigned long arg) int rc = -ENODATA; lsm = ccc_inode_lsm_get(inode); - if (lsm != NULL) + if (lsm) rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, - lsm, (void *)arg); + lsm, (void __user *)arg); ccc_inode_lsm_put(inode, lsm); return rc; } @@ -1560,7 +1579,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) spin_unlock(&lli->lli_lock); return -EINVAL; } - LASSERT(fd->fd_grouplock.cg_lock == NULL); + LASSERT(!fd->fd_grouplock.cg_lock); spin_unlock(&lli->lli_lock); rc = cl_get_grouplock(cl_i2info(inode)->lli_clob, @@ -1597,11 +1616,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file, CWARN("no group lock held\n"); return -EINVAL; } - LASSERT(fd->fd_grouplock.cg_lock != NULL); + LASSERT(fd->fd_grouplock.cg_lock); if (fd->fd_grouplock.cg_gid != arg) { CWARN("group lock %lu doesn't match current id %lu\n", - arg, fd->fd_grouplock.cg_gid); + arg, fd->fd_grouplock.cg_gid); spin_unlock(&lli->lli_lock); return -EINVAL; } @@ -1688,7 +1707,7 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, } lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) + if (!lsm) return -ENOENT; /* If the stripe_count > 1 and the application does not understand @@ -1782,9 +1801,10 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) int rc = 0; /* Get the extent count so we can calculate the size of - * required fiemap buffer */ + * required fiemap buffer + */ if (get_user(extent_count, - &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) + &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) return -EFAULT; if (extent_count >= @@ -1794,7 +1814,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) sizeof(struct ll_fiemap_extent)); fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (fiemap_s == NULL) + if (!fiemap_s) return -ENOMEM; /* get the fiemap value */ @@ -1806,11 +1826,12 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) /* If fm_extent_count is non-zero, read the first extent since * it is used to calculate end_offset and device from previous - * fiemap call. */ + * fiemap call. 
+ */ if (extent_count) { if (copy_from_user(&fiemap_s->fm_extents[0], - (char __user *)arg + sizeof(*fiemap_s), - sizeof(struct ll_fiemap_extent))) { + (char __user *)arg + sizeof(*fiemap_s), + sizeof(struct ll_fiemap_extent))) { rc = -EFAULT; goto error; } @@ -1826,7 +1847,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) ret_bytes += (fiemap_s->fm_mapped_extents * sizeof(struct ll_fiemap_extent)); - if (copy_to_user((void *)arg, fiemap_s, ret_bytes)) + if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes)) rc = -EFAULT; error: @@ -1917,13 +1938,14 @@ int ll_hsm_release(struct inode *inode) /* Release the file. * NB: lease lock handle is released in mdc_hsm_release_pack() because - * we still need it to pack l_remote_handle to MDT. */ + * we still need it to pack l_remote_handle to MDT. + */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, &data_version); och = NULL; out: - if (och != NULL && !IS_ERR(och)) /* close the file */ + if (och && !IS_ERR(och)) /* close the file */ ll_lease_close(och, inode, NULL); return rc; @@ -2007,7 +2029,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } /* to be able to restore mtime and atime after swap - * we need to first save them */ + * we need to first save them + */ if (lsl->sl_flags & (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) { llss->ia1.ia_mtime = llss->inode1->i_mtime; @@ -2019,7 +2042,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } /* ultimate check, before swapping the layouts we check if - * dataversion has changed (if requested) */ + * dataversion has changed (if requested) + */ if (llss->check_dv1) { rc = ll_data_version(llss->inode1, &dv, 0); if (rc) @@ -2042,9 +2066,11 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, /* struct md_op_data is used to send the swap args to the mdt * only flags is missing, so we use struct mdc_swap_layouts - * through the md_op_data->op_data */ + * through the md_op_data->op_data + */ /* flags from user space have to be converted before they are send to - * server, no flag is sent today, they are only used on the client */ + * server, no flag is sent today, they are only used on the client + */ msl.msl_flags = 0; rc = -ENOMEM; op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0, @@ -2113,7 +2139,8 @@ static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss) return -EINVAL; /* Non-root users are forbidden to set or clear flags which are - * NOT defined in HSM_USER_MASK. */ + * NOT defined in HSM_USER_MASK. + */ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) && !capable(CFS_CAP_SYS_ADMIN)) return -EPERM; @@ -2211,14 +2238,14 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case LL_IOC_GETFLAGS: /* Get the current value of the file flags */ - return put_user(fd->fd_flags, (int *)arg); + return put_user(fd->fd_flags, (int __user *)arg); case LL_IOC_SETFLAGS: case LL_IOC_CLRFLAGS: /* Set or clear specific file flags */ /* XXX This probably needs checks to ensure the flags are * not abused, and to handle any flag side effects. 
*/ - if (get_user(flags, (int *) arg)) + if (get_user(flags, (int __user *)arg)) return -EFAULT; if (cmd == LL_IOC_SETFLAGS) { @@ -2242,15 +2269,15 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct file *file2; struct lustre_swap_layouts lsl; - if (copy_from_user(&lsl, (char *)arg, - sizeof(struct lustre_swap_layouts))) + if (copy_from_user(&lsl, (char __user *)arg, + sizeof(struct lustre_swap_layouts))) return -EFAULT; if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */ return -EPERM; file2 = fget(lsl.sl_fd); - if (file2 == NULL) + if (!file2) return -EBADF; rc = -EPERM; @@ -2272,13 +2299,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ll_iocontrol(inode, file, cmd, arg); case FSFILT_IOC_GETVERSION_OLD: case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int *)arg); + return put_user(inode->i_generation, (int __user *)arg); case LL_IOC_GROUP_LOCK: return ll_get_grouplock(inode, file, arg); case LL_IOC_GROUP_UNLOCK: return ll_put_grouplock(inode, file, arg); case IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void *)arg); + return ll_obd_statfs(inode, (void __user *)arg); /* We need to special case any other ioctls we want to handle, * to send them to the MDS/OST as appropriate and to properly @@ -2289,25 +2316,26 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case LL_IOC_FLUSHCTX: return ll_flush_ctx(inode); case LL_IOC_PATH2FID: { - if (copy_to_user((void *)arg, ll_inode2fid(inode), + if (copy_to_user((void __user *)arg, ll_inode2fid(inode), sizeof(struct lu_fid))) return -EFAULT; return 0; } case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void *)arg); + return ll_fid2path(inode, (void __user *)arg); case LL_IOC_DATA_VERSION: { struct ioc_data_version idv; int rc; - if (copy_from_user(&idv, (char *)arg, sizeof(idv))) + if (copy_from_user(&idv, (char __user *)arg, sizeof(idv))) return -EFAULT; rc = ll_data_version(inode, &idv.idv_version, - !(idv.idv_flags & LL_DV_NOFLUSH)); + !(idv.idv_flags & LL_DV_NOFLUSH)); - if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv))) + if (rc == 0 && copy_to_user((char __user *)arg, &idv, + sizeof(idv))) return -EFAULT; return rc; @@ -2320,7 +2348,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (mdtidx < 0) return mdtidx; - if (put_user((int)mdtidx, (int *)arg)) + if (put_user(mdtidx, (int __user *)arg)) return -EFAULT; return 0; @@ -2347,7 +2375,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (copy_to_user((void *)arg, hus, sizeof(*hus))) + if (copy_to_user((void __user *)arg, hus, sizeof(*hus))) rc = -EFAULT; ll_finish_md_op_data(op_data); @@ -2358,7 +2386,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct hsm_state_set *hss; int rc; - hss = memdup_user((char *)arg, sizeof(*hss)); + hss = memdup_user((char __user *)arg, sizeof(*hss)); if (IS_ERR(hss)) return PTR_ERR(hss); @@ -2386,7 +2414,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (copy_to_user((char *)arg, hca, sizeof(*hca))) + if (copy_to_user((char __user *)arg, hca, sizeof(*hca))) rc = -EFAULT; ll_finish_md_op_data(op_data); @@ -2412,13 +2440,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case F_UNLCK: mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { 
+ if (fd->fd_lease_och) { och = fd->fd_lease_och; fd->fd_lease_och = NULL; } mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { mode = och->och_flags & (FMODE_READ|FMODE_WRITE); rc = ll_lease_close(och, inode, &lease_broken); @@ -2443,12 +2471,12 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = 0; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och == NULL) { + if (!fd->fd_lease_och) { fd->fd_lease_och = och; och = NULL; } mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { /* impossible now that only excl is supported for now */ ll_lease_close(och, inode, &lease_broken); rc = -EBUSY; @@ -2461,11 +2489,11 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = 0; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { struct obd_client_handle *och = fd->fd_lease_och; lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (!ldlm_is_cancel(lock)) rc = och->och_flags & @@ -2480,7 +2508,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case LL_IOC_HSM_IMPORT: { struct hsm_user_import *hui; - hui = memdup_user((void *)arg, sizeof(*hui)); + hui = memdup_user((void __user *)arg, sizeof(*hui)); if (IS_ERR(hui)) return PTR_ERR(hui); @@ -2497,7 +2525,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, - (void *)arg); + (void __user *)arg); } } } @@ -2536,15 +2564,17 @@ static int ll_flush(struct file *file, fl_owner_t id) LASSERT(!S_ISDIR(inode->i_mode)); /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. */ + * failed for pages in this mapping. + */ rc = lli->lli_async_rc; lli->lli_async_rc = 0; err = lov_read_and_clear_async_rc(lli->lli_clob); if (rc == 0) rc = err; - /* The application has been told write failure already. - * Do not report failure again. */ + /* The application has been told about write failure already. + * Do not report failure again. + */ if (fd->fd_write_failed) return 0; return rc ? -EIO : 0; @@ -2612,7 +2642,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) inode_lock(inode); /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. */ + * failed for pages in this mapping. + */ if (!S_ISDIR(inode->i_mode)) { err = lli->lli_async_rc; lli->lli_async_rc = 0; @@ -2683,7 +2714,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * I guess between lockd processes) and then compares pid. * As such we assign pid to the owner field to make it all work, * conflict with normal locks is unlikely since pid space and - * pointer space for current->files are not intersecting */ + * pointer space for current->files are not intersecting + */ if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner) flock.l_flock.owner = (unsigned long)file_lock->fl_pid; @@ -2699,7 +2731,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * order to process an unlock request we need all of the same * information that is given with a normal read or write record * lock request. To avoid creating another ldlm unlock (cancel) - * message we'll treat a LCK_NL flock request as an unlock. */ + * message we'll treat a LCK_NL flock request as an unlock. 
+ */ einfo.ei_mode = LCK_NL; break; case F_WRLCK: @@ -2707,7 +2740,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) break; default: CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", - file_lock->fl_type); + file_lock->fl_type); return -ENOTSUPP; } @@ -2730,7 +2763,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) #endif flags = LDLM_FL_TEST_LOCK; /* Save the old mode so that if the mode in the lock changes we - * can decrement the appropriate reader or writer refcount. */ + * can decrement the appropriate reader or writer refcount. + */ file_lock->fl_type = einfo.ei_mode; break; default: @@ -2757,7 +2791,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) if (rc2 && file_lock->fl_type != F_UNLCK) { einfo.ei_mode = LCK_NL; md_enqueue(sbi->ll_md_exp, &einfo, NULL, - op_data, &lockh, &flock, 0, NULL /* req */, flags); + op_data, &lockh, &flock, 0, NULL /* req */, flags); rc = rc2; } @@ -2782,11 +2816,12 @@ ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock) * \param l_req_mode [IN] searched lock mode * \retval boolean, true iff all bits are found */ -int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode) +int ll_have_md_lock(struct inode *inode, __u64 *bits, + enum ldlm_mode l_req_mode) { struct lustre_handle lockh; ldlm_policy_data_t policy; - ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ? + enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ? (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode; struct lu_fid *fid; __u64 flags; @@ -2822,13 +2857,13 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode) return *bits == 0; } -ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - ldlm_mode_t mode) +enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, + struct lustre_handle *lockh, __u64 flags, + enum ldlm_mode mode) { ldlm_policy_data_t policy = { .l_inodebits = {bits} }; struct lu_fid *fid; - ldlm_mode_t rc; + enum ldlm_mode rc; fid = &ll_i2info(inode)->lli_fid; CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid)); @@ -2866,8 +2901,6 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) struct obd_export *exp; int rc = 0; - LASSERT(inode != NULL); - CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n", inode->i_ino, inode->i_generation, inode, dentry); @@ -2875,7 +2908,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC. * But under CMD case, it caused some lock issues, should be fixed - * with new CMD ibits lock. See bug 12718 */ + * with new CMD ibits lock. See bug 12718 + */ if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) { struct lookup_intent oit = { .it_op = IT_GETATTR }; struct md_op_data *op_data; @@ -2893,7 +2927,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) oit.it_create_mode |= M_CHECK_STALE; rc = md_intent_lock(exp, op_data, NULL, 0, /* we are not interested in name - based lookup */ + * based lookup + */ &oit, 0, &req, ll_md_blocking_ast, 0); ll_finish_md_op_data(op_data); @@ -2910,9 +2945,10 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) } /* Unlinked? Unhash dentry, so it is not picked up later by - do_lookup() -> ll_revalidate_it(). We cannot use d_drop - here to preserve get_cwd functionality on 2.6. - Bug 10503 */ + * do_lookup() -> ll_revalidate_it(). 
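
The LL_IOC_HSM_STATE_SET, LL_IOC_HSM_IMPORT and HSM copy start/end cases earlier in this file lean on memdup_user(), which folds the usual kmalloc-plus-copy_from_user sequence into one call. A short sketch of that lifecycle, assuming a hypothetical demo_req layout:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_req {               /* hypothetical request layout */
        __u64 cookie;
        __u32 action;
};

static long demo_handle_req(unsigned long arg)
{
        struct demo_req *req;

        /* memdup_user() allocates sizeof(*req) bytes and copies them
         * in from user space; on failure it returns ERR_PTR(-ENOMEM)
         * or ERR_PTR(-EFAULT), never NULL.
         */
        req = memdup_user((void __user *)arg, sizeof(*req));
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* ... act on req->cookie and req->action ... */

        kfree(req);
        return 0;
}
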
We cannot use d_drop + * here to preserve get_cwd functionality on 2.6. + * Bug 10503 + */ if (!d_inode(dentry)->i_nlink) d_lustre_invalidate(dentry, 0); @@ -3026,26 +3062,33 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, sizeof(struct ll_fiemap_extent)); fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (fiemap == NULL) + if (!fiemap) return -ENOMEM; fiemap->fm_flags = fieinfo->fi_flags; fiemap->fm_extent_count = fieinfo->fi_extents_max; fiemap->fm_start = start; fiemap->fm_length = len; - if (extent_count > 0) - memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start, - sizeof(struct ll_fiemap_extent)); + if (extent_count > 0 && + copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start, + sizeof(struct ll_fiemap_extent)) != 0) { + rc = -EFAULT; + goto out; + } rc = ll_do_fiemap(inode, fiemap, num_bytes); fieinfo->fi_flags = fiemap->fm_flags; fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents; - if (extent_count > 0) - memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0], - fiemap->fm_mapped_extents * - sizeof(struct ll_fiemap_extent)); + if (extent_count > 0 && + copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0], + fiemap->fm_mapped_extents * + sizeof(struct ll_fiemap_extent)) != 0) { + rc = -EFAULT; + goto out; + } +out: kvfree(fiemap); return rc; } @@ -3067,13 +3110,12 @@ int ll_inode_permission(struct inode *inode, int mask) { int rc = 0; -#ifdef MAY_NOT_BLOCK if (mask & MAY_NOT_BLOCK) return -ECHILD; -#endif /* as root inode are NOT getting validated in lookup operation, - * need to do it before permission check. */ + * need to do it before permission check. + */ if (is_root_inode(inode)) { rc = __ll_inode_revalidate(inode->i_sb->s_root, @@ -3173,8 +3215,7 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd) unsigned int size; struct llioc_data *in_data = NULL; - if (cb == NULL || cmd == NULL || - count > LLIOC_MAX_CMD || count < 0) + if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0) return NULL; size = sizeof(*in_data) + count * sizeof(unsigned int); @@ -3200,7 +3241,7 @@ void ll_iocontrol_unregister(void *magic) { struct llioc_data *tmp; - if (magic == NULL) + if (!magic) return; down_write(&llioc.ioc_sem); @@ -3254,7 +3295,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) struct lu_env *env; int result; - if (lli->lli_clob == NULL) + if (!lli->lli_clob) return 0; env = cl_env_nested_get(&nest); @@ -3267,13 +3308,14 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) if (conf->coc_opc == OBJECT_CONF_SET) { struct ldlm_lock *lock = conf->coc_lock; - LASSERT(lock != NULL); + LASSERT(lock); LASSERT(ldlm_has_layout(lock)); if (result == 0) { /* it can only be allowed to match after layout is * applied to inode otherwise false layout would be * seen. Applying layout should happen before dropping - * the intent lock. */ + * the intent lock. + */ ldlm_lock_allow_match(lock); } } @@ -3296,14 +3338,15 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY), lock->l_lvb_data, lock->l_lvb_len); - if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY)) + if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY)) return 0; /* if layout lock was granted right away, the layout is returned * within DLM_LVB of dlm reply; otherwise if the lock was ever * blocked and then granted via completion ast, we have to fetch * layout here. 
Please note that we can't use the LVB buffer in - * completion AST because it doesn't have a large enough buffer */ + * completion AST because it doesn't have a large enough buffer + */ rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc == 0) rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), @@ -3313,7 +3356,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) return rc; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -3325,20 +3368,20 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) } lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize); - if (lmm == NULL) { + if (!lmm) { rc = -EFAULT; goto out; } lvbdata = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (lvbdata == NULL) { + if (!lvbdata) { rc = -ENOMEM; goto out; } memcpy(lvbdata, lmm, lmmsize); lock_res_and_lock(lock); - if (lock->l_lvb_data != NULL) + if (lock->l_lvb_data) kvfree(lock->l_lvb_data); lock->l_lvb_data = lvbdata; @@ -3354,8 +3397,8 @@ out: * Apply the layout to the inode. Layout lock is held and will be released * in this function. */ -static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, - struct inode *inode, __u32 *gen, bool reconf) +static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, + struct inode *inode, __u32 *gen, bool reconf) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -3369,10 +3412,10 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, LASSERT(lustre_handle_is_used(lockh)); lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); + LASSERT(lock); LASSERT(ldlm_has_layout(lock)); - LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n", + LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d", inode, PFID(&lli->lli_fid), reconf); /* in case this is a caching lock and reinstate with new inode */ @@ -3382,12 +3425,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY); unlock_res_and_lock(lock); /* checking lvb_ready is racy but this is okay. The worst case is - * that multi processes may configure the file on the same time. */ + * that multi processes may configure the file on the same time. + */ if (lvb_ready || !reconf) { rc = -ENODATA; if (lvb_ready) { /* layout_gen must be valid if layout lock is not - * cancelled and stripe has already set */ + * cancelled and stripe has already set + */ *gen = ll_layout_version_get(lli); rc = 0; } @@ -3401,26 +3446,28 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, /* for layout lock, lmm is returned in lock's lvb. * lvb_data is immutable if the lock is held so it's safe to access it * without res lock. 
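
ll_layout_fetch() above is typical of the checkpatch-driven comparison cleanups running through the whole series: ptr == NULL becomes !ptr and ptr != NULL becomes a plain truth test. An equivalent before/after, using hypothetical names rather than the driver's own:

#include <linux/errno.h>
#include <linux/mm.h>

struct demo_lock {              /* hypothetical, for illustration */
        void *l_lvb_data;
        int l_lvb_len;
};

static int demo_cache_lvb(struct demo_lock *lock, void *lvb, int len)
{
        if (!lvb)                       /* was: if (lvb == NULL) */
                return -EINVAL;

        if (lock->l_lvb_data)           /* was: if (... != NULL) */
                kvfree(lock->l_lvb_data);

        lock->l_lvb_data = lvb;
        lock->l_lvb_len = len;
        return 0;
}
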
See the description in ldlm_lock_decref_internal() - * for the condition to free lvb_data of layout lock */ - if (lock->l_lvb_data != NULL) { + * for the condition to free lvb_data of layout lock + */ + if (lock->l_lvb_data) { rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm, lock->l_lvb_data, lock->l_lvb_len); if (rc >= 0) { *gen = LL_LAYOUT_GEN_EMPTY; - if (md.lsm != NULL) + if (md.lsm) *gen = md.lsm->lsm_layout_gen; rc = 0; } else { - CERROR("%s: file "DFID" unpackmd error: %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), rc); + CERROR("%s: file " DFID " unpackmd error: %d\n", + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(&lli->lli_fid), rc); } } if (rc < 0) goto out; /* set layout to file. Unlikely this will fail as old layout was - * surely eliminated */ + * surely eliminated + */ memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_SET; conf.coc_inode = inode; @@ -3428,7 +3475,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, conf.u.coc_md = &md; rc = ll_layout_conf(inode, &conf); - if (md.lsm != NULL) + if (md.lsm) obd_free_memmd(sbi->ll_dt_exp, &md.lsm); /* refresh layout failed, need to wait */ @@ -3440,9 +3487,9 @@ out: /* wait for IO to complete if it's still being used. */ if (wait_layout) { - CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - inode, PFID(&lli->lli_fid)); + CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n", + ll_get_fsname(inode->i_sb, NULL, 0), + inode, PFID(&lli->lli_fid)); memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_WAIT; @@ -3451,8 +3498,8 @@ out: if (rc == 0) rc = -EAGAIN; - CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n", - PFID(&lli->lli_fid), rc); + CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n", + PFID(&lli->lli_fid), rc); } return rc; } @@ -3477,7 +3524,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) struct md_op_data *op_data; struct lookup_intent it; struct lustre_handle lockh; - ldlm_mode_t mode; + enum ldlm_mode mode; struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS, .ei_mode = LCK_CR, @@ -3499,7 +3546,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) again: /* mostly layout lock is caching on the local side, so try to match - * it before grabbing layout lock mutex. */ + * it before grabbing layout lock mutex. 
+ */
 	mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
 			       LCK_CR | LCK_CW | LCK_PR | LCK_PW);
 	if (mode != 0) { /* hit cached lock */
@@ -3512,7 +3560,7 @@ again:
 	}
 
 	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
-				     0, 0, LUSTRE_OPC_ANY, NULL);
+				     0, 0, LUSTRE_OPC_ANY, NULL);
 	if (IS_ERR(op_data)) {
 		mutex_unlock(&lli->lli_layout_mutex);
 		return PTR_ERR(op_data);
 	}
@@ -3523,14 +3571,13 @@ again:
 	it.it_op = IT_LAYOUT;
 	lockh.cookie = 0ULL;
 
-	LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n",
-			  ll_get_fsname(inode->i_sb, NULL, 0), inode,
+	LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "",
+			  ll_get_fsname(inode->i_sb, NULL, 0), inode,
 			  PFID(&lli->lli_fid));
 
 	rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
 			NULL, 0, NULL, 0);
-	if (it.d.lustre.it_data != NULL)
-		ptlrpc_req_finished(it.d.lustre.it_data);
+	ptlrpc_req_finished(it.d.lustre.it_data);
 	it.d.lustre.it_data = NULL;
 
 	ll_finish_md_op_data(op_data);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 3f348a3aa..a55ac4dcc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -52,9 +52,8 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
 
 	spin_lock(&lli->lli_lock);
 	lli->lli_flags |= LLIF_SOM_DIRTY;
-	if (page != NULL && list_empty(&page->cpg_pending_linkage))
-		list_add(&page->cpg_pending_linkage,
-			 &club->cob_pending_list);
+	if (page && list_empty(&page->cpg_pending_linkage))
+		list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
 	spin_unlock(&lli->lli_lock);
 }
 
@@ -65,7 +64,7 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
 	int rc = 0;
 
 	spin_lock(&lli->lli_lock);
-	if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+	if (page && !list_empty(&page->cpg_pending_linkage)) {
 		list_del_init(&page->cpg_pending_linkage);
 		rc = 1;
 	}
@@ -76,7 +75,8 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
 
 /** Queues DONE_WRITING if
  * - done writing is allowed;
- * - inode has no no dirty pages; */
+ * - inode has no dirty pages;
+ */
 void ll_queue_done_writing(struct inode *inode, unsigned long flags)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
@@ -106,7 +106,8 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
 		 * close() happen, epoch is closed as the inode is marked as
 		 * LLIF_EPOCH_PENDING. When pages are written inode should not
 		 * be inserted into the queue again, clear this flag to avoid
-		 * it. */
+		 * it.
+		 */
 		lli->lli_flags &= ~LLIF_DONE_WRITING;
 
 		wake_up(&lcq->lcq_waitq);
@@ -144,10 +145,11 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
 	spin_lock(&lli->lli_lock);
 	if (!(list_empty(&club->cob_pending_list))) {
 		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
-			LASSERT(*och != NULL);
-			LASSERT(lli->lli_pending_och == NULL);
+			LASSERT(*och);
+			LASSERT(!lli->lli_pending_och);
 			/* Inode is dirty and there is no pending write done
-			 * request yet, DONE_WRITE is to be sent later. */
+			 * request yet, DONE_WRITE is to be sent later.
+			 */
 			lli->lli_flags |= LLIF_EPOCH_PENDING;
 			lli->lli_pending_och = *och;
 			spin_unlock(&lli->lli_lock);
@@ -159,7 +161,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
 		if (flags & LLIF_DONE_WRITING) {
 			/* Some pages are still dirty, it is early to send
 			 * DONE_WRITE. Wait until all pages will be flushed
-			 * and try DONE_WRITE again later. */
+			 * and try DONE_WRITE again later.
+ */ LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING)); lli->lli_flags |= LLIF_DONE_WRITING; spin_unlock(&lli->lli_lock); @@ -187,7 +190,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, } /* There is a pending DONE_WRITE -- close epoch with no - * attribute change. */ + * attribute change. + */ if (lli->lli_flags & LLIF_EPOCH_PENDING) { spin_unlock(&lli->lli_lock); goto out; @@ -215,13 +219,13 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data) struct obdo *oa; int rc; - LASSERT(op_data != NULL); + LASSERT(op_data); if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n", inode->i_ino, inode->i_generation, lli->lli_flags); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oa) { CERROR("can't allocate memory for Size-on-MDS update.\n"); return -ENOMEM; @@ -266,7 +270,7 @@ static void ll_prepare_done_writing(struct inode *inode, { ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING); /* If there is no @och, we do not do D_W yet. */ - if (*och == NULL) + if (!*och) return; ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh); @@ -289,13 +293,14 @@ static void ll_done_writing(struct inode *inode) ll_prepare_done_writing(inode, op_data, &och); /* If there is no @och, we do not do D_W yet. */ - if (och == NULL) + if (!och) goto out; rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL); if (rc == -EAGAIN) /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. */ + * OSTs and send setattr to back to MDS. + */ rc = ll_som_update(inode, op_data); else if (rc) CERROR("inode %lu mdc done_writing failed: rc = %d\n", @@ -316,7 +321,7 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq) if (!list_empty(&lcq->lcq_head)) { lli = list_entry(lcq->lcq_head.next, struct ll_inode_info, - lli_close_list); + lli_close_list); list_del_init(&lli->lli_close_list); } else if (atomic_read(&lcq->lcq_stop)) lli = ERR_PTR(-EALREADY); diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 845e992ca..e3c0f1dd4 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -93,9 +93,10 @@ struct ll_remote_perm { gid_t lrp_gid; uid_t lrp_fsuid; gid_t lrp_fsgid; - int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this - is access permission with - lrp_fsuid/lrp_fsgid. */ + int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this + * is access permission with + * lrp_fsuid/lrp_fsgid. + */ }; enum lli_flags { @@ -106,7 +107,8 @@ enum lli_flags { /* DONE WRITING is allowed. */ LLIF_DONE_WRITING = (1 << 2), /* Sizeon-on-MDS attributes are changed. An attribute update needs to - * be sent to MDS. */ + * be sent to MDS. + */ LLIF_SOM_DIRTY = (1 << 3), /* File data is modified. */ LLIF_DATA_MODIFIED = (1 << 4), @@ -130,22 +132,23 @@ struct ll_inode_info { /* identifying fields for both metadata and data stacks. */ struct lu_fid lli_fid; /* Parent fid for accessing default stripe data on parent directory - * for allocating OST objects after a mknod() and later open-by-FID. */ + * for allocating OST objects after a mknod() and later open-by-FID. 
+ */ struct lu_fid lli_pfid; - struct list_head lli_close_list; - /* open count currently used by capability only, indicate whether - * capability needs renewal */ - atomic_t lli_open_count; + struct list_head lli_close_list; + unsigned long lli_rmtperm_time; /* handle is to be sent to MDS later on done_writing and setattr. * Open handle data are needed for the recovery to reconstruct - * the inode state on the MDS. XXX: recovery is not ready yet. */ + * the inode state on the MDS. XXX: recovery is not ready yet. + */ struct obd_client_handle *lli_pending_och; /* We need all three because every inode may be opened in different - * modes */ + * modes + */ struct obd_client_handle *lli_mds_read_och; struct obd_client_handle *lli_mds_write_och; struct obd_client_handle *lli_mds_exec_och; @@ -162,7 +165,8 @@ struct ll_inode_info { spinlock_t lli_agl_lock; /* Try to make the d::member and f::member are aligned. Before using - * these members, make clear whether it is directory or not. */ + * these members, make clear whether it is directory or not. + */ union { /* for directory */ struct { @@ -173,13 +177,15 @@ struct ll_inode_info { /* since parent-child threads can share the same @file * struct, "opendir_key" is the token when dir close for * case of parent exit before child -- it is me should - * cleanup the dir readahead. */ + * cleanup the dir readahead. + */ void *d_opendir_key; struct ll_statahead_info *d_sai; /* protect statahead stuff. */ spinlock_t d_sa_lock; - /* "opendir_pid" is the token when lookup/revalid - * -- I am the owner of dir statahead. */ + /* "opendir_pid" is the token when lookup/revalidate + * -- I am the owner of dir statahead. + */ pid_t d_opendir_pid; } d; @@ -281,11 +287,8 @@ static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen) int ll_xattr_cache_destroy(struct inode *inode); -int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid); +int ll_xattr_cache_get(struct inode *inode, const char *name, + char *buffer, size_t size, __u64 valid); /* * Locking to guarantee consistency of non-atomic updates to long long i_size, @@ -305,11 +308,12 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode) } /* default to about 40meg of readahead on a given system. That much tied - * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */ -#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) + * up in 512k readahead requests serviced at 40ms each is about 1GB/s. + */ +#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT)) /* default to read-ahead full files smaller than 2MB on the second read */ -#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT)) enum ra_stat { RA_STAT_HIT = 0, @@ -344,11 +348,13 @@ struct ra_io_arg { unsigned long ria_end; /* end offset of read-ahead*/ /* If stride read pattern is detected, ria_stoff means where * stride read is started. Note: for normal read-ahead, the - * value here is meaningless, and also it will not be accessed*/ + * value here is meaningless, and also it will not be accessed + */ pgoff_t ria_stoff; /* ria_length and ria_pages are the length and pages length in the * stride I/O mode. 
And they will also be used to check whether - * it is stride I/O read-ahead in the read-ahead pages*/ + * it is stride I/O read-ahead in the read-ahead pages + */ unsigned long ria_length; unsigned long ria_pages; }; @@ -455,7 +461,8 @@ struct eacl_table { struct ll_sb_info { /* this protects pglist and ra_info. It isn't safe to - * grab from interrupt contexts */ + * grab from interrupt contexts + */ spinlock_t ll_lock; spinlock_t ll_pp_extent_lock; /* pp_extent entry*/ spinlock_t ll_process_lock; /* ll_rw_process_info */ @@ -468,10 +475,8 @@ struct ll_sb_info { int ll_flags; unsigned int ll_umounting:1, ll_xattr_cache_enabled:1; - struct list_head ll_conn_chain; /* per-conn chain of SBs */ struct lustre_client_ocd ll_lco; - struct list_head ll_orphan_dentry_list; /*please don't ask -p*/ struct ll_close_queue *ll_lcq; struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ @@ -502,13 +507,16 @@ struct ll_sb_info { /* metadata stat-ahead */ unsigned int ll_sa_max; /* max statahead RPCs */ atomic_t ll_sa_total; /* statahead thread started - * count */ + * count + */ atomic_t ll_sa_wrong; /* statahead thread stopped for - * low hit ratio */ + * low hit ratio + */ atomic_t ll_agl_total; /* AGL thread started count */ - dev_t ll_sdev_orig; /* save s_dev before assign for - * clustered nfs */ + dev_t ll_sdev_orig; /* save s_dev before assign for + * clustered nfs + */ struct rmtacl_ctl_table ll_rct; struct eacl_table ll_et; __kernel_fsid_t ll_fsid; @@ -619,13 +627,15 @@ struct ll_file_data { __u32 fd_flags; fmode_t fd_omode; /* openhandle if lease exists for this file. - * Borrow lli->lli_och_mutex to protect assignment */ + * Borrow lli->lli_och_mutex to protect assignment + */ struct obd_client_handle *fd_lease_och; struct obd_client_handle *fd_och; struct file *fd_file; /* Indicate whether need to report failure when close. * true: failure is known, not report again. - * false: unknown failure, should report. */ + * false: unknown failure, should report. 
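The ra_io_arg fields above describe Lustre's stride-read mode: windows of ria_pages pages repeating every ria_length pages, starting at ria_stoff. As one plausible illustration of those documented semantics (not the driver's actual detector), a membership test could look like:

#include <linux/types.h>

/* True if page index 'idx' falls inside a stride window described by
 * ra_io_arg-style fields; a zero 'length' would make the modulo
 * undefined, so treat it as 'no stride'. */
static bool demo_index_in_stride(unsigned long idx, unsigned long stoff,
                                 unsigned long length, unsigned long pages)
{
        if (length == 0 || idx < stoff)
                return false;
        return ((idx - stoff) % length) < pages;
}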
+ */ bool fd_write_failed; }; @@ -647,7 +657,7 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi) #if BITS_PER_LONG == 32 return 1; #elif defined(CONFIG_COMPAT) - return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API)); + return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API)); #else return unlikely(sbi->ll_flags & LL_SBI_32BIT_API); #endif @@ -705,10 +715,10 @@ extern struct file_operations ll_file_operations_flock; extern struct file_operations ll_file_operations_noflock; extern const struct inode_operations ll_file_inode_operations; int ll_have_md_lock(struct inode *inode, __u64 *bits, - ldlm_mode_t l_req_mode); -ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - ldlm_mode_t mode); + enum ldlm_mode l_req_mode); +enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, + struct lustre_handle *lockh, __u64 flags, + enum ldlm_mode mode); int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_glimpse_ioctl(struct ll_sb_info *sbi, @@ -782,7 +792,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry); void ll_dirty_page_discard_warn(struct page *page, int ioret); int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *, struct lookup_intent *); -int ll_obd_statfs(struct inode *inode, void *arg); +int ll_obd_statfs(struct inode *inode, void __user *arg); int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize); int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize); int ll_process_config(struct lustre_cfg *lcfg); @@ -796,7 +806,7 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen); void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req); /* llite/llite_nfs.c */ -extern struct export_operations lustre_export_operations; +extern const struct export_operations lustre_export_operations; __u32 get_uuid2int(const char *name, int len); void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid); struct inode *search_inode_for_lustre(struct super_block *sb, @@ -913,7 +923,7 @@ static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) struct vvp_thread_info *info; info = lu_context_key_get(&env->le_ctx, &vvp_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -937,7 +947,7 @@ static inline struct vvp_session *vvp_env_session(const struct lu_env *env) struct vvp_session *ses; ses = lu_context_key_get(env->le_ses, &vvp_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -957,21 +967,21 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret); int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); int ll_file_mmap(struct file *file, struct vm_area_struct *vma); -void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, size_t count); +void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, + unsigned long addr, size_t count); struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, size_t count); static inline void ll_invalidate_page(struct page *vmpage) { struct address_space *mapping = vmpage->mapping; - loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; + loff_t offset = vmpage->index << PAGE_SHIFT; LASSERT(PageLocked(vmpage)); - if (mapping == NULL) + if (!mapping) return; - ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); + 
ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE); truncate_complete_page(mapping, vmpage); } @@ -993,7 +1003,7 @@ static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi) { struct obd_device *obd = sbi->ll_md_exp->exp_obd; - if (obd == NULL) + if (!obd) LBUG(); return &obd->u.cli; } @@ -1018,7 +1028,7 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode) { struct lu_fid *fid; - LASSERT(inode != NULL); + LASSERT(inode); fid = &ll_i2info(inode)->lli_fid; return fid; @@ -1107,39 +1117,44 @@ static inline u64 rce_ops2valid(int ops) struct ll_statahead_info { struct inode *sai_inode; atomic_t sai_refcount; /* when access this struct, hold - * refcount */ + * refcount + */ unsigned int sai_generation; /* generation for statahead */ unsigned int sai_max; /* max ahead of lookup */ __u64 sai_sent; /* stat requests sent count */ __u64 sai_replied; /* stat requests which received - * reply */ + * reply + */ __u64 sai_index; /* index of statahead entry */ __u64 sai_index_wait; /* index of entry which is the - * caller is waiting for */ + * caller is waiting for + */ __u64 sai_hit; /* hit count */ __u64 sai_miss; /* miss count: - * for "ls -al" case, it includes - * hidden dentry miss; - * for "ls -l" case, it does not - * include hidden dentry miss. - * "sai_miss_hidden" is used for - * the later case. - */ + * for "ls -al" case, it includes + * hidden dentry miss; + * for "ls -l" case, it does not + * include hidden dentry miss. + * "sai_miss_hidden" is used for + * the later case. + */ unsigned int sai_consecutive_miss; /* consecutive miss */ unsigned int sai_miss_hidden;/* "ls -al", but first dentry - * is not a hidden one */ + * is not a hidden one + */ unsigned int sai_skip_hidden;/* skipped hidden dentry count */ unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for - * hidden entries */ + * hidden entries + */ sai_agl_valid:1;/* AGL is valid for the dir */ - wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ + wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ struct ptlrpc_thread sai_thread; /* stat-ahead thread */ struct ptlrpc_thread sai_agl_thread; /* AGL thread */ - struct list_head sai_entries; /* entry list */ - struct list_head sai_entries_received; /* entries returned */ - struct list_head sai_entries_stated; /* entries stated */ - struct list_head sai_entries_agl; /* AGL entries to be sent */ - struct list_head sai_cache[LL_SA_CACHE_SIZE]; + struct list_head sai_entries; /* entry list */ + struct list_head sai_entries_received; /* entries returned */ + struct list_head sai_entries_stated; /* entries stated */ + struct list_head sai_entries_agl; /* AGL entries to be sent */ + struct list_head sai_cache[LL_SA_CACHE_SIZE]; spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE]; atomic_t sai_cache_count; /* entry count in cache */ }; @@ -1171,8 +1186,8 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry) if (lli->lli_opendir_pid != current_pid()) return; - LASSERT(ldd != NULL); - if (sai != NULL) + LASSERT(ldd); + if (sai) ldd->lld_sa_generation = sai->sai_generation; } @@ -1191,7 +1206,7 @@ d_need_statahead(struct inode *dir, struct dentry *dentryp) return -EAGAIN; /* statahead has been stopped */ - if (lli->lli_opendir_key == NULL) + if (!lli->lli_opendir_key) return -EAGAIN; ldd = ll_d2d(dentryp); @@ -1313,13 +1328,15 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, /** direct write pages */ struct ll_dio_pages { /** page array to be written. we don't support - * partial pages except the last one. 
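ll_invalidate_page() above shows the arithmetic that the PAGE_CACHE_SIZE to PAGE_SIZE conversions preserve throughout this series: a page's byte range in the file is its index shifted by PAGE_SHIFT. The same arithmetic in isolation (helper names are illustrative):

#include <linux/mm.h>
#include <linux/types.h>

/* First and last byte covered by page cache index 'idx'; with 4 KiB
 * pages, index 3 spans bytes 12288..16383. */
static inline loff_t demo_page_first_byte(pgoff_t idx)
{
        return (loff_t)idx << PAGE_SHIFT;
}

static inline loff_t demo_page_last_byte(pgoff_t idx)
{
        return demo_page_first_byte(idx) + PAGE_SIZE - 1;
}

The explicit loff_t cast matters on 32-bit kernels, where shifting a raw pgoff_t can overflow for files larger than 4 GiB.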
*/ + * partial pages except the last one. + */ struct page **ldp_pages; /* offset of each page */ loff_t *ldp_offsets; /** if ldp_offsets is NULL, it means a sequential * pages to be written, then this is the file offset - * of the * first page. */ + * of the first page. + */ loff_t ldp_start_offset; /** how many bytes are to be written. */ size_t ldp_size; @@ -1345,7 +1362,6 @@ static inline int ll_file_nolock(const struct file *file) struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct inode *inode = file_inode(file); - LASSERT(fd != NULL); return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) || (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK)); } @@ -1362,7 +1378,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, * remote MDT, where the object is, will grant * UPDATE|PERM lock. The inode will be attached to both * LOOKUP and PERM locks, so revoking either locks will - * case the dcache being cleared */ + * case the dcache being cleared + */ if (it->d.lustre.it_remote_lock_mode) { handle.cookie = it->d.lustre.it_remote_lock_handle; CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n", @@ -1383,7 +1400,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, it->d.lustre.it_lock_set = 1; } - if (bits != NULL) + if (bits) *bits = it->d.lustre.it_lock_bits; } @@ -1401,14 +1418,14 @@ static inline int d_lustre_invalid(const struct dentry *dentry) { struct ll_dentry_data *lld = ll_d2d(dentry); - return (lld == NULL) || lld->lld_invalid; + return !lld || lld->lld_invalid; } static inline void __d_lustre_invalidate(struct dentry *dentry) { struct ll_dentry_data *lld = ll_d2d(dentry); - if (lld != NULL) + if (lld) lld->lld_invalid = 1; } @@ -1442,7 +1459,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested) static inline void d_lustre_revalidate(struct dentry *dentry) { spin_lock(&dentry->d_lock); - LASSERT(ll_d2d(dentry) != NULL); + LASSERT(ll_d2d(dentry)); ll_d2d(dentry)->lld_invalid = 0; spin_unlock(&dentry->d_lock); } diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index b2fc5b378..b57a99268 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) si_meminfo(&si); pages = si.totalram - si.totalhigh; - if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) + if (pages >> (20 - PAGE_SHIFT) < 512) lru_page_max = pages / 2; else lru_page_max = (pages / 4) * 3; @@ -102,8 +102,6 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file; sbi->ll_ra_info.ra_max_read_ahead_whole_pages = SBI_DEFAULT_READAHEAD_WHOLE_MAX; - INIT_LIST_HEAD(&sbi->ll_conn_chain); - INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list); ll_generate_random_uuid(uuid); class_uuid_unparse(uuid, &sbi->ll_sb_uuid); @@ -171,7 +169,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, return -ENOMEM; } - if (llite_root != NULL) { + if (llite_root) { err = ldebugfs_register_mountpoint(llite_root, sb, dt, md); if (err < 0) CERROR("could not register mount in /lustre/llite\n"); @@ -204,7 +202,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT)) /* flag mdc connection as lightweight, only used for test - * purpose, use with care */ + * purpose, use with care + */ 
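Many hunks in this series replace open-coded PAGE_CACHE_SHIFT conversions, all instances of one idiom: megabyte values and page counts differ by a shift of (20 - PAGE_SHIFT), since 20 is log2 of 1 MiB. A worked sketch assuming 4 KiB pages (PAGE_SHIFT == 12):

#define DEMO_PAGE_SHIFT         12
#define DEMO_MB_TO_PAGES(mb)    ((unsigned long)(mb) << (20 - DEMO_PAGE_SHIFT))
#define DEMO_PAGES_TO_MB(p)     ((unsigned long)(p) >> (20 - DEMO_PAGE_SHIFT))

/*
 * DEMO_MB_TO_PAGES(40) == 10240, matching SBI_DEFAULT_READAHEAD_MAX;
 * DEMO_MB_TO_PAGES(2)  == 512, matching SBI_DEFAULT_READAHEAD_WHOLE_MAX.
 */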
data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT; data->ocd_ibits_known = MDS_INODELOCK_FULL; @@ -252,10 +251,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* For mount, we only need fs info from MDT0, and also in DNE, it * can make sure the client can be mounted as long as MDT0 is - * available */ + * available + */ err = obd_statfs(NULL, sbi->ll_md_exp, osfs, - cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), - OBD_STATFS_FOR_MDT0); + cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), + OBD_STATFS_FOR_MDT0); if (err) goto out_md_fid; @@ -265,18 +265,19 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, * we can access the MDC export directly and exp_connect_flags will * be non-zero, but if accessing an upgraded 2.1 server it will * have the correct flags filled in. - * XXX: fill in the LMV exp_connect_flags from MDC(s). */ + * XXX: fill in the LMV exp_connect_flags from MDC(s). + */ valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD; if (exp_connect_flags(sbi->ll_md_exp) != 0 && valid != CLIENT_CONNECT_MDT_REQD) { char *buf; - buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto out_md_fid; } - obd_connect_flags2str(buf, PAGE_CACHE_SIZE, + obd_connect_flags2str(buf, PAGE_SIZE, valid ^ CLIENT_CONNECT_MDT_REQD, ","); LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n", sbi->ll_md_exp->exp_obd->obd_name, buf); @@ -308,15 +309,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, } if (data->ocd_connect_flags & OBD_CONNECT_ACL) { -#ifdef MS_POSIXACL sb->s_flags |= MS_POSIXACL; -#endif sbi->ll_flags |= LL_SBI_ACL; } else { LCONSOLE_INFO("client wants to enable acl, but mdt not!\n"); -#ifdef MS_POSIXACL sb->s_flags &= ~MS_POSIXACL; -#endif sbi->ll_flags &= ~LL_SBI_ACL; } @@ -338,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) sbi->ll_md_brw_size = data->ocd_brw_size; else - sbi->ll_md_brw_size = PAGE_CACHE_SIZE; + sbi->ll_md_brw_size = PAGE_SIZE; if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { LCONSOLE_INFO("Layout lock feature supported.\n"); @@ -382,7 +379,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* OBD_CONNECT_CKSUM should always be set, even if checksums are * disabled by default, because it can still be enabled on the * fly via /sys. As a consequence, we still need to come to an - * agreement on the supported algorithms at connect time */ + * agreement on the supported algorithms at connect time + */ data->ocd_connect_flags |= OBD_CONNECT_CKSUM; if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY)) @@ -453,7 +451,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, #endif /* make root inode - * XXX: move this to after cbd setup? */ + * XXX: move this to after cbd setup? 
+ */ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS; if (sbi->ll_flags & LL_SBI_RMT_CLIENT) valid |= OBD_MD_FLRMTPERM; @@ -493,7 +492,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, md_free_lustre_md(sbi->ll_md_exp, &lmd); ptlrpc_req_finished(request); - if (root == NULL || IS_ERR(root)) { + if (!(root)) { if (lmd.lsm) obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm); #ifdef CONFIG_FS_POSIX_ACL @@ -502,8 +501,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, lmd.posix_acl = NULL; } #endif - err = IS_ERR(root) ? PTR_ERR(root) : -EBADF; - root = NULL; + err = -EBADF; CERROR("lustre_lite: bad iget4 for root\n"); goto out_root; } @@ -532,9 +530,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, &sbi->ll_cache, NULL); sb->s_root = d_make_root(root); - if (sb->s_root == NULL) { + if (!sb->s_root) { CERROR("%s: can't make root dentry\n", - ll_get_fsname(sb, NULL, 0)); + ll_get_fsname(sb, NULL, 0)); err = -ENOMEM; goto out_lock_cn_cb; } @@ -543,11 +541,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* We set sb->s_dev equal on all lustre clients in order to support * NFS export clustering. NFSD requires that the FSID be the same - * on all clients. */ + * on all clients. + */ /* s_dev is also used in lt_compare() to compare two fs, but that is - * only a node-local comparison. */ + * only a node-local comparison. + */ uuid = obd_get_uuid(sbi->ll_md_exp); - if (uuid != NULL) { + if (uuid) { sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid)); get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid); } @@ -597,7 +597,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize) size = sizeof(int); rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); + KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); if (rc) CERROR("Get default mdsize error rc %d\n", rc); @@ -619,13 +619,12 @@ static void client_common_put_super(struct super_block *sb) cl_sb_fini(sb); - list_del(&sbi->ll_conn_chain); - obd_fid_fini(sbi->ll_dt_exp->exp_obd); obd_disconnect(sbi->ll_dt_exp); sbi->ll_dt_exp = NULL; /* wait till all OSCs are gone, since cl_cache is accessing sbi. - * see LU-2543. */ + * see LU-2543. + */ obd_zombie_barrier(); ldebugfs_unregister_mountpoint(sbi); @@ -646,7 +645,8 @@ void ll_kill_super(struct super_block *sb) sbi = ll_s2sbi(sb); /* we need to restore s_dev from changed for clustered NFS before * put_super because new kernels have cached s_dev and change sb->s_dev - * in put_super not affected real removing devices */ + * in put_super not affected real removing devices + */ if (sbi) { sb->s_dev = sbi->ll_sdev_orig; sbi->ll_umounting = 1; @@ -777,7 +777,7 @@ static int ll_options(char *options, int *flags) next: /* Find next opt */ s2 = strchr(s1, ','); - if (s2 == NULL) + if (!s2) break; s1 = s2 + 1; } @@ -797,7 +797,6 @@ void ll_lli_init(struct ll_inode_info *lli) /* Do not set lli_fid, it has been initialized already. */ fid_zero(&lli->lli_pfid); INIT_LIST_HEAD(&lli->lli_close_list); - atomic_set(&lli->lli_open_count, 0); lli->lli_rmtperm_time = 0; lli->lli_pending_och = NULL; lli->lli_mds_read_och = NULL; @@ -890,8 +889,9 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) sb->s_d_op = &ll_d_ops; /* Generate a string unique to this super, in case some joker tries - to mount the same fs at two mount points. - Use the address of the super itself.*/ + * to mount the same fs at two mount points. 
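The root-inode error path reworked above drops the IS_ERR()/PTR_ERR() branch on the assumption, visible in the new err = -EBADF assignment, that ll_iget() reports failure with a plain NULL. For reference, the two kernel failure conventions side by side (demo_status is illustrative):

#include <linux/err.h>

static int demo_status(void *obj)
{
        if (!obj)               /* NULL convention: no reason encoded */
                return -ENOMEM;
        if (IS_ERR(obj))        /* ERR_PTR convention: errno encoded in the pointer */
                return PTR_ERR(obj);
        return 0;
}

Collapsing each callee to a single convention, as these hunks do, spares callers from having to test both.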
+ * Use the address of the super itself. + */ cfg->cfg_instance = sb; cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid; cfg->cfg_callback = class_config_llog_handler; @@ -904,7 +904,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */ lprof = class_get_profile(profilenm); - if (lprof == NULL) { + if (!lprof) { LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n", profilenm); err = -EINVAL; @@ -964,7 +964,8 @@ void ll_put_super(struct super_block *sb) } /* We need to set force before the lov_disconnect in - lustre_common_put_super, since l_d cleans up osc's as well. */ + * lustre_common_put_super, since l_d cleans up osc's as well. + */ if (force) { next = 0; while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, @@ -1036,8 +1037,8 @@ void ll_clear_inode(struct inode *inode) if (S_ISDIR(inode->i_mode)) { /* these should have been cleared in ll_file_release */ - LASSERT(lli->lli_opendir_key == NULL); - LASSERT(lli->lli_sai == NULL); + LASSERT(!lli->lli_opendir_key); + LASSERT(!lli->lli_sai); LASSERT(lli->lli_opendir_pid == 0); } @@ -1065,7 +1066,7 @@ void ll_clear_inode(struct inode *inode) ll_xattr_cache_destroy(inode); if (sbi->ll_flags & LL_SBI_RMT_CLIENT) { - LASSERT(lli->lli_posix_acl == NULL); + LASSERT(!lli->lli_posix_acl); if (lli->lli_remote_perms) { free_rmtperm_hash(lli->lli_remote_perms); lli->lli_remote_perms = NULL; @@ -1074,7 +1075,7 @@ void ll_clear_inode(struct inode *inode) #ifdef CONFIG_FS_POSIX_ACL else if (lli->lli_posix_acl) { LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1); - LASSERT(lli->lli_remote_perms == NULL); + LASSERT(!lli->lli_remote_perms); posix_acl_release(lli->lli_posix_acl); lli->lli_posix_acl = NULL; } @@ -1095,7 +1096,7 @@ void ll_clear_inode(struct inode *inode) #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, - struct md_open_data **mod) + struct md_open_data **mod) { struct lustre_md md; struct inode *inode = d_inode(dentry); @@ -1115,7 +1116,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, if (rc == -ENOENT) { clear_nlink(inode); /* Unlinked special device node? Or just a race? - * Pretend we done everything. */ + * Pretend we did everything. + */ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) { ia_valid = op_data->op_attr.ia_valid; @@ -1138,7 +1140,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, ia_valid = op_data->op_attr.ia_valid; /* inode size will be in cl_setattr_ost, can't do it now since dirty - * cache is not cleared yet. */ + * cache is not cleared yet. + */ op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE); rc = simple_setattr(dentry, &op_data->op_attr); op_data->op_attr.ia_valid = ia_valid; @@ -1161,7 +1164,6 @@ static int ll_setattr_done_writing(struct inode *inode, struct ll_inode_info *lli = ll_i2info(inode); int rc = 0; - LASSERT(op_data != NULL); if (!S_ISREG(inode->i_mode)) return 0; @@ -1175,7 +1177,8 @@ static int ll_setattr_done_writing(struct inode *inode, rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod); if (rc == -EAGAIN) /* MDS has instructed us to obtain Size-on-MDS attribute - * from OSTs and send setattr to back to MDS. */ + * from OSTs and send setattr to back to MDS. 
+ */ rc = ll_som_update(inode, op_data); else if (rc) CERROR("inode %lu mdc truncate failed: rc = %d\n", @@ -1208,11 +1211,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) int rc = 0, rc1 = 0; CDEBUG(D_VFSTRACE, - "%s: setattr inode %p/fid:"DFID - " from %llu to %llu, valid %x, hsm_import %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), inode, - PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size, - attr->ia_valid, hsm_import); + "%s: setattr inode %p/fid:" DFID + " from %llu to %llu, valid %x, hsm_import %d\n", + ll_get_fsname(inode->i_sb, NULL, 0), inode, + PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size, + attr->ia_valid, hsm_import); if (attr->ia_valid & ATTR_SIZE) { /* Check new size against VFS/VM file size limit and rlimit */ @@ -1222,7 +1225,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) /* The maximum Lustre file size is variable, based on the * OST maximum object size and number of stripes. This - * needs another check in addition to the VFS check above. */ + * needs another check in addition to the VFS check above. + */ if (attr->ia_size > ll_file_maxbytes(inode)) { CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n", PFID(&lli->lli_fid), attr->ia_size, @@ -1270,7 +1274,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) } /* We always do an MDS RPC, even if we're only changing the size; - * only the MDS knows whether truncate() should fail with -ETXTBUSY */ + * only the MDS knows whether truncate() should fail with -ETXTBUSY + */ op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) @@ -1304,7 +1309,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) /* if not in HSM import mode, clear size attr for released file * we clear the attribute send to MDT in op_data, not the original * received from caller in attr which is used later to - * decide return code */ + * decide return code + */ if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import) op_data->op_attr.ia_valid &= ~ATTR_SIZE; @@ -1322,7 +1328,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) } /* RPC to MDT is sent, cancel data modification flag */ - if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { + if (op_data->op_bias & MDS_DATA_MODIFIED) { spin_lock(&lli->lli_lock); lli->lli_flags &= ~LLIF_DATA_MODIFIED; spin_unlock(&lli->lli_lock); @@ -1342,7 +1348,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) * extent lock (new_size:EOF for truncate). It may seem * excessive to send mtime/atime updates to OSTs when not * setting times to past, but it is necessary due to possible - * time de-synchronization between MDT inode and OST objects */ + * time de-synchronization between MDT inode and OST objects + */ if (attr->ia_valid & ATTR_SIZE) down_write(&lli->lli_trunc_sem); rc = cl_setattr_ost(inode, attr); @@ -1470,7 +1477,8 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs) /* We need to downshift for all 32-bit kernels, because we can't * tell if the kernel is being called via sys_statfs64() or not. * Stop before overflowing f_bsize - in which case it is better - * to just risk EOVERFLOW if caller is using old sys_statfs(). */ + * to just risk EOVERFLOW if caller is using old sys_statfs(). 
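The comment above refers to the downshift loop that follows it, which folds 64-bit block counts into what a 32-bit statfs can report by doubling f_bsize while halving the counts. The same transformation in isolation (field handling follows struct kstatfs):

/* Scale until the count fits in an unsigned long or the block size
 * reaches 1 GiB; on 64-bit kernels the first test can never fire,
 * which is why the caller guards with sizeof(long) < 8. */
static void demo_statfs_downshift(unsigned long long *blocks, long *bsize)
{
        while (*blocks > ~0UL && *bsize < 0x40000000) {
                *bsize <<= 1;   /* e.g. 4096 -> 8192 */
                *blocks >>= 1;  /* halve counts to compensate */
        }
}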
+ */ if (sizeof(long) < 8) { while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) { sfs->f_bsize <<= 1; @@ -1514,7 +1522,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) struct ll_sb_info *sbi = ll_i2sbi(inode); LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0)); - if (lsm != NULL) { + if (lsm) { if (!lli->lli_has_smd && !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK)) cl_file_inode_init(inode, md); @@ -1599,12 +1607,13 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) if (exp_connect_som(ll_i2mdexp(inode)) && S_ISREG(inode->i_mode)) { struct lustre_handle lockh; - ldlm_mode_t mode; + enum ldlm_mode mode; /* As it is possible a blocking ast has been processed * by this time, we need to check there is an UPDATE * lock on the client and set LLIF_MDS_SIZE_LOCK holding - * it. */ + * it. + */ mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE, &lockh, LDLM_FL_CBPENDING, LCK_CR | LCK_CW | @@ -1617,7 +1626,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) inode->i_ino, lli->lli_flags); } else { /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 */ + * deadlock bz14138 & bz14326 + */ i_size_write(inode, body->size); spin_lock(&lli->lli_lock); lli->lli_flags |= LLIF_MDS_SIZE_LOCK; @@ -1627,7 +1637,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) } } else { /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 */ + * deadlock bz14138 & bz14326 + */ i_size_write(inode, body->size); CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n", @@ -1657,7 +1668,8 @@ void ll_read_inode2(struct inode *inode, void *opaque) /* Core attributes from the MDS first. This is a new inode, and * the VFS doesn't zero times in the core inode so we have to do * it ourselves. They will be overwritten by either MDS or OST - * attributes - we just need to make sure they aren't newer. */ + * attributes - we just need to make sure they aren't newer. + */ LTIME_S(inode->i_mtime) = 0; LTIME_S(inode->i_atime) = 0; LTIME_S(inode->i_ctime) = 0; @@ -1689,9 +1701,10 @@ void ll_delete_inode(struct inode *inode) { struct cl_inode_info *lli = cl_i2info(inode); - if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) + if (S_ISREG(inode->i_mode) && lli->lli_clob) /* discard all dirty pages before truncating them, required by - * osc_extent implementation at LU-1030. */ + * osc_extent implementation at LU-1030. 
+ */ cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_DISCARD, 1); @@ -1744,14 +1757,14 @@ int ll_iocontrol(struct inode *inode, struct file *file, ptlrpc_req_finished(req); - return put_user(flags, (int *)arg); + return put_user(flags, (int __user *)arg); } case FSFILT_IOC_SETFLAGS: { struct lov_stripe_md *lsm; struct obd_info oinfo = { }; struct md_op_data *op_data; - if (get_user(flags, (int *)arg)) + if (get_user(flags, (int __user *)arg)) return -EFAULT; op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, @@ -1776,8 +1789,7 @@ int ll_iocontrol(struct inode *inode, struct file *file, return 0; } - oinfo.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); + oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oinfo.oi_oa) { ccc_inode_lsm_put(inode, lsm); return -ENOMEM; @@ -1809,7 +1821,7 @@ int ll_flush_ctx(struct inode *inode) struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_SEC, "flush context for user %d\n", - from_kuid(&init_user_ns, current_uid())); + from_kuid(&init_user_ns, current_uid())); obd_set_info_async(NULL, sbi->ll_md_exp, sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX, @@ -1831,7 +1843,7 @@ void ll_umount_begin(struct super_block *sb) sb->s_count, atomic_read(&sb->s_active)); obd = class_exp2obd(sbi->ll_md_exp); - if (obd == NULL) { + if (!obd) { CERROR("Invalid MDC connection handle %#llx\n", sbi->ll_md_exp->exp_handle.h_cookie); return; @@ -1839,7 +1851,7 @@ void ll_umount_begin(struct super_block *sb) obd->obd_force = 1; obd = class_exp2obd(sbi->ll_dt_exp); - if (obd == NULL) { + if (!obd) { CERROR("Invalid LOV connection handle %#llx\n", sbi->ll_dt_exp->exp_handle.h_cookie); return; @@ -1920,13 +1932,8 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) { - CWARN("%s: cannot allocate op_data to release open handle for " - DFID "\n", - ll_get_fsname(sb, NULL, 0), PFID(&body->fid1)); - + if (!op_data) return; - } op_data->op_fid1 = body->fid1; op_data->op_ioepoch = body->ioepoch; @@ -1941,7 +1948,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *sb, struct lookup_intent *it) { struct ll_sb_info *sbi = NULL; - struct lustre_md md; + struct lustre_md md = { NULL }; int rc; LASSERT(*inode || sb); @@ -1954,7 +1961,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, if (*inode) { ll_update_inode(*inode, &md); } else { - LASSERT(sb != NULL); + LASSERT(sb); /* * At this point server returns to client's same fid as client @@ -1965,15 +1972,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1, sbi->ll_flags & LL_SBI_32BIT_API), &md); - if (*inode == NULL || IS_ERR(*inode)) { + if (!*inode) { #ifdef CONFIG_FS_POSIX_ACL if (md.posix_acl) { posix_acl_release(md.posix_acl); md.posix_acl = NULL; } #endif - rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM; - *inode = NULL; + rc = -ENOMEM; CERROR("new_inode -fatal: rc %d\n", rc); goto out; } @@ -1986,14 +1992,15 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, * 1. proc1: mdt returns a lsm but not granting layout * 2. layout was changed by another client * 3. proc2: refresh layout and layout lock granted - * 4. proc1: to apply a stale layout */ - if (it != NULL && it->d.lustre.it_lock_mode != 0) { + * 4. 
proc1: to apply a stale layout + */ + if (it && it->d.lustre.it_lock_mode != 0) { struct lustre_handle lockh; struct ldlm_lock *lock; lockh.cookie = it->d.lustre.it_lock_handle; lock = ldlm_handle2lock(&lockh); - LASSERT(lock != NULL); + LASSERT(lock); if (ldlm_has_layout(lock)) { struct cl_object_conf conf; @@ -2008,7 +2015,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, } out: - if (md.lsm != NULL) + if (md.lsm) obd_free_memmd(sbi->ll_dt_exp, &md.lsm); md_free_lustre_md(sbi->ll_md_exp, &md); @@ -2019,14 +2026,13 @@ cleanup: return rc; } -int ll_obd_statfs(struct inode *inode, void *arg) +int ll_obd_statfs(struct inode *inode, void __user *arg) { struct ll_sb_info *sbi = NULL; struct obd_export *exp; char *buf = NULL; struct obd_ioctl_data *data = NULL; __u32 type; - __u32 flags; int len = 0, rc; if (!inode) { @@ -2069,8 +2075,7 @@ int ll_obd_statfs(struct inode *inode, void *arg) goto out_statfs; } - flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0; - rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags); + rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL); if (rc) goto out_statfs; out_statfs: @@ -2101,7 +2106,8 @@ int ll_process_config(struct lustre_cfg *lcfg) LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC); /* Note we have not called client_common_fill_super yet, so - proc fns must be able to handle that! */ + * proc fns must be able to handle that! + */ rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars, lcfg, sb); if (rc > 0) @@ -2111,19 +2117,17 @@ int ll_process_config(struct lustre_cfg *lcfg) /* this function prepares md_op_data hint for passing ot down to MD stack. */ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, - struct inode *i1, struct inode *i2, - const char *name, int namelen, - int mode, __u32 opc, void *data) + struct inode *i1, struct inode *i2, + const char *name, int namelen, + int mode, __u32 opc, void *data) { - LASSERT(i1 != NULL); - if (namelen > ll_i2sbi(i1)->ll_namelen) return ERR_PTR(-ENAMETOOLONG); - if (op_data == NULL) + if (!op_data) op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (op_data == NULL) + if (!op_data) return ERR_PTR(-ENOMEM); ll_i2gids(op_data->op_suppgids, i1, i2); @@ -2143,8 +2147,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, op_data->op_cap = cfs_curproc_cap_pack(); op_data->op_bias = 0; op_data->op_cli_flags = 0; - if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) && - filename_is_volatile(name, namelen, NULL)) + if ((opc == LUSTRE_OPC_CREATE) && name && + filename_is_volatile(name, namelen, NULL)) op_data->op_bias |= MDS_CREATE_VOLATILE; op_data->op_opc = opc; op_data->op_mds = 0; @@ -2152,7 +2156,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, /* If the file is being opened after mknod() (normally due to NFS) * try to use the default stripe data from parent directory for - * allocating OST objects. Try to pass the parent FID to MDS. */ + * allocating OST objects. Try to pass the parent FID to MDS. 
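The void __user * signature introduced for ll_obd_statfs() above, and mirrored in ll_iocontrol(), ll_get_obd_name() and the lloop ioctls below, is what lets sparse verify that user pointers only ever reach the copy helpers. The pattern reduced to its core (demo_copy_out is illustrative):

#include <linux/uaccess.h>

/* Copy one kernel value out through a user-supplied pointer; for a
 * single scalar, put_user(value, (int __user *)arg) is the shorthand
 * used elsewhere in this patch. */
static int demo_copy_out(void __user *arg, int value)
{
        if (copy_to_user(arg, &value, sizeof(value)))
                return -EFAULT;
        return 0;
}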
+ */ if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) && !ll_i2info(i2)->lli_has_smd) { struct ll_inode_info *lli = ll_i2info(i2); @@ -2179,7 +2184,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry) { struct ll_sb_info *sbi; - LASSERT((seq != NULL) && (dentry != NULL)); + LASSERT(seq && dentry); sbi = ll_s2sbi(dentry->d_sb); if (sbi->ll_flags & LL_SBI_NOLCK) @@ -2221,8 +2226,8 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg) if (!obd) return -ENOENT; - if (copy_to_user((void *)arg, obd->obd_name, - strlen(obd->obd_name) + 1)) + if (copy_to_user((void __user *)arg, obd->obd_name, + strlen(obd->obd_name) + 1)) return -EFAULT; return 0; @@ -2240,10 +2245,11 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen) char *ptr; int len; - if (buf == NULL) { + if (!buf) { /* this means the caller wants to use static buffer * and it doesn't care about race. Usually this is - * in error reporting path */ + * in error reporting path + */ buf = fsname_static; buflen = sizeof(fsname_static); } @@ -2269,9 +2275,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) /* this can be called inside spin lock so use GFP_ATOMIC. */ buf = (char *)__get_free_page(GFP_ATOMIC); - if (buf != NULL) { + if (buf) { dentry = d_find_alias(page->mapping->host); - if (dentry != NULL) + if (dentry) path = dentry_path_raw(dentry, buf, PAGE_SIZE); } @@ -2282,9 +2288,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) PFID(&obj->cob_header.coh_lu.loh_fid), (path && !IS_ERR(path)) ? path : "", ioret); - if (dentry != NULL) + if (dentry) dput(dentry); - if (buf != NULL) + if (buf) free_page((unsigned long)buf); } diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index bbae95c9f..5b484e62f 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -54,11 +54,11 @@ static const struct vm_operations_struct ll_file_vm_ops; void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, - size_t count) + struct vm_area_struct *vma, unsigned long addr, + size_t count) { policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + - (vma->vm_pgoff << PAGE_CACHE_SHIFT); + (vma->vm_pgoff << PAGE_SHIFT); policy->l_extent.end = (policy->l_extent.start + count - 1) | ~CFS_PAGE_MASK; } @@ -72,7 +72,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, LASSERT(!down_write_trylock(&mm->mmap_sem)); for (vma = find_vma(mm, addr); - vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { + vma && vma->vm_start < (addr + count); vma = vma->vm_next) { if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && vma->vm_flags & VM_SHARED) { ret = vma; @@ -119,13 +119,13 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, */ env = cl_env_nested_get(nest); if (IS_ERR(env)) - return ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); *env_ret = env; io = ccc_env_thread_io(env); io->ci_obj = ll_i2info(inode)->lli_clob; - LASSERT(io->ci_obj != NULL); + LASSERT(io->ci_obj); fio = &io->u.ci_fault; fio->ft_index = index; @@ -136,7 +136,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, * the kernel will not read other pages not covered by ldlm in * filemap_nopage. we do our readahead in ll_readpage. 
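policy_from_vma() above converts a faulting virtual address into a byte extent of the backing file; the shift works because vm_pgoff counts pages from the start of the file. The offset arithmetic on its own (the DEMO_* macros stand in for the kernel's PAGE_* and Lustre's CFS_PAGE_MASK):

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_MASK  (~((1UL << DEMO_PAGE_SHIFT) - 1))

/* File offset backing virtual address 'addr' for a mapping starting
 * at 'vm_start' whose first mapped page is file page 'vm_pgoff'. */
static unsigned long long demo_file_offset(unsigned long addr,
                                           unsigned long vm_start,
                                           unsigned long vm_pgoff)
{
        return ((addr - vm_start) & DEMO_PAGE_MASK) +
               ((unsigned long long)vm_pgoff << DEMO_PAGE_SHIFT);
}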
*/ - if (ra_flags != NULL) + if (ra_flags) *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ); vma->vm_flags &= ~VM_SEQ_READ; vma->vm_flags |= VM_RAND_READ; @@ -151,8 +151,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, LASSERT(cio->cui_cl.cis_io == io); - /* mmap lock must be MANDATORY it has to cache - * pages. */ + /* mmap lock must be MANDATORY it has to cache pages. */ io->ci_lockreq = CILR_MANDATORY; cio->cui_fd = fd; } else { @@ -178,8 +177,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct inode *inode; struct ll_inode_info *lli; - LASSERT(vmpage != NULL); - io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); if (IS_ERR(io)) { result = PTR_ERR(io); @@ -201,7 +198,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, /* we grab lli_trunc_sem to exclude truncate case. * Otherwise, we could add dirty pages into osc cache - * while truncate is on-going. */ + * while truncate is on-going. + */ inode = ccc_object_inode(io->ci_obj); lli = ll_i2info(inode); down_read(&lli->lli_trunc_sem); @@ -217,12 +215,13 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct ll_inode_info *lli = ll_i2info(inode); lock_page(vmpage); - if (vmpage->mapping == NULL) { + if (!vmpage->mapping) { unlock_page(vmpage); /* page was truncated and lock was cancelled, return * ENODATA so that VM_FAULT_NOPAGE will be returned - * to handle_mm_fault(). */ + * to handle_mm_fault(). + */ if (result == 0) result = -ENODATA; } else if (!PageDirty(vmpage)) { @@ -315,13 +314,14 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) result = cl_io_loop(env, io); /* ft_flags are only valid if we reached - * the call to filemap_fault */ + * the call to filemap_fault + */ if (vio->u.fault.fault.ft_flags_valid) fault_ret = vio->u.fault.fault.ft_flags; vmpage = vio->u.fault.ft_vmpage; - if (result != 0 && vmpage != NULL) { - page_cache_release(vmpage); + if (result != 0 && vmpage) { + put_page(vmpage); vmf->page = NULL; } } @@ -344,9 +344,10 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int result; sigset_t set; - /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite + /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite * so that it can be killed by admin but not cause segfault by - * other signals. */ + * other signals. + */ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); restart: @@ -357,9 +358,9 @@ restart: /* check if this page has been truncated */ lock_page(vmpage); - if (unlikely(vmpage->mapping == NULL)) { /* unlucky */ + if (unlikely(!vmpage->mapping)) { /* unlucky */ unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); vmf->page = NULL; if (!printed && ++count > 16) { @@ -447,7 +448,8 @@ static void ll_vm_close(struct vm_area_struct *vma) } /* XXX put nice comment here. 
talk about __free_pte -> dirty pages and - * nopage's reference passing to the pte */ + * nopage's reference passing to the pte + */ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) { int rc = -ENOENT; @@ -455,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) LASSERTF(last > first, "last %llu first %llu\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, + unmap_mapping_range(mapping, first + PAGE_SIZE - 1, last - first + 1, 0); } diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c index 18aab25f9..193aab879 100644 --- a/drivers/staging/lustre/lustre/llite/llite_nfs.c +++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c @@ -105,7 +105,8 @@ struct inode *search_inode_for_lustre(struct super_block *sb, return ERR_PTR(rc); /* Because inode is NULL, ll_prep_md_op_data can not - * be used here. So we allocate op_data ourselves */ + * be used here. So we allocate op_data ourselves + */ op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) return ERR_PTR(-ENOMEM); @@ -141,10 +142,11 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren struct inode *inode; struct dentry *result; - CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid)); if (!fid_is_sane(fid)) return ERR_PTR(-ESTALE); + CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid)); + inode = search_inode_for_lustre(sb, fid); if (IS_ERR(inode)) return ERR_CAST(inode); @@ -160,7 +162,7 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren * We have to find the parent to tell MDS how to init lov objects. */ if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd && - parent != NULL) { + parent && !fid_is_zero(parent)) { struct ll_inode_info *lli = ll_i2info(inode); spin_lock(&lli->lli_lock); @@ -174,8 +176,6 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren return result; } -#define LUSTRE_NFS_FID 0x97 - /** * \a connectable - is nfsd will connect himself or this should be done * at lustre @@ -188,20 +188,25 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen, struct inode *parent) { + int fileid_len = sizeof(struct lustre_nfs_fid) / 4; struct lustre_nfs_fid *nfs_fid = (void *)fh; - CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n", - inode->i_ino, PFID(ll_inode2fid(inode)), *plen, - (int)sizeof(struct lustre_nfs_fid)); + CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n", + inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len); - if (*plen < sizeof(struct lustre_nfs_fid) / 4) - return 255; + if (*plen < fileid_len) { + *plen = fileid_len; + return FILEID_INVALID; + } nfs_fid->lnf_child = *ll_inode2fid(inode); - nfs_fid->lnf_parent = *ll_inode2fid(parent); - *plen = sizeof(struct lustre_nfs_fid) / 4; + if (parent) + nfs_fid->lnf_parent = *ll_inode2fid(parent); + else + fid_zero(&nfs_fid->lnf_parent); + *plen = fileid_len; - return LUSTRE_NFS_FID; + return FILEID_LUSTRE; } static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, @@ -209,7 +214,8 @@ static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, unsigned type) { /* It is hack to access lde_fid for comparison with lgd_fid. - * So the input 'name' must be part of the 'lu_dirent'. 
*/ + * So the input 'name' must be part of the 'lu_dirent'. + */ struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name); struct ll_getname_data *lgd = container_of(ctx, struct ll_getname_data, ctx); @@ -259,7 +265,7 @@ static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid, { struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - if (fh_type != LUSTRE_NFS_FID) + if (fh_type != FILEID_LUSTRE) return ERR_PTR(-EPROTO); return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent); @@ -270,7 +276,7 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid, { struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - if (fh_type != LUSTRE_NFS_FID) + if (fh_type != FILEID_LUSTRE) return ERR_PTR(-EPROTO); return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL); @@ -292,8 +298,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild) sbi = ll_s2sbi(dir->i_sb); - CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n", - dir->i_ino, PFID(ll_inode2fid(dir))); + CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n", + dir->i_ino, PFID(ll_inode2fid(dir))); rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc != 0) @@ -314,8 +320,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild) body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); LASSERT(body->valid & OBD_MD_FLID); - CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n", - PFID(ll_inode2fid(dir)), PFID(&body->fid1)); + CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n", + PFID(ll_inode2fid(dir)), PFID(&body->fid1)); result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL); @@ -323,10 +329,10 @@ static struct dentry *ll_get_parent(struct dentry *dchild) return result; } -struct export_operations lustre_export_operations = { - .get_parent = ll_get_parent, - .encode_fh = ll_encode_fh, - .get_name = ll_get_name, +const struct export_operations lustre_export_operations = { + .get_parent = ll_get_parent, + .encode_fh = ll_encode_fh, + .get_name = ll_get_name, .fh_to_dentry = ll_fh_to_dentry, .fh_to_parent = ll_fh_to_parent, }; diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c index b27c3f2fc..8509b07cb 100644 --- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c +++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c @@ -98,7 +98,7 @@ static void rce_free(struct rmtacl_ctl_entry *rce) } static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct, - pid_t key) + pid_t key) { struct rmtacl_ctl_entry *rce; struct list_head *head = &rct->rct_entries[rce_hashfunc(key)]; @@ -125,12 +125,12 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops) struct rmtacl_ctl_entry *rce, *e; rce = rce_alloc(key, ops); - if (rce == NULL) + if (!rce) return -ENOMEM; spin_lock(&rct->rct_lock); e = __rct_search(rct, key); - if (unlikely(e != NULL)) { + if (unlikely(e)) { CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n", (int)key, ops); rce_free(e); @@ -172,7 +172,7 @@ void rct_fini(struct rmtacl_ctl_table *rct) for (i = 0; i < RCE_HASHES; i++) while (!list_empty(&rct->rct_entries[i])) { rce = list_entry(rct->rct_entries[i].next, - struct rmtacl_ctl_entry, rce_list); + struct rmtacl_ctl_entry, rce_list); rce_free(rce); } spin_unlock(&rct->rct_lock); @@ -208,12 +208,12 @@ void ee_free(struct eacl_entry *ee) } static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key, - struct lu_fid *fid, int type) + struct lu_fid *fid, int type) { struct eacl_entry *ee; struct 
list_head *head = &et->et_entries[ee_hashfunc(key)]; - LASSERT(fid != NULL); + LASSERT(fid); list_for_each_entry(ee, head, ee_list) if (ee->ee_key == key) { if (lu_fid_eq(&ee->ee_fid, fid) && @@ -256,12 +256,12 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type, struct eacl_entry *ee, *e; ee = ee_alloc(key, fid, type, header); - if (ee == NULL) + if (!ee) return -ENOMEM; spin_lock(&et->et_lock); e = __et_search_del(et, key, fid, type); - if (unlikely(e != NULL)) { + if (unlikely(e)) { CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n", (int)key, PFID(fid), type); ee_free(e); @@ -290,7 +290,7 @@ void et_fini(struct eacl_table *et) for (i = 0; i < EE_HASHES; i++) while (!list_empty(&et->et_entries[i])) { ee = list_entry(et->et_entries[i].next, - struct eacl_entry, ee_list); + struct eacl_entry, ee_list); ee_free(ee); } spin_unlock(&et->et_lock); diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index 871924b3f..f169c0db6 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -211,15 +211,14 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) return io->ci_result; io->ci_lockreq = CILR_NEVER; - LASSERT(head != NULL); rw = head->bi_rw; - for (bio = head; bio != NULL; bio = bio->bi_next) { + for (bio = head; bio ; bio = bio->bi_next) { LASSERT(rw == bio->bi_rw); offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; bio_for_each_segment(bvec, bio, iter) { BUG_ON(bvec.bv_offset != 0); - BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); + BUG_ON(bvec.bv_len != PAGE_SIZE); pages[page_count] = bvec.bv_page; offsets[page_count] = offset; @@ -233,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) (rw == WRITE) ? 
LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ, page_count); - pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; + pvec->ldp_size = page_count << PAGE_SHIFT; pvec->ldp_nr = page_count; /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to @@ -297,7 +296,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) spin_lock_irq(&lo->lo_lock); first = lo->lo_bio; - if (unlikely(first == NULL)) { + if (unlikely(!first)) { spin_unlock_irq(&lo->lo_lock); return 0; } @@ -308,7 +307,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) rw = first->bi_rw; bio = &lo->lo_bio; while (*bio && (*bio)->bi_rw == rw) { - CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", + CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n", (unsigned long long)(*bio)->bi_iter.bi_sector, (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt); @@ -458,7 +457,7 @@ static int loop_thread(void *data) total_count, times, total_count / times); } - LASSERT(bio != NULL); + LASSERT(bio); LASSERT(count <= atomic_read(&lo->lo_pending)); loop_handle_bio(lo, bio); atomic_sub(count, &lo->lo_pending); @@ -508,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->lo_blocksize = PAGE_CACHE_SIZE; + lo->lo_blocksize = PAGE_SIZE; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -526,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, lo->lo_queue->queuedata = lo; /* queue parameters */ - CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); + CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8))); blk_queue_logical_block_size(lo->lo_queue, - (unsigned short)PAGE_CACHE_SIZE); + (unsigned short)PAGE_SIZE); blk_queue_max_hw_sectors(lo->lo_queue, - LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); + LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9)); blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); set_capacity(disks[lo->lo_number], size); @@ -560,7 +559,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev, if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */ return -EBUSY; - if (filp == NULL) + if (!filp) return -EINVAL; spin_lock_irq(&lo->lo_lock); @@ -625,18 +624,18 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LL_IOC_LLOOP_INFO: { struct lu_fid fid; - if (lo->lo_backing_file == NULL) { + if (!lo->lo_backing_file) { err = -ENOENT; break; } - if (inode == NULL) + if (!inode) inode = file_inode(lo->lo_backing_file); if (lo->lo_state == LLOOP_BOUND) fid = ll_i2info(inode)->lli_fid; else fid_zero(&fid); - if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid))) + if (copy_to_user((void __user *)arg, &fid, sizeof(fid))) err = -EFAULT; break; } @@ -676,7 +675,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file, if (magic != ll_iocontrol_magic) return LLIOC_CONT; - if (disks == NULL) { + if (!disks) { err = -ENODEV; goto out1; } @@ -708,7 +707,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file, dev = MKDEV(lloop_major, lo->lo_number); /* quit if the used pointer is writable */ - if (put_user((long)old_encode_dev(dev), (long *)arg)) { + if (put_user((long)old_encode_dev(dev), (long __user *)arg)) { err = -EFAULT; goto out; } @@ -793,7 +792,7 @@ static int __init lloop_init(void) lloop_major, max_loop); ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist); - if (ll_iocontrol_magic == NULL) + 
if (!ll_iocontrol_magic) goto out_mem1; loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL); @@ -872,11 +871,12 @@ static void lloop_exit(void) kfree(loop_dev); } -module_init(lloop_init); -module_exit(lloop_exit); - module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "maximum of lloop_device"); MODULE_AUTHOR("OpenSFS, Inc. "); MODULE_DESCRIPTION("Lustre virtual block device"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); + +module_init(lloop_init); +module_exit(lloop_exit); diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index f134ad9d2..27ab12614 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -43,7 +43,7 @@ #include "llite_internal.h" #include "vvp_internal.h" -/* /proc/lustre/llite mount point registration */ +/* debugfs llite mount point registration */ static struct file_operations ll_rw_extents_stats_fops; static struct file_operations ll_rw_extents_stats_pp_fops; static struct file_operations ll_rw_offset_stats_fops; @@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj, if (rc) return rc; - pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ + pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number > totalram_pages / 2) { CERROR("can't set file readahead more than %lu MB\n", - totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/ + totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/ return -ERANGE; } @@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_pages_per_file; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -345,10 +345,11 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj, return rc; /* Cap this at the current max readahead window size, the readahead - * algorithm does this anyway so it's pointless to set it larger. */ + * algorithm does this anyway so it's pointless to set it larger. 
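/*
 * A sketch (not from the patch) of the MiB <-> pages conversion the
 * max_read_ahead_mb handlers above rely on: one MiB is 2^20 bytes and a
 * page is 2^PAGE_SHIFT bytes, so the multiplier is 1 << (20 - PAGE_SHIFT),
 * i.e. 256 pages per MiB with 4 KiB pages.
 */
static inline unsigned long demo_mb_to_pages(unsigned long mb)
{
	return mb << (20 - PAGE_SHIFT);
}

static inline unsigned long demo_pages_to_mb(unsigned long pages)
{
	return pages >> (20 - PAGE_SHIFT);
}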
+ */ if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", - sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT)); + sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT)); return -ERANGE; } @@ -365,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v) struct super_block *sb = m->private; struct ll_sb_info *sbi = ll_s2sbi(sb); struct cl_client_cache *cache = &sbi->ll_cache; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; int max_cached_mb; int unused_mb; @@ -404,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, return -EFAULT; kernbuf[count] = 0; - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) - kernbuf; rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); @@ -414,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, if (pages_number < 0 || pages_number > totalram_pages) { CERROR("%s: can't set max cache more than %lu MB\n", ll_get_fsname(sb, NULL, 0), - totalram_pages >> (20 - PAGE_CACHE_SHIFT)); + totalram_pages >> (20 - PAGE_SHIFT)); return -ERANGE; } @@ -453,7 +454,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, if (diff <= 0) break; - if (sbi->ll_dt_exp == NULL) { /* being initialized */ + if (!sbi->ll_dt_exp) { /* being initialized */ rc = -ENODEV; break; } @@ -461,9 +462,9 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, /* difficult - have to ask OSCs to drop LRU slots. */ tmp = diff << 1; rc = obd_set_info_async(NULL, sbi->ll_dt_exp, - sizeof(KEY_CACHE_LRU_SHRINK), - KEY_CACHE_LRU_SHRINK, - sizeof(tmp), &tmp, NULL); + sizeof(KEY_CACHE_LRU_SHRINK), + KEY_CACHE_LRU_SHRINK, + sizeof(tmp), &tmp, NULL); if (rc < 0) break; } @@ -966,9 +967,9 @@ int ldebugfs_register_mountpoint(struct dentry *parent, name[MAX_STRING_SIZE] = '\0'; - LASSERT(sbi != NULL); - LASSERT(mdc != NULL); - LASSERT(osc != NULL); + LASSERT(sbi); + LASSERT(mdc); + LASSERT(osc); /* Get fsname */ len = strlen(lsi->lsi_lmd->lmd_profile); @@ -999,7 +1000,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, CWARN("Error adding the extent_stats file\n"); rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, - "extents_stats_per_process", + "extents_stats_per_process", 0644, &ll_rw_extents_stats_pp_fops, sbi); if (rc) CWARN("Error adding the extents_stats_per_process file\n"); @@ -1012,7 +1013,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, /* File operations stats */ sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES, LPROCFS_STATS_FLAG_NONE); - if (sbi->ll_stats == NULL) { + if (!sbi->ll_stats) { err = -ENOMEM; goto out; } @@ -1033,13 +1034,13 @@ int ldebugfs_register_mountpoint(struct dentry *parent, llite_opcode_table[id].opname, ptr); } err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats", - sbi->ll_stats); + sbi->ll_stats); if (err) goto out; sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string), LPROCFS_STATS_FLAG_NONE); - if (sbi->ll_ra_stats == NULL) { + if (!sbi->ll_ra_stats) { err = -ENOMEM; goto out; } @@ -1049,7 +1050,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, ra_stat_string[id], "pages"); err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats", - sbi->ll_ra_stats); + sbi->ll_ra_stats); if (err) goto out; @@ -1103,7 +1104,7 @@ void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi) #define pct(a, b) 
(b ? a * 100 / b : 0) static void ll_display_extents_info(struct ll_rw_extents_info *io_extents, - struct seq_file *seq, int which) + struct seq_file *seq, int which) { unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum; unsigned long start, end, r, w; @@ -1503,5 +1504,5 @@ LPROC_SEQ_FOPS(ll_rw_offset_stats); void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars) { - lvars->obd_vars = lprocfs_llite_obd_vars; + lvars->obd_vars = lprocfs_llite_obd_vars; } diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index da5f443a0..f8f98e4e8 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -118,16 +118,16 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, ll_read_inode2(inode, md); if (S_ISREG(inode->i_mode) && - ll_i2info(inode)->lli_clob == NULL) { + !ll_i2info(inode)->lli_clob) { CDEBUG(D_INODE, - "%s: apply lsm %p to inode "DFID".\n", - ll_get_fsname(sb, NULL, 0), md->lsm, - PFID(ll_inode2fid(inode))); + "%s: apply lsm %p to inode " DFID ".\n", + ll_get_fsname(sb, NULL, 0), md->lsm, + PFID(ll_inode2fid(inode))); rc = cl_file_inode_init(inode, md); } if (rc != 0) { iget_failed(inode); - inode = ERR_PTR(rc); + inode = NULL; } else unlock_new_inode(inode); } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) @@ -180,10 +180,11 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, __u64 bits = lock->l_policy_data.l_inodebits.bits; /* Inode is set to lock->l_resource->lr_lvb_inode - * for mdc - bug 24555 */ - LASSERT(lock->l_ast_data == NULL); + * for mdc - bug 24555 + */ + LASSERT(!lock->l_ast_data); - if (inode == NULL) + if (!inode) break; /* Invalidate all dentries associated with this inode */ @@ -202,7 +203,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } /* For OPEN locks we differentiate between lock modes - * LCK_CR, LCK_CW, LCK_PR - bug 22891 */ + * LCK_CR, LCK_CW, LCK_PR - bug 22891 + */ if (bits & MDS_INODELOCK_OPEN) ll_have_md_lock(inode, &bits, lock->l_req_mode); @@ -260,7 +262,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) && - inode->i_sb->s_root != NULL && + inode->i_sb->s_root && !is_root_inode(inode)) ll_invalidate_aliases(inode); @@ -285,15 +287,11 @@ __u32 ll_i2suppgid(struct inode *i) /* Pack the required supplementary groups into the supplied groups array. * If we don't need to use the groups from the target inode(s) then we * instead pack one or more groups from the user's supplementary group - * array in case it might be useful. Not needed if doing an MDS-side upcall. */ + * array in case it might be useful. Not needed if doing an MDS-side upcall. 
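/*
 * A condensed sketch of the supplementary-GID packing described in the
 * comment above: slot 0 always carries the first inode's group, slot 1
 * the second inode's group when one is supplied, and -1 marks an unused
 * slot. The names here are illustrative, not the driver's.
 */
static void demo_pack_suppgids(__u32 *suppgids, __u32 gid1,
			       const __u32 *gid2 /* NULL if absent */)
{
	suppgids[0] = gid1;
	suppgids[1] = gid2 ? *gid2 : (__u32)-1;
}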
+ */ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) { -#if 0 - int i; -#endif - - LASSERT(i1 != NULL); - LASSERT(suppgids != NULL); + LASSERT(i1); suppgids[0] = ll_i2suppgid(i1); @@ -301,22 +299,6 @@ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) suppgids[1] = ll_i2suppgid(i2); else suppgids[1] = -1; - -#if 0 - for (i = 0; i < current_ngroups; i++) { - if (suppgids[0] == -1) { - if (current_groups[i] != suppgids[1]) - suppgids[0] = current_groups[i]; - continue; - } - if (suppgids[1] == -1) { - if (current_groups[i] != suppgids[0]) - suppgids[1] = current_groups[i]; - continue; - } - break; - } -#endif } /* @@ -409,7 +391,8 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, int rc = 0; /* NB 1 request reference will be taken away by ll_intent_lock() - * when I return */ + * when I return + */ CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it, it->d.lustre.it_disposition); if (!it_disposition(it, DISP_LOOKUP_NEG)) { @@ -420,13 +403,14 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits); /* We used to query real size from OSTs here, but actually - this is not needed. For stat() calls size would be updated - from subsequent do_revalidate()->ll_inode_revalidate_it() in - 2.4 and - vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 - Everybody else who needs correct file size would call - ll_glimpse_size or some equivalent themselves anyway. - Also see bug 7198. */ + * this is not needed. For stat() calls size would be updated + * from subsequent do_revalidate()->ll_inode_revalidate_it() in + * 2.4 and + * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 + * Everybody else who needs correct file size would call + * ll_glimpse_size or some equivalent themselves anyway. + * Also see bug 7198. + */ } /* Only hash *de if it is unhashed (new dentry). @@ -443,9 +427,10 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, *de = alias; } else if (!it_disposition(it, DISP_LOOKUP_NEG) && !it_disposition(it, DISP_OPEN_CREATE)) { - /* With DISP_OPEN_CREATE dentry will - instantiated in ll_create_it. */ - LASSERT(d_inode(*de) == NULL); + /* With DISP_OPEN_CREATE dentry will be + * instantiated in ll_create_it. 
+ */ + LASSERT(!d_inode(*de)); d_instantiate(*de, inode); } @@ -498,7 +483,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, if (d_mountpoint(dentry)) CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it)); - if (it == NULL || it->it_op == IT_GETXATTR) + if (!it || it->it_op == IT_GETXATTR) it = &lookup_it; if (it->it_op == IT_GETATTR) { @@ -557,7 +542,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, out: if (req) ptlrpc_req_finished(req); - if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry)) + if (it->it_op == IT_GETATTR && (!retval || retval == dentry)) ll_statahead_mark(parent, dentry); return retval; } @@ -582,7 +567,7 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry, itp = &it; de = ll_lookup_it(parent, dentry, itp, 0); - if (itp != NULL) + if (itp) ll_intent_release(itp); return de; @@ -622,7 +607,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, de = ll_lookup_it(dir, dentry, it, lookup_flags); if (IS_ERR(de)) rc = PTR_ERR(de); - else if (de != NULL) + else if (de) dentry = de; if (!rc) { @@ -631,7 +616,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, rc = ll_create_it(dir, dentry, mode, it); if (rc) { /* We dget in ll_splice_alias. */ - if (de != NULL) + if (de) dput(de); goto out_release; } @@ -655,7 +640,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, /* We dget in ll_splice_alias. finish_open takes * care of dget for fd open. */ - if (de != NULL) + if (de) dput(de); } } else { @@ -693,7 +678,7 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it) /* We asked for a lock on the directory, but were granted a * lock on the inode. Since we finally have an inode pointer, - * stuff it in the lock. + */ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n", inode, inode->i_ino, inode->i_generation); ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); @@ -767,7 +753,7 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry, int tgt_len = 0; int err; - if (unlikely(tgt != NULL)) + if (unlikely(tgt)) tgt_len = strlen(tgt) + 1; op_data = ll_prep_md_op_data(NULL, dir, NULL, @@ -888,10 +874,11 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) /* The MDS sent back the EA because we unlinked the last reference * to this file. Use this EA to unlink the objects on the OST. * It's opaque so we don't swab here; we leave it to obd_unpackmd() to * check it is complete and sensible.
+ */ eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD, body->eadatasize); - LASSERT(eadata != NULL); + LASSERT(eadata); rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize); if (rc < 0) { @@ -900,8 +887,8 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) } LASSERT(rc >= sizeof(*lsm)); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out_free_memmd; } @@ -917,7 +904,7 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) &RMF_LOGCOOKIES, sizeof(struct llog_cookie) * lsm->lsm_stripe_count); - if (oti.oti_logcookies == NULL) { + if (!oti.oti_logcookies) { oa->o_valid &= ~OBD_MD_FLCOOKIE; body->valid &= ~OBD_MD_FLCOOKIE; } @@ -938,7 +925,8 @@ out: /* ll_unlink() doesn't update the inode with the new link count. * Instead, ll_ddelete() and ll_d_iput() will update it based upon if there * is any lock existing. They will recycle dentries and inodes based upon locks - * too. b=20433 */ + * too. b=20433 + */ static int ll_unlink(struct inode *dir, struct dentry *dentry) { struct ptlrpc_request *request = NULL; @@ -1028,7 +1016,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry, dir, 3000, oldname); err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO, - 0, LUSTRE_OPC_SYMLINK); + 0, LUSTRE_OPC_SYMLINK); if (!err) ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1); diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c index fe4a72268..e9d25317c 100644 --- a/drivers/staging/lustre/lustre/llite/remote_perm.c +++ b/drivers/staging/lustre/lustre/llite/remote_perm.c @@ -61,7 +61,7 @@ static inline struct ll_remote_perm *alloc_ll_remote_perm(void) { struct ll_remote_perm *lrp; - lrp = kmem_cache_alloc(ll_remote_perm_cachep, GFP_KERNEL | __GFP_ZERO); + lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL); if (lrp) INIT_HLIST_NODE(&lrp->lrp_list); return lrp; @@ -82,7 +82,7 @@ static struct hlist_head *alloc_rmtperm_hash(void) struct hlist_head *hash; int i; - hash = kmem_cache_alloc(ll_rmtperm_hash_cachep, GFP_NOFS | __GFP_ZERO); + hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS); if (!hash) return NULL; diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c index f35547496..edab6c5b7 100644 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ b/drivers/staging/lustre/lustre/llite/rw.c @@ -70,9 +70,9 @@ static void ll_cl_fini(struct ll_cl_context *lcc) struct cl_page *page = lcc->lcc_page; LASSERT(lcc->lcc_cookie == current); - LASSERT(env != NULL); + LASSERT(env); - if (page != NULL) { + if (page) { lu_ref_del(&page->cp_reference, "cl_io", io); cl_page_put(env, page); } @@ -97,7 +97,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file, int result = 0; clob = ll_i2info(vmpage->mapping->host)->lli_clob; - LASSERT(clob != NULL); + LASSERT(clob); env = cl_env_get(&refcheck); if (IS_ERR(env)) @@ -111,7 +111,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file, cio = ccc_env_io(env); io = cio->cui_cl.cis_io; - if (io == NULL && create) { + if (!io && create) { struct inode *inode = vmpage->mapping->host; loff_t pos; @@ -120,7 +120,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file, /* this is too bad. Someone is trying to write the * page w/o holding inode mutex. 
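/*
 * The allocator conversions above rest on a slab-API equivalence:
 * kmem_cache_zalloc(c, f) behaves as kmem_cache_alloc(c, f | __GFP_ZERO)
 * and returns zeroed memory. A sketch with a hypothetical cache:
 */
#include <linux/slab.h>

static void *demo_alloc_zeroed(struct kmem_cache *cache)
{
	return kmem_cache_zalloc(cache, GFP_NOFS); /* no memset() needed */
}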
This means we can - * add dirty pages into cache during truncate */ + * add dirty pages into cache during truncate + */ CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n", current->comm); dump_stack(); @@ -145,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file, */ io->ci_lockreq = CILR_NEVER; - pos = vmpage->index << PAGE_CACHE_SHIFT; + pos = vmpage->index << PAGE_SHIFT; /* Create a temp IO to serve write. */ - result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE); + result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE); if (result == 0) { cio->cui_fd = LUSTRE_FPRIVATE(file); cio->cui_iter = NULL; @@ -163,12 +164,11 @@ static struct ll_cl_context *ll_cl_init(struct file *file, } lcc->lcc_io = io; - if (io == NULL) + if (!io) result = -EIO; if (result == 0) { struct cl_page *page; - LASSERT(io != NULL); LASSERT(io->ci_state == CIS_IO_GOING); LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file)); page = cl_page_find(env, clob, vmpage->index, vmpage, @@ -240,7 +240,8 @@ int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from, ll_cl_fini(lcc); } /* returning 0 in prepare assumes commit must be called - * afterwards */ + * afterwards + */ } else { result = PTR_ERR(lcc); } @@ -296,8 +297,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); * to get an ra budget that is larger than the remaining readahead pages * and reach here at exactly the same time. They will compute /a ret to * consume the remaining pages, but will fail at atomic_add_return() and - * get a zero ra window, although there is still ra space remaining. - Jay */ - + * get a zero ra window, although there is still ra space remaining. - Jay + */ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria, unsigned long pages) @@ -307,7 +308,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, /* If read-ahead pages left are less than 1M, do not do read-ahead, * otherwise it will form small read RPC(< 1M), which hurt server - * performance a lot. */ + * performance a lot. + */ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages); if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) { ret = 0; @@ -324,7 +326,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, * branch is more expensive than subtracting zero from the result. * * Strided read is left unaligned to avoid small fragments beyond - * the RPC boundary from needing an extra read RPC. */ + * the RPC boundary from needing an extra read RPC. 
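/*
 * A simplified sketch (not the driver's exact code) of the readahead
 * budget reservation that the ll_ra_count_get() comment above
 * describes: racing threads each add their request to one atomic
 * counter and back the addition out again on overdraft, so a loser of
 * the race ends up with a zero window rather than a partial one.
 */
#include <linux/atomic.h>

static int demo_ra_count_get(atomic_t *cur_pages, int max_pages, int pages)
{
	if (atomic_add_return(pages, cur_pages) > max_pages) {
		atomic_sub(pages, cur_pages);	/* overdrawn: give it back */
		return 0;
	}
	return pages;
}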
+ */ if (ria->ria_pages == 0) { long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES; @@ -364,7 +367,7 @@ void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which) #define RAS_CDEBUG(ras) \ CDEBUG(D_READA, \ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \ - "csr %lu sf %lu sp %lu sl %lu \n", \ + "csr %lu sf %lu sp %lu sl %lu\n", \ ras->ras_last_readpage, ras->ras_consecutive_requests, \ ras->ras_consecutive_pages, ras->ras_window_start, \ ras->ras_window_len, ras->ras_next_readahead, \ @@ -378,9 +381,9 @@ static int index_in_window(unsigned long index, unsigned long point, unsigned long start = point - before, end = point + after; if (start > point) - start = 0; + start = 0; if (end < point) - end = ~0; + end = ~0; return start <= index && index <= end; } @@ -473,7 +476,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, const char *msg = NULL; vmpage = grab_cache_page_nowait(mapping, index); - if (vmpage != NULL) { + if (vmpage) { /* Check if vmpage was truncated or reclaimed */ if (vmpage->mapping == mapping) { page = cl_page_find(env, clob, vmpage->index, @@ -495,12 +498,12 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, } if (rc != 1) unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); } else { which = RA_STAT_FAILED_GRAB_PAGE; msg = "g_c_p_n failed"; } - if (msg != NULL) { + if (msg) { ll_ra_stats_inc(mapping, which); CDEBUG(D_READA, "%s\n", msg); } @@ -515,14 +518,16 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, /* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't * know what the actual RPC size is. If this needs to change, it makes more * sense to tune the i_blkbits value for the file based on the OSTs it is - * striped over, rather than having a constant value for all files here. */ + * striped over, rather than having a constant value for all files here. + */ -/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). +/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)). * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled * by default, this should be adjusted corresponding with max_read_ahead_mb * and max_read_ahead_per_file_mb otherwise the readahead budget can be used - * up quickly which will affect read performance significantly. See LU-2816 */ -#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) + * up quickly which will affect read performance significantly. See LU-2816 + */ +#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT) static inline int stride_io_mode(struct ll_readahead_state *ras) { @@ -570,7 +575,7 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs, if (end_left > st_pgs) end_left = st_pgs; - CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n", + CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n", start, end, start_left, end_left); if (start == end) @@ -600,7 +605,8 @@ static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria) /* If ria_length == ria_pages, it means non-stride I/O mode, * idx should always inside read-ahead window in this case * For stride I/O mode, just check whether the idx is inside - * the ria_pages. */ + * the ria_pages. 
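/*
 * A sketch of the stride-window membership test ras_inside_ra_window()
 * above implements: readable runs of `pages` pages repeat every
 * `length` pages starting at `stoff`; length == 0 or length == pages
 * means plain sequential readahead, where every index is inside.
 */
static int demo_inside_stride_window(unsigned long idx, unsigned long stoff,
				     unsigned long length, unsigned long pages)
{
	if (length == 0 || length == pages)
		return 1;	/* non-stride I/O mode */
	return idx >= stoff && (idx - stoff) % length < pages;
}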
+ */ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages || (idx >= ria->ria_stoff && (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages); @@ -616,12 +622,12 @@ static int ll_read_ahead_pages(const struct lu_env *env, int rc, count = 0, stride_ria; unsigned long page_idx; - LASSERT(ria != NULL); + LASSERT(ria); RIA_DEBUG(ria); stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0; - for (page_idx = ria->ria_start; page_idx <= ria->ria_end && - *reserved_pages > 0; page_idx++) { + for (page_idx = ria->ria_start; + page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) { if (ras_inside_ra_window(page_idx, ria)) { /* If the page is inside the read-ahead window*/ rc = ll_read_ahead_page(env, io, queue, @@ -634,11 +640,13 @@ static int ll_read_ahead_pages(const struct lu_env *env, } else if (stride_ria) { /* If it is not in the read-ahead window, and it is * read-ahead mode, then check whether it should skip - * the stride gap */ + * the stride gap + */ pgoff_t offset; /* FIXME: This assertion only is valid when it is for * forward read-ahead, it will be fixed when backward - * read-ahead is implemented */ + * read-ahead is implemented + */ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx, ria->ria_start, ria->ria_end, ria->ria_stoff, @@ -647,7 +655,7 @@ static int ll_read_ahead_pages(const struct lu_env *env, offset = offset % (ria->ria_length); if (offset > ria->ria_pages) { page_idx += ria->ria_length - offset; - CDEBUG(D_READA, "i %lu skip %lu \n", page_idx, + CDEBUG(D_READA, "i %lu skip %lu\n", page_idx, ria->ria_length - offset); continue; } @@ -699,7 +707,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, bead = NULL; /* Enlarge the RA window to encompass the full read */ - if (bead != NULL && ras->ras_window_start + ras->ras_window_len < + if (bead && ras->ras_window_start + ras->ras_window_len < bead->lrr_start + bead->lrr_count) { ras->ras_window_len = bead->lrr_start + bead->lrr_count - ras->ras_window_start; @@ -721,7 +729,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, */ /* Note: we only trim the RPC, instead of extending the RPC * to the boundary, so to avoid reading too much pages during - * random reading. */ + * random reading. + */ rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)); if (rpc_boundary > 0) rpc_boundary--; @@ -730,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, end = rpc_boundary; /* Truncate RA window to end of file */ - end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); + end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT)); ras->ras_next_readahead = max(end, end + 1); RAS_CDEBUG(ras); @@ -764,19 +773,19 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, mapping, &ra_end); - LASSERTF(reserved >= 0, "reserved %lu\n", reserved); if (reserved != 0) ll_ra_count_put(ll_i2sbi(inode), reserved); - if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) + if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT)) ll_ra_stats_inc(mapping, RA_STAT_EOF); /* if we didn't get to the end of the region we reserved from * the ras we need to go back and update the ras so that the * next read-ahead tries from where we left off. 
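/*
 * A sketch of the RPC-boundary trim in ll_readahead() above: round
 * (end + 1) down to a multiple of the per-RPC page count (a power of
 * two), then step back one page so the window stops just short of the
 * boundary instead of spilling a small fragment into the next RPC.
 * rpc_pages stands in for PTLRPC_MAX_BRW_PAGES.
 */
static unsigned long demo_trim_to_rpc(unsigned long start, unsigned long end,
				      unsigned long rpc_pages)
{
	unsigned long boundary = (end + 1) & ~(rpc_pages - 1);

	if (boundary > 0)
		boundary--;
	return boundary > start ? boundary : end;
}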
we only do so * if the region we failed to issue read-ahead on is still ahead - * of the app and behind the next index to start read-ahead from */ - CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n", + * of the app and behind the next index to start read-ahead from + */ + CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n", ra_end, end, ria->ria_end); if (ra_end != end + 1) { @@ -860,7 +869,7 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras, unsigned long stride_gap = index - ras->ras_last_readpage - 1; if (!stride_io_mode(ras) && (stride_gap != 0 || - ras->ras_consecutive_stride_requests == 0)) { + ras->ras_consecutive_stride_requests == 0)) { ras->ras_stride_pages = ras->ras_consecutive_pages; ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages; } @@ -881,7 +890,8 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras, } /* Stride Read-ahead window will be increased inc_len according to - * stride I/O pattern */ + * stride I/O pattern + */ static void ras_stride_increase_window(struct ll_readahead_state *ras, struct ll_ra_info *ra, unsigned long inc_len) @@ -952,7 +962,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * or reads to some other part of the file. Secondly if we get a * read-ahead miss that we think we've previously issued. This can * be a symptom of there being so many read-ahead pages that the VM is - * reclaiming it before we get to it. */ + * reclaiming it before we get to it. + */ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) { zero = 1; ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE); @@ -969,12 +980,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * file up to ra_max_pages_per_file. This is simply a best effort * and only occurs once per open file. Normal RA behavior is reverted * to for subsequent IO. The mmap case does not increment - * ras_requests and thus can never trigger this behavior. */ + * ras_requests and thus can never trigger this behavior. 
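/*
 * A sketch of the bookkeeping in ras_update_stride_detector() above: a
 * run of `consecutive` pages followed by a gap of `gap` unread pages is
 * recorded as a stride of (gap + consecutive) pages, of which
 * `consecutive` are actually read.
 */
static void demo_guess_stride(unsigned long gap, unsigned long consecutive,
			      unsigned long *stride_pages,
			      unsigned long *stride_length)
{
	*stride_pages = consecutive;
	*stride_length = gap + consecutive;
}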
+ */ if (ras->ras_requests == 2 && !ras->ras_request_index) { __u64 kms_pages; - kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT; CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); @@ -1015,14 +1027,16 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, stride_io_mode(ras)) { /*If stride-RA hit cache miss, the stride dector *will not be reset to avoid the overhead of - *redetecting read-ahead mode */ + *redetecting read-ahead mode + */ if (index != ras->ras_last_readpage + 1) ras->ras_consecutive_pages = 0; ras_reset(inode, ras, index); RAS_CDEBUG(ras); } else { /* Reset both stride window and normal RA - * window */ + * window + */ ras_reset(inode, ras, index); ras->ras_consecutive_pages++; ras_stride_reset(ras); @@ -1031,7 +1045,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, } else if (stride_io_mode(ras)) { /* If this is contiguous read but in stride I/O mode * currently, check whether stride step still is valid, - * if invalid, it will reset the stride ra window*/ + * if invalid, it will reset the stride ra window + */ if (!index_in_stride_window(ras, index)) { /* Shrink stride read-ahead window to be zero */ ras_stride_reset(ras); @@ -1047,7 +1062,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (stride_io_mode(ras)) /* Since stride readahead is sensitive to the offset * of read-ahead, so we use original offset here, - * instead of ras_window_start, which is RPC aligned */ + * instead of ras_window_start, which is RPC aligned + */ ras->ras_next_readahead = max(index, ras->ras_next_readahead); else ras->ras_next_readahead = max(ras->ras_window_start, @@ -1055,7 +1071,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, RAS_CDEBUG(ras); /* Trigger RA in the mmap case where ras_consecutive_requests - * is not incremented and thus can't be used to trigger RA */ + * is not incremented and thus can't be used to trigger RA + */ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) { ras->ras_window_len = RAS_INCREASE_STEP(inode); goto out_unlock; @@ -1101,7 +1118,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); - LASSERT(ll_i2dtexp(inode) != NULL); + LASSERT(ll_i2dtexp(inode)); env = cl_env_nested_get(&nest); if (IS_ERR(env)) { @@ -1110,7 +1127,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) } clob = ll_i2info(inode)->lli_clob; - LASSERT(clob != NULL); + LASSERT(clob); io = ccc_env_thread_io(env); io->ci_obj = clob; @@ -1153,14 +1170,16 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) /* Flush page failed because the extent is being written out. * Wait for the write of extent to be finished to avoid * breaking kernel which assumes ->writepage should mark - * PageWriteback or clean the page. */ + * PageWriteback or clean the page. + */ result = cl_sync_file_range(inode, offset, - offset + PAGE_CACHE_SIZE - 1, + offset + PAGE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. * decreasing this page because the caller will count - * it. */ + * it. 
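/*
 * A sketch of the size-to-pages rounding behind kms_pages above: the
 * file size known to the client is rounded up to whole pages before it
 * is compared with the whole-file readahead limit.
 */
#include <linux/types.h>

static inline unsigned long demo_bytes_to_pages_up(loff_t bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}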
+ */ wbc->nr_to_write -= result - 1; result = 0; } @@ -1192,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) int ignore_layout = 0; if (wbc->range_cyclic) { - start = mapping->writeback_index << PAGE_CACHE_SHIFT; + start = mapping->writeback_index << PAGE_SHIFT; end = OBD_OBJECT_EOF; } else { start = wbc->range_start; @@ -1210,7 +1229,8 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (sbi->ll_umounting) /* if the mountpoint is being umounted, all pages have to be * evicted to avoid hitting LBUG when truncate_inode_pages() - * is called later on. */ + * is called later on. + */ ignore_layout = 1; result = cl_sync_file_range(inode, start, end, mode, ignore_layout); if (result > 0) { @@ -1221,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { if (end == OBD_OBJECT_EOF) end = i_size_read(inode); - mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1; + mapping->writeback_index = (end >> PAGE_SHIFT) + 1; } return result; } diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c index 711fda93a..69aa15e8e 100644 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ b/drivers/staging/lustre/lustre/llite/rw26.c @@ -87,14 +87,14 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, * below because they are run with page locked and all our io is * happening with locked page too */ - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0 && length == PAGE_SIZE) { env = cl_env_get(&refcheck); if (!IS_ERR(env)) { inode = vmpage->mapping->host; obj = ll_i2info(inode)->lli_clob; - if (obj != NULL) { + if (obj) { page = cl_vmpage_page(vmpage, obj); - if (page != NULL) { + if (page) { lu_ref_add(&page->cp_reference, "delete", vmpage); cl_page_delete(env, page); @@ -109,12 +109,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, } } -#ifdef HAVE_RELEASEPAGE_WITH_INT -#define RELEASEPAGE_ARG_TYPE int -#else -#define RELEASEPAGE_ARG_TYPE gfp_t -#endif -static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) +static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) { struct cl_env_nest nest; struct lu_env *env; @@ -128,11 +123,11 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) return 0; mapping = vmpage->mapping; - if (mapping == NULL) + if (!mapping) return 1; obj = ll_i2info(mapping->host)->lli_clob; - if (obj == NULL) + if (!obj) return 1; /* 1 for page allocator, 1 for cl_page and 1 for page cache */ @@ -145,12 +140,13 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) /* If we can't allocate an env we won't call cl_page_put() * later on which further means it's impossible to drop * page refcount by cl_page, so ask kernel to not free - * this page. */ + * this page. 
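/*
 * A sketch of the cyclic-writeback bookkeeping in ll_writepages()
 * above: the byte range starts at the saved page index shifted up to a
 * byte offset, and after a successful sync the index is reset to the
 * first page past the end of the written range.
 */
static inline unsigned long demo_next_writeback_index(loff_t end)
{
	return (end >> PAGE_SHIFT) + 1;
}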
+ */ return 0; page = cl_vmpage_page(vmpage, obj); - result = page == NULL; - if (page != NULL) { + result = !page; + if (page) { if (!cl_page_in_use(page)) { result = 1; cl_page_delete(env, page); @@ -197,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, return -EFBIG; } - *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - *max_pages -= user_addr >> PAGE_CACHE_SHIFT; + *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT; + *max_pages -= user_addr >> PAGE_SHIFT; *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); if (*pages) { @@ -212,7 +208,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, } /* ll_free_user_pages - tear down page struct array - * @pages: array of page struct pointers underlying target buffer */ + * @pages: array of page struct pointers underlying target buffer + */ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) { int i; @@ -220,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) for (i = 0; i < npages; i++) { if (do_dirty) set_page_dirty_lock(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } kvfree(pages); } @@ -246,7 +243,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, cl_2queue_init(queue); for (i = 0; i < page_count; i++) { if (pv->ldp_offsets) - file_offset = pv->ldp_offsets[i]; + file_offset = pv->ldp_offsets[i]; LASSERT(!(file_offset & (page_size - 1))); clp = cl_page_find(env, obj, cl_index(obj, file_offset), @@ -266,7 +263,8 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, do_io = true; /* check the page type: if the page is a host page, then do - * write directly */ + * write directly + */ if (clp->cp_type == CPT_CACHEABLE) { struct page *vmpage = cl_page_vmpage(env, clp); struct page *src_page; @@ -284,14 +282,16 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, kunmap_atomic(src); /* make sure page will be added to the transfer by - * cl_io_submit()->...->vvp_page_prep_write(). */ + * cl_io_submit()->...->vvp_page_prep_write(). + */ if (rw == WRITE) set_page_dirty(vmpage); if (rw == READ) { /* do not issue the page for read, since it * may reread a ra page which has NOT uptodate - * bit set. */ + * bit set. + */ cl_page_disown(env, io, clp); do_io = false; } @@ -339,29 +339,25 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, size_t size, loff_t file_offset, struct page **pages, int page_count) { - struct ll_dio_pages pvec = { .ldp_pages = pages, - .ldp_nr = page_count, - .ldp_size = size, - .ldp_offsets = NULL, - .ldp_start_offset = file_offset - }; - - return ll_direct_rw_pages(env, io, rw, inode, &pvec); + struct ll_dio_pages pvec = { + .ldp_pages = pages, + .ldp_nr = page_count, + .ldp_size = size, + .ldp_offsets = NULL, + .ldp_start_offset = file_offset + }; + + return ll_direct_rw_pages(env, io, rw, inode, &pvec); } -#ifdef KMALLOC_MAX_SIZE -#define MAX_MALLOC KMALLOC_MAX_SIZE -#else -#define MAX_MALLOC (128 * 1024) -#endif - /* This is the maximum size of a single O_DIRECT request, based on the * kmalloc limit. We need to fit all of the brw_page structs, each one * representing PAGE_SIZE worth of user data, into a single buffer, and * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is - * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. 
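/*
 * A sketch of the page-span arithmetic in ll_get_user_pages() above:
 * the pages backing a user buffer equal the index one past the
 * buffer's last byte minus the index of its first byte, which covers
 * buffers that straddle page boundaries.
 */
static inline unsigned long demo_pages_spanned(unsigned long addr, size_t size)
{
	return ((addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (addr >> PAGE_SHIFT);
}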
*/ -#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ - ~(DT_MAX_BRW_SIZE - 1)) + * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. + */ +#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ + PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, loff_t file_offset) { @@ -386,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> PAGE_CACHE_SHIFT, - MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); + file_offset, file_offset, count >> PAGE_SHIFT, + MAX_DIO_SIZE >> PAGE_SHIFT); /* Check that all user buffers are aligned as well */ if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) @@ -396,7 +392,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, env = cl_env_get(&refcheck); LASSERT(!IS_ERR(env)); io = ccc_env_io(env)->cui_cl.cis_io; - LASSERT(io != NULL); + LASSERT(io); /* 0. Need locking between buffered and direct access. and race with * size changing by concurrent truncates and writes. @@ -433,10 +429,11 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, * for the request, shrink it to a smaller * PAGE_SIZE multiple and try again. * We should always be able to kmalloc for a - * page worth of page pointers = 4MB on i386. */ + * page worth of page pointers = 4MB on i386. + */ if (result == -ENOMEM && - size > (PAGE_CACHE_SIZE / sizeof(*pages)) * - PAGE_CACHE_SIZE) { + size > (PAGE_SIZE / sizeof(*pages)) * + PAGE_SIZE) { size = ((((size / 2) - 1) | ~CFS_PAGE_MASK) + 1) & CFS_PAGE_MASK; @@ -461,7 +458,7 @@ out: struct lov_stripe_md *lsm; lsm = ccc_inode_lsm_get(inode); - LASSERT(lsm != NULL); + LASSERT(lsm); lov_stripe_lock(lsm); obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0); lov_stripe_unlock(lsm); @@ -474,13 +471,13 @@ out: } static int ll_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int rc; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -491,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, rc = ll_prepare_write(file, page, from, from + len); if (rc) { unlock_page(page); - page_cache_release(page); + put_page(page); } return rc; } @@ -500,20 +497,20 @@ static int ll_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int rc; rc = ll_commit_write(file, page, from, from + copied); unlock_page(page); - page_cache_release(page); + put_page(page); return rc ?: copied; } #ifdef CONFIG_MIGRATION static int ll_migratepage(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode + struct page *newpage, struct page *page, + enum migrate_mode mode ) { /* Always fail page migration until we have a proper implementation */ diff --git 
a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c index 88ffd8e3a..99ffd1589 100644 --- a/drivers/staging/lustre/lustre/llite/statahead.c +++ b/drivers/staging/lustre/lustre/llite/statahead.c @@ -49,13 +49,13 @@ #define SA_OMITTED_ENTRY_MAX 8ULL -typedef enum { +enum se_stat { /** negative values are for error cases */ SA_ENTRY_INIT = 0, /** init entry */ SA_ENTRY_SUCC = 1, /** stat succeed */ SA_ENTRY_INVA = 2, /** invalid entry */ SA_ENTRY_DEST = 3, /** entry to be destroyed */ -} se_stat_t; +}; struct ll_sa_entry { /* link into sai->sai_entries */ @@ -71,7 +71,7 @@ struct ll_sa_entry { /* low layer ldlm lock handle */ __u64 se_handle; /* entry status */ - se_stat_t se_stat; + enum se_stat se_stat; /* entry size, contains name */ int se_size; /* pointer to async getattr enqueue info */ @@ -130,7 +130,7 @@ ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) static inline int agl_should_run(struct ll_statahead_info *sai, struct inode *inode) { - return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid); + return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid); } static inline int sa_sent_full(struct ll_statahead_info *sai) @@ -284,7 +284,7 @@ ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index) } static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) + struct ll_sa_entry *entry) { struct md_enqueue_info *minfo = entry->se_minfo; struct ptlrpc_request *req = entry->se_req; @@ -303,7 +303,7 @@ static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, } static void ll_sa_entry_put(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) + struct ll_sa_entry *entry) { if (atomic_dec_and_test(&entry->se_refcount)) { CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n", @@ -366,7 +366,7 @@ ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) */ static void do_sa_entry_to_stated(struct ll_statahead_info *sai, - struct ll_sa_entry *entry, se_stat_t stat) + struct ll_sa_entry *entry, enum se_stat stat) { struct ll_sa_entry *se; struct list_head *pos = &sai->sai_entries_stated; @@ -392,7 +392,7 @@ do_sa_entry_to_stated(struct ll_statahead_info *sai, */ static int ll_sa_entry_to_stated(struct ll_statahead_info *sai, - struct ll_sa_entry *entry, se_stat_t stat) + struct ll_sa_entry *entry, enum se_stat stat) { struct ll_inode_info *lli = ll_i2info(sai->sai_inode); int ret = 1; @@ -494,12 +494,13 @@ static void ll_sai_put(struct ll_statahead_info *sai) if (unlikely(atomic_read(&sai->sai_refcount) > 0)) { /* It is race case, the interpret callback just hold - * a reference count */ + * a reference count + */ spin_unlock(&lli->lli_sa_lock); return; } - LASSERT(lli->lli_opendir_key == NULL); + LASSERT(!lli->lli_opendir_key); LASSERT(thread_is_stopped(&sai->sai_thread)); LASSERT(thread_is_stopped(&sai->sai_agl_thread)); @@ -513,8 +514,8 @@ static void ll_sai_put(struct ll_statahead_info *sai) PFID(&lli->lli_fid), sai->sai_sent, sai->sai_replied); - list_for_each_entry_safe(entry, next, - &sai->sai_entries, se_link) + list_for_each_entry_safe(entry, next, &sai->sai_entries, + se_link) do_sa_entry_fini(sai, entry); LASSERT(list_empty(&sai->sai_entries)); @@ -618,20 +619,21 @@ static void ll_post_statahead(struct ll_statahead_info *sai) it = &minfo->mi_it; req = entry->se_req; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } child = entry->se_inode; - if 
(child == NULL) { + if (!child) { /* * lookup. */ LASSERT(fid_is_zero(&minfo->mi_data.op_fid2)); /* XXX: No fid in reply, this is probably cross-ref case. - * SA can't handle it yet. */ + * SA can't handle it yet. + */ if (body->valid & OBD_MD_MDS) { rc = -EAGAIN; goto out; @@ -672,7 +674,8 @@ out: /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock * reference count by calling "ll_intent_drop_lock()" in spite of the * above operations failed or not. Do not worry about calling - * "ll_intent_drop_lock()" more than once. */ + * "ll_intent_drop_lock()" more than once. + */ rc = ll_sa_entry_to_stated(sai, entry, rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC); if (rc == 0 && entry->se_index == sai->sai_index_wait) @@ -698,14 +701,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, /* release ibits lock ASAP to avoid deadlock when statahead * thread enqueues lock on parent in readdir and another * process enqueues lock on child with parent lock held, eg. - * unlink. */ + * unlink. + */ handle = it->d.lustre.it_lock_handle; ll_intent_drop_lock(it); } spin_lock(&lli->lli_sa_lock); /* stale entry */ - if (unlikely(lli->lli_sai == NULL || + if (unlikely(!lli->lli_sai || lli->lli_sai->sai_generation != minfo->mi_generation)) { spin_unlock(&lli->lli_sa_lock); rc = -ESTALE; @@ -720,7 +724,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, } entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); - if (entry == NULL) { + if (!entry) { sai->sai_replied++; spin_unlock(&lli->lli_sa_lock); rc = -EIDRM; @@ -736,11 +740,12 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, /* Release the async ibits lock ASAP to avoid deadlock * when statahead thread tries to enqueue lock on parent * for readpage and other tries to enqueue lock on child - * with parent's lock held, for example: unlink. */ + * with parent's lock held, for example: unlink. + */ entry->se_handle = handle; wakeup = list_empty(&sai->sai_entries_received); list_add_tail(&entry->se_list, - &sai->sai_entries_received); + &sai->sai_entries_received); } sai->sai_replied++; spin_unlock(&lli->lli_sa_lock); @@ -756,7 +761,7 @@ out: iput(dir); kfree(minfo); } - if (sai != NULL) + if (sai) ll_sai_put(sai); return rc; } @@ -853,7 +858,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, struct ldlm_enqueue_info *einfo; int rc; - if (unlikely(inode == NULL)) + if (unlikely(!inode)) return 1; if (d_mountpoint(dentry)) @@ -908,10 +913,9 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name, rc = do_sa_revalidate(dir, entry, dentry); if (rc == 1 && agl_should_run(sai, d_inode(dentry))) ll_agl_add(sai, d_inode(dentry), entry->se_index); - } - if (dentry != NULL) dput(dentry); + } if (rc) { rc1 = ll_sa_entry_to_stated(sai, entry, @@ -948,7 +952,8 @@ static int ll_agl_thread(void *arg) if (thread_is_init(thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just - * blindly overwrite that setting. */ + * blindly overwrite that setting. + */ thread_set_flags(thread, SVC_RUNNING); spin_unlock(&plli->lli_agl_lock); wake_up(&thread->t_ctl_waitq); @@ -964,7 +969,8 @@ static int ll_agl_thread(void *arg) spin_lock(&plli->lli_agl_lock); /* The statahead thread maybe help to process AGL entries, - * so check whether list empty again. */ + * so check whether list empty again. 
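/*
 * A sketch of the queue-then-wake pattern in ll_statahead_interpret()
 * above: note whether the consumer's list was empty before queueing
 * and wake the consumer only in that case, sparing redundant wake-ups
 * while work is already pending. (The driver does this under
 * lli_sa_lock; locking is omitted here.)
 */
#include <linux/list.h>
#include <linux/wait.h>

static void demo_queue_and_wake(struct list_head *item,
				struct list_head *queue,
				wait_queue_head_t *waitq)
{
	int wakeup = list_empty(queue);

	list_add_tail(item, queue);
	if (wakeup)
		wake_up(waitq);
}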
+ */ if (!list_empty(&sai->sai_entries_agl)) { clli = list_entry(sai->sai_entries_agl.next, struct ll_inode_info, lli_agl_list); @@ -1007,8 +1013,8 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) sai, parent); plli = ll_i2info(d_inode(parent)); - task = kthread_run(ll_agl_thread, parent, - "ll_agl_%u", plli->lli_opendir_pid); + task = kthread_run(ll_agl_thread, parent, "ll_agl_%u", + plli->lli_opendir_pid); if (IS_ERR(task)) { CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task)); thread_set_flags(thread, SVC_STOPPED); @@ -1049,7 +1055,8 @@ static int ll_statahead_thread(void *arg) if (thread_is_init(thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just - * blindly overwrite that setting. */ + * blindly overwrite that setting. + */ thread_set_flags(thread, SVC_RUNNING); spin_unlock(&plli->lli_sa_lock); wake_up(&thread->t_ctl_waitq); @@ -1070,7 +1077,7 @@ static int ll_statahead_thread(void *arg) } dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; + for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) { __u64 hash; int namelen; @@ -1137,7 +1144,8 @@ interpret_it: /* If no window for metadata statahead, but there are * some AGL entries to be triggered, then try to help - * to process the AGL entries. */ + * to process the AGL entries. + */ if (sa_sent_full(sai)) { spin_lock(&plli->lli_agl_lock); while (!list_empty(&sai->sai_entries_agl)) { @@ -1274,7 +1282,7 @@ void ll_stop_statahead(struct inode *dir, void *key) { struct ll_inode_info *lli = ll_i2info(dir); - if (unlikely(key == NULL)) + if (unlikely(!key)) return; spin_lock(&lli->lli_sa_lock); @@ -1357,7 +1365,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) } dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; + for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) { __u64 hash; int namelen; @@ -1365,7 +1373,8 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) hash = le64_to_cpu(ent->lde_hash); /* The ll_get_dir_page() can return any page containing - * the given hash which may be not the start hash. */ + * the given hash which may be not the start hash. + */ if (unlikely(hash < pos)) continue; @@ -1448,7 +1457,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); int hit; - if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) + if (entry && entry->se_stat == SA_ENTRY_SUCC) hit = 1; else hit = 0; @@ -1498,6 +1507,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, struct ll_sa_entry *entry; struct ptlrpc_thread *thread; struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc = 0; struct ll_inode_info *plli; @@ -1540,7 +1550,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); - if (entry == NULL || only_unplug) { + if (!entry || only_unplug) { ll_sai_unplug(sai, entry); return entry ? 
1 : -EAGAIN; } @@ -1559,8 +1569,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } } - if (entry->se_stat == SA_ENTRY_SUCC && - entry->se_inode != NULL) { + if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) { struct inode *inode = entry->se_inode; struct lookup_intent it = { .it_op = IT_GETATTR, .d.lustre.it_lock_handle = @@ -1570,11 +1579,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode), &bits); if (rc == 1) { - if (d_inode(*dentryp) == NULL) { + if (!d_inode(*dentryp)) { struct dentry *alias; alias = ll_splice_alias(inode, - *dentryp); + *dentryp); if (IS_ERR(alias)) { ll_sai_unplug(sai, entry); return PTR_ERR(alias); @@ -1583,7 +1592,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } else if (d_inode(*dentryp) != inode) { /* revalidate, but inode is recreated */ CDEBUG(D_READA, - "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", + "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", *dentryp, d_inode(*dentryp)->i_ino, d_inode(*dentryp)->i_generation, @@ -1616,14 +1625,14 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } sai = ll_sai_alloc(); - if (sai == NULL) { + if (!sai) { rc = -ENOMEM; goto out; } sai->sai_ls_all = (rc == LS_FIRST_DOT_DE); sai->sai_inode = igrab(dir); - if (unlikely(sai->sai_inode == NULL)) { + if (unlikely(!sai->sai_inode)) { CWARN("Do not start stat ahead on dying inode "DFID"\n", PFID(&lli->lli_fid)); rc = -ESTALE; @@ -1651,25 +1660,28 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, * but as soon as we expose the sai by attaching it to the lli that * default reference can be dropped by another thread calling * ll_stop_statahead. We need to take a local reference to protect - * the sai buffer while we intend to access it. */ + * the sai buffer while we intend to access it. + */ ll_sai_get(sai); lli->lli_sai = sai; plli = ll_i2info(d_inode(parent)); - rc = PTR_ERR(kthread_run(ll_statahead_thread, parent, - "ll_sa_%u", plli->lli_opendir_pid)); + task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u", + plli->lli_opendir_pid); thread = &sai->sai_thread; - if (IS_ERR_VALUE(rc)) { + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("can't start ll_sa thread, rc: %d\n", rc); dput(parent); lli->lli_opendir_key = NULL; thread_set_flags(thread, SVC_STOPPED); thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED); /* Drop both our own local reference and the default - * reference from allocation time. */ + * reference from allocation time. 
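/*
 * A sketch of the error handling the statahead startup above switches
 * to: keep the struct task_struct pointer from kthread_run() and test
 * it with IS_ERR() before extracting an errno, rather than funnelling
 * the pointer through an integer and IS_ERR_VALUE().
 */
#include <linux/kthread.h>
#include <linux/err.h>

static int demo_start_thread(int (*fn)(void *), void *arg)
{
	struct task_struct *task = kthread_run(fn, arg, "demo_thread");

	if (IS_ERR(task))
		return PTR_ERR(task);	/* thread was never created */
	return 0;
}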
+ */ ll_sai_put(sai); ll_sai_put(sai); - LASSERT(lli->lli_sai == NULL); + LASSERT(!lli->lli_sai); return -EAGAIN; } diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c index 86c371ef7..61856d37a 100644 --- a/drivers/staging/lustre/lustre/llite/super25.c +++ b/drivers/staging/lustre/lustre/llite/super25.c @@ -53,8 +53,8 @@ static struct inode *ll_alloc_inode(struct super_block *sb) struct ll_inode_info *lli; ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1); - lli = kmem_cache_alloc(ll_inode_cachep, GFP_NOFS | __GFP_ZERO); - if (lli == NULL) + lli = kmem_cache_zalloc(ll_inode_cachep, GFP_NOFS); + if (!lli) return NULL; inode_init_once(&lli->lli_vfs_inode); @@ -89,7 +89,7 @@ MODULE_ALIAS_FS("lustre"); void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)); -static int __init init_lustre_lite(void) +static int __init lustre_init(void) { lnet_process_id_t lnet_id; struct timespec64 ts; @@ -99,7 +99,8 @@ static int __init init_lustre_lite(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. + */ CDEBUG(D_INFO, "Lustre client module (%p).\n", &lustre_super_operations); @@ -108,26 +109,26 @@ static int __init init_lustre_lite(void) sizeof(struct ll_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, NULL); - if (ll_inode_cachep == NULL) + if (!ll_inode_cachep) goto out_cache; ll_file_data_slab = kmem_cache_create("ll_file_data", - sizeof(struct ll_file_data), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (ll_file_data_slab == NULL) + sizeof(struct ll_file_data), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!ll_file_data_slab) goto out_cache; ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache", sizeof(struct ll_remote_perm), 0, 0, NULL); - if (ll_remote_perm_cachep == NULL) + if (!ll_remote_perm_cachep) goto out_cache; ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache", REMOTE_PERM_HASHSIZE * sizeof(struct list_head), 0, 0, NULL); - if (ll_rmtperm_hash_cachep == NULL) + if (!ll_rmtperm_hash_cachep) goto out_cache; llite_root = debugfs_create_dir("llite", debugfs_lustre_root); @@ -146,7 +147,8 @@ static int __init init_lustre_lite(void) cfs_get_random_bytes(seed, sizeof(seed)); /* Nodes with small feet have little entropy. The NID for this - * node gives the most entropy in the low bits */ + * node gives the most entropy in the low bits + */ for (i = 0;; i++) { if (LNetGetId(i, &lnet_id) == -ENOENT) break; @@ -186,7 +188,7 @@ out_cache: return rc; } -static void __exit exit_lustre_lite(void) +static void __exit lustre_exit(void) { lustre_register_client_fill_super(NULL); lustre_register_kill_super_cb(NULL); @@ -207,8 +209,9 @@ static void __exit exit_lustre_lite(void) } MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre Lite Client File System"); +MODULE_DESCRIPTION("Lustre Client File System"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -module_init(init_lustre_lite); -module_exit(exit_lustre_lite); +module_init(lustre_init); +module_exit(lustre_exit); diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 2610348f6..46d03ea48 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -59,7 +59,8 @@ static int ll_readlink_internal(struct inode *inode, *symname = lli->lli_symlink_name; /* If the total CDEBUG() size is larger than a page, it * will print a warning to the console, avoid this by - * printing just the last part of the symlink. */ + * printing just the last part of the symlink. + */ CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n", print_limit < symlen ? "..." : "", print_limit, (*symname) + symlen - print_limit, symlen); @@ -81,7 +82,6 @@ static int ll_readlink_internal(struct inode *inode, } body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if ((body->valid & OBD_MD_LINKNAME) == 0) { CERROR("OBD_MD_LINKNAME not set on reply\n"); rc = -EPROTO; @@ -91,13 +91,13 @@ static int ll_readlink_internal(struct inode *inode, LASSERT(symlen != 0); if (body->eadatasize != symlen) { CERROR("inode %lu: symlink length %d not expected %d\n", - inode->i_ino, body->eadatasize - 1, symlen - 1); + inode->i_ino, body->eadatasize - 1, symlen - 1); rc = -EPROTO; goto failed; } *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD); - if (*symname == NULL || + if (!*symname || strnlen(*symname, symlen) != symlen - 1) { /* not full/NULL terminated */ CERROR("inode %lu: symlink not NULL terminated string of length %d\n", diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c index fdca4ec05..282b70b77 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c @@ -79,8 +79,8 @@ static void *vvp_key_init(const struct lu_context *ctx, { struct vvp_thread_info *info; - info = kmem_cache_alloc(vvp_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -98,8 +98,8 @@ static void *vvp_session_key_init(const struct lu_context *ctx, { struct vvp_session *session; - session = kmem_cache_alloc(vvp_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } @@ -228,7 +228,7 @@ int cl_sb_fini(struct super_block *sb) if (!IS_ERR(env)) { cld = sbi->ll_cl; - if (cld != NULL) { + if (cld) { cl_stack_fini(env, cld); sbi->ll_cl = NULL; sbi->ll_site = NULL; @@ -325,11 +325,11 @@ static struct cl_object *vvp_pgcache_obj(const struct lu_env *env, cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket, vvp_pgcache_obj_get, id); - if (id->vpi_obj != NULL) { + if (id->vpi_obj) { struct lu_object *lu_obj; lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type); - if (lu_obj != NULL) { + if (lu_obj) { lu_object_ref_add(lu_obj, "dump", current); return lu2cl(lu_obj); } @@ -355,7 +355,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env, if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash)) return ~0ULL; clob = vvp_pgcache_obj(env, dev, &id); - if (clob != NULL) { + if 
(clob) { struct cl_object_header *hdr; int nr; struct cl_page *pg; @@ -443,7 +443,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v) vvp_pgcache_id_unpack(pos, &id); sbi = f->private; clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id); - if (clob != NULL) { + if (clob) { hdr = cl_object_header(clob); spin_lock(&hdr->coh_page_guard); @@ -452,7 +452,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v) seq_printf(f, "%8x@"DFID": ", id.vpi_index, PFID(&hdr->coh_lu.loh_fid)); - if (page != NULL) { + if (page) { vvp_pgcache_page_show(env, f, page); cl_page_put(env, page); } else diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h index 2e39533a4..bb393378c 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_internal.h +++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h @@ -44,14 +44,13 @@ #include "../include/cl_object.h" #include "llite_internal.h" -int vvp_io_init (const struct lu_env *env, - struct cl_object *obj, struct cl_io *io); -int vvp_lock_init (const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, +int vvp_io_init(const struct lu_env *env, + struct cl_object *obj, struct cl_io *io); +int vvp_lock_init(const struct lu_env *env, + struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); -int vvp_page_init (const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); +int vvp_page_init(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); struct lu_object *vvp_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 0920ac6b3..85a835976 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -68,7 +68,7 @@ int cl_is_normalio(const struct lu_env *env, const struct cl_io *io) * have to acquire group lock. 
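
[Illustrative aside, not part of the patch: several hunks above and below fold kmem_cache_alloc(..., GFP_NOFS | __GFP_ZERO) into kmem_cache_zalloc(..., GFP_NOFS). A minimal sketch of the two equivalent forms, using a hypothetical cache:]

    #include <linux/slab.h>

    struct obj {
            int x;
    };

    static struct kmem_cache *obj_cachep;   /* assume kmem_cache_create() ran */

    static struct obj *obj_alloc(void)
    {
            /* old form: kmem_cache_alloc(obj_cachep, GFP_NOFS | __GFP_ZERO) */
            return kmem_cache_zalloc(obj_cachep, GFP_NOFS); /* zeroed object */
    }
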
*/ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, - struct inode *inode) + struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); struct ccc_io *cio = ccc_env_io(env); @@ -78,7 +78,8 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, case CIT_READ: case CIT_WRITE: /* don't need lock here to check lli_layout_gen as we have held - * extent lock and GROUP lock has to hold to swap layout */ + * extent lock and GROUP lock has to hold to swap layout + */ if (ll_layout_version_get(lli) != cio->cui_layout_gen) { io->ci_need_restart = 1; /* this will return application a short read/write */ @@ -134,7 +135,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) */ rc = ll_layout_restore(ccc_object_inode(obj)); /* if restore registration failed, no restart, - * we will return -ENODATA */ + * we will return -ENODATA + */ /* The layout will change after restore, so we need to * block on layout lock hold by the MDT * as MDT will not send new layout in lvb (see LU-3124) @@ -164,8 +166,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) DFID" layout changed from %d to %d.\n", PFID(lu_object_fid(&obj->co_lu)), cio->cui_layout_gen, gen); - /* today successful restore is the only possible - * case */ + /* today successful restore is the only possible case */ /* restore was done, clear restoring state */ ll_i2info(ccc_object_inode(obj))->lli_flags &= ~LLIF_FILE_RESTORING; @@ -181,7 +182,7 @@ static void vvp_io_fault_fini(const struct lu_env *env, CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); - if (page != NULL) { + if (page) { lu_ref_del(&page->cp_reference, "fault", io); cl_page_put(env, page); io->u.ci_fault.ft_page = NULL; @@ -220,11 +221,11 @@ static int vvp_mmap_locks(const struct lu_env *env, if (!cl_is_normalio(env, io)) return 0; - if (vio->cui_iter == NULL) /* nfs or loop back device write */ + if (!vio->cui_iter) /* nfs or loop back device write */ return 0; /* No MM (e.g. NFS)? No vmas too. */ - if (mm == NULL) + if (!mm) return 0; iov_for_each(iov, i, *(vio->cui_iter)) { @@ -456,7 +457,8 @@ static void vvp_io_setattr_end(const struct lu_env *env, if (cl_io_is_trunc(io)) /* Truncate in memory pages - they must be clean pages - * because osc has already notified to destroy osc_extents. */ + * because osc has already notified to destroy osc_extents. 
+ */ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); inode_unlock(inode); @@ -499,8 +501,8 @@ static int vvp_io_read_start(const struct lu_env *env, goto out; LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, - "Read ino %lu, %lu bytes, offset %lld, size %llu\n", - inode->i_ino, cnt, pos, i_size_read(inode)); + "Read ino %lu, %lu bytes, offset %lld, size %llu\n", + inode->i_ino, cnt, pos, i_size_read(inode)); /* turn off the kernel's read-ahead */ cio->cui_fd->fd_file->f_ra.ra_pages = 0; @@ -510,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env, vio->cui_ra_window_set = 1; bead->lrr_start = cl_index(obj, pos); /* - * XXX: explicit PAGE_CACHE_SIZE + * XXX: explicit PAGE_SIZE */ - bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); + bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1); ll_ra_read_in(file, bead); } @@ -525,11 +527,12 @@ static int vvp_io_read_start(const struct lu_env *env, break; case IO_SPLICE: result = generic_file_splice_read(file, &pos, - vio->u.splice.cui_pipe, cnt, - vio->u.splice.cui_flags); + vio->u.splice.cui_pipe, cnt, + vio->u.splice.cui_flags); /* LU-1109: do splice read stripe by stripe otherwise if it * may make nfsd stuck if this read occupied all internal pipe - * buffers. */ + * buffers. + */ io->ci_continue = 0; break; default: @@ -587,7 +590,7 @@ static int vvp_io_write_start(const struct lu_env *env, CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); - if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */ + if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */ result = 0; else result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter); @@ -673,7 +676,7 @@ static int vvp_io_fault_start(const struct lu_env *env, /* must return locked page */ if (fio->ft_mkwrite) { - LASSERT(cfio->ft_vmpage != NULL); + LASSERT(cfio->ft_vmpage); lock_page(cfio->ft_vmpage); } else { result = vvp_io_kernel_fault(cfio); @@ -689,13 +692,15 @@ static int vvp_io_fault_start(const struct lu_env *env, size = i_size_read(inode); /* Though we have already held a cl_lock upon this page, but - * it still can be truncated locally. */ + * it still can be truncated locally. + */ if (unlikely((vmpage->mapping != inode->i_mapping) || (page_offset(vmpage) > size))) { CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n"); /* return +1 to stop cl_io_loop() and ll_fault() will catch - * and retry. */ + * and retry. + */ result = 1; goto out; } @@ -736,7 +741,8 @@ static int vvp_io_fault_start(const struct lu_env *env, } /* if page is going to be written, we should add this page into cache - * earlier. */ + * earlier. + */ if (fio->ft_mkwrite) { wait_on_page_writeback(vmpage); if (set_page_dirty(vmpage)) { @@ -750,7 +756,8 @@ static int vvp_io_fault_start(const struct lu_env *env, /* Do not set Dirty bit here so that in case IO is * started before the page is really made dirty, we - * still have chance to detect it. */ + * still have chance to detect it. + */ result = cl_page_cache_add(env, io, page, CRT_WRITE); LASSERT(cl_page_is_owned(page, io)); @@ -792,7 +799,7 @@ static int vvp_io_fault_start(const struct lu_env *env, out: /* return unlocked vmpage to avoid deadlocking */ - if (vmpage != NULL) + if (vmpage) unlock_page(vmpage); cfio->fault.ft_flags &= ~VM_FAULT_LOCKED; return result; @@ -803,7 +810,8 @@ static int vvp_io_fsync_start(const struct lu_env *env, { /* we should mark TOWRITE bit to each dirty page in radix tree to * verify pages have been written, but this is difficult because of - * race. */ + * race. 
+ */ return 0; } @@ -951,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env, * We're completely overwriting an existing page, so _don't_ * set it up to date until commit_write */ - if (from == 0 && to == PAGE_CACHE_SIZE) { + if (from == 0 && to == PAGE_SIZE) { CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n"); POISON_PAGE(page, 0x11); } else @@ -1003,7 +1011,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * * (3) IO is batched up to the RPC size and is async until the * client max cache is hit - * (/proc/fs/lustre/osc/OSC.../max_dirty_mb) + * (/sys/fs/lustre/osc/OSC.../max_dirty_mb) * */ if (!PageDirty(vmpage)) { @@ -1014,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env, set_page_dirty(vmpage); vvp_write_pending(cl2ccc(obj), cp); } else if (result == -EDQUOT) { - pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; + pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT; bool need_clip = true; /* @@ -1032,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * being. */ if (last_index > pg->cp_index) { - to = PAGE_CACHE_SIZE; + to = PAGE_SIZE; need_clip = false; } else if (last_index == pg->cp_index) { int size_to = i_size_read(inode) & ~CFS_PAGE_MASK; @@ -1153,7 +1161,8 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, count = io->u.ci_rw.crw_count; /* "If nbyte is 0, read() will return 0 and have no other - * results." -- Single Unix Spec */ + * results." -- Single Unix Spec + */ if (count == 0) result = 1; else @@ -1173,25 +1182,28 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, /* ignore layout change for generic CIT_MISC but not for glimpse. * io context for glimpse must set ci_verify_layout to true, - * see cl_glimpse_size0() for details. */ + * see cl_glimpse_size0() for details. + */ if (io->ci_type == CIT_MISC && !io->ci_verify_layout) io->ci_ignore_layout = 1; /* Enqueue layout lock and get layout version. We need to do this * even for operations requiring to open file, such as read and write, - * because it might not grant layout lock in IT_OPEN. */ + * because it might not grant layout lock in IT_OPEN. + */ if (result == 0 && !io->ci_ignore_layout) { result = ll_layout_refresh(inode, &cio->cui_layout_gen); if (result == -ENOENT) /* If the inode on MDS has been removed, but the objects * on OSTs haven't been destroyed (async unlink), layout * fetch will return -ENOENT, we'd ignore this error - * and continue with dirty flush. LU-3230. */ + * and continue with dirty flush. LU-3230. + */ result = 0; if (result < 0) CERROR("%s: refresh file layout " DFID " error %d.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(lu_object_fid(&obj->co_lu)), result); + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(lu_object_fid(&obj->co_lu)), result); } return result; diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c index c82714ea8..03c887d8e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_object.c +++ b/drivers/staging/lustre/lustre/llite/vvp_object.c @@ -137,7 +137,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, * page may be stale due to layout change, and the process * will never be notified. * This operation is expensive but mmap processes have to pay - * a price themselves. */ + * a price themselves. 
+ */ unmap_mapping_range(conf->coc_inode->i_mapping, 0, OBD_OBJECT_EOF, 0); @@ -147,7 +148,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, if (conf->coc_opc != OBJECT_CONF_SET) return 0; - if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) { + if (conf->u.coc_md && conf->u.coc_md->lsm) { CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n", PFID(&lli->lli_fid), lli->lli_layout_gen, conf->u.coc_md->lsm->lsm_layout_gen); @@ -186,9 +187,8 @@ struct ccc_object *cl_inode2ccc(struct inode *inode) struct cl_object *obj = lli->lli_clob; struct lu_object *lu; - LASSERT(obj != NULL); lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type); - LASSERT(lu != NULL); + LASSERT(lu); return lu2ccc(lu); } diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c index a133475a7..33ca3eb34 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ b/drivers/staging/lustre/lustre/llite/vvp_page.c @@ -56,8 +56,8 @@ static void vvp_page_fini_common(struct ccc_page *cp) { struct page *vmpage = cp->cpg_page; - LASSERT(vmpage != NULL); - page_cache_release(vmpage); + LASSERT(vmpage); + put_page(vmpage); } static void vvp_page_fini(const struct lu_env *env, @@ -81,7 +81,7 @@ static int vvp_page_own(const struct lu_env *env, struct ccc_page *vpg = cl2ccc_page(slice); struct page *vmpage = vpg->cpg_page; - LASSERT(vmpage != NULL); + LASSERT(vmpage); if (nonblock) { if (!trylock_page(vmpage)) return -EAGAIN; @@ -105,7 +105,7 @@ static void vvp_page_assume(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); wait_on_page_writeback(vmpage); } @@ -116,7 +116,7 @@ static void vvp_page_unassume(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); } @@ -125,7 +125,7 @@ static void vvp_page_disown(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); unlock_page(cl2vm_page(slice)); @@ -139,7 +139,7 @@ static void vvp_page_discard(const struct lu_env *env, struct address_space *mapping; struct ccc_page *cpg = cl2ccc_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); mapping = vmpage->mapping; @@ -161,15 +161,15 @@ static int vvp_page_unmap(const struct lu_env *env, struct page *vmpage = cl2vm_page(slice); __u64 offset; - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); - offset = vmpage->index << PAGE_CACHE_SHIFT; + offset = vmpage->index << PAGE_SHIFT; /* * XXX is it safe to call this with the page lock held? 
*/ - ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); + ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE); return 0; } @@ -199,7 +199,7 @@ static void vvp_page_export(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); if (uptodate) SetPageUptodate(vmpage); @@ -232,7 +232,8 @@ static int vvp_page_prep_write(const struct lu_env *env, LASSERT(!PageDirty(vmpage)); /* ll_writepage path is not a sync write, so need to set page writeback - * flag */ + * flag + */ if (!pg->cp_sync_io) set_page_writeback(vmpage); @@ -262,7 +263,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret set_bit(AS_EIO, &inode->i_mapping->flags); if ((ioret == -ESHUTDOWN || ioret == -EINTR) && - obj->cob_discard_page_warned == 0) { + obj->cob_discard_page_warned == 0) { obj->cob_discard_page_warned = 1; ll_dirty_page_discard_warn(vmpage, ioret); } @@ -290,7 +291,7 @@ static void vvp_page_completion_read(const struct lu_env *env, } else cp->cpg_defer_uptodate = 0; - if (page->cp_sync_io == NULL) + if (!page->cp_sync_io) unlock_page(vmpage); } @@ -317,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env, cp->cpg_write_queued = 0; vvp_write_complete(cl2ccc(slice->cpl_obj), cp); - if (pg->cp_sync_io != NULL) { + if (pg->cp_sync_io) { LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); } else { @@ -356,15 +357,14 @@ static int vvp_page_make_ready(const struct lu_env *env, lock_page(vmpage); if (clear_page_dirty_for_io(vmpage)) { LASSERT(pg->cp_state == CPS_CACHED); - /* This actually clears the dirty bit in the radix - * tree. */ + /* This actually clears the dirty bit in the radix tree. */ set_page_writeback(vmpage); - vvp_write_pending(cl2ccc(slice->cpl_obj), - cl2ccc_page(slice)); + vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); } else if (pg->cp_state == CPS_PAGEOUT) { /* is it possible for osc_flush_async_page() to already - * make it ready? */ + * make it ready? 
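
[Illustrative aside, not part of the patch: the vvp_page hunks above and below move from the old page-cache wrappers to the core page API, so page_cache_get()/page_cache_release() become get_page()/put_page() and PAGE_CACHE_SIZE/SHIFT become PAGE_SIZE/SHIFT. A minimal sketch with a hypothetical helper; the u64 cast is defensive, since page->index is an unsigned long and the shift would otherwise happen in that width:]

    #include <linux/types.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* hypothetical helper: pin a page, report its byte range, unpin it */
    static void page_byte_range(struct page *page, u64 *start, u64 *end)
    {
            get_page(page);                          /* was page_cache_get() */
            *start = (u64)page->index << PAGE_SHIFT; /* was PAGE_CACHE_SHIFT */
            *end = *start + PAGE_SIZE;               /* was PAGE_CACHE_SIZE */
            put_page(page);                          /* was page_cache_release() */
    }
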
+ */ result = -EALREADY; } else { CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n", @@ -385,7 +385,7 @@ static int vvp_page_print(const struct lu_env *env, (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", vp, vp->cpg_defer_uptodate, vp->cpg_ra_used, vp->cpg_write_queued, vmpage); - if (vmpage != NULL) { + if (vmpage) { (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", (long)vmpage->flags, page_count(vmpage), page_mapcount(vmpage), vmpage->private, @@ -530,27 +530,26 @@ static const struct cl_page_operations vvp_transient_page_ops = { }; int vvp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct ccc_page *cpg = cl_object_page_slice(obj, page); CLOBINVRNT(env, obj, ccc_object_invariant(obj)); cpg->cpg_page = vmpage; - page_cache_get(vmpage); + get_page(vmpage); INIT_LIST_HEAD(&cpg->cpg_pending_linkage); if (page->cp_type == CPT_CACHEABLE) { SetPagePrivate(vmpage); vmpage->private = (unsigned long)page; - cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_page_ops); + cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops); } else { struct ccc_object *clobj = cl2ccc(obj); LASSERT(!inode_trylock(clobj->cob_inode)); cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_transient_page_ops); + &vvp_transient_page_ops); clobj->cob_transient_pages++; } return 0; diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 8eb43f192..b68dcc921 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c @@ -135,7 +135,7 @@ int ll_setxattr_common(struct inode *inode, const char *name, /* b15587: ignore security.capability xattr for now */ if ((xattr_type == XATTR_SECURITY_T && - strcmp(name, "security.capability") == 0)) + strcmp(name, "security.capability") == 0)) return 0; /* LU-549: Disable security.selinux when selinux is disabled */ @@ -148,7 +148,7 @@ int ll_setxattr_common(struct inode *inode, const char *name, (xattr_type == XATTR_ACL_ACCESS_T || xattr_type == XATTR_ACL_DEFAULT_T)) { rce = rct_search(&sbi->ll_rct, current_pid()); - if (rce == NULL || + if (!rce || (rce->rce_ops != RMT_LSETFACL && rce->rce_ops != RMT_RSETFACL)) return -EOPNOTSUPP; @@ -158,7 +158,6 @@ int ll_setxattr_common(struct inode *inode, const char *name, ee = et_search_del(&sbi->ll_et, current_pid(), ll_inode2fid(inode), xattr_type); - LASSERT(ee != NULL); if (valid & OBD_MD_FLXATTR) { acl = lustre_acl_xattr_merge2ext( (posix_acl_xattr_header *)value, @@ -192,12 +191,11 @@ int ll_setxattr_common(struct inode *inode, const char *name, valid, name, pv, size, 0, flags, ll_i2suppgid(inode), &req); #ifdef CONFIG_FS_POSIX_ACL - if (new_value != NULL) - /* - * Release the posix ACL space. - */ - kfree(new_value); - if (acl != NULL) + /* + * Release the posix ACL space. + */ + kfree(new_value); + if (acl) lustre_ext_acl_xattr_free(acl); #endif if (rc) { @@ -239,11 +237,12 @@ int ll_setxattr(struct dentry *dentry, const char *name, /* Attributes that are saved via getxattr will always have * the stripe_offset as 0. Instead, the MDS should be - * allowed to pick the starting OST index. b=17846 */ - if (lump != NULL && lump->lmm_stripe_offset == 0) + * allowed to pick the starting OST index. 
b=17846 + */ + if (lump && lump->lmm_stripe_offset == 0) lump->lmm_stripe_offset = -1; - if (lump != NULL && S_ISREG(inode->i_mode)) { + if (lump && S_ISREG(inode->i_mode)) { int flags = FMODE_WRITE; int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ? sizeof(*lump) : sizeof(struct lov_user_md_v3); @@ -312,7 +311,7 @@ int ll_getxattr_common(struct inode *inode, const char *name, /* b15587: ignore security.capability xattr for now */ if ((xattr_type == XATTR_SECURITY_T && - strcmp(name, "security.capability") == 0)) + strcmp(name, "security.capability") == 0)) return -ENODATA; /* LU-549: Disable security.selinux when selinux is disabled */ @@ -325,7 +324,7 @@ int ll_getxattr_common(struct inode *inode, const char *name, (xattr_type == XATTR_ACL_ACCESS_T || xattr_type == XATTR_ACL_DEFAULT_T)) { rce = rct_search(&sbi->ll_rct, current_pid()); - if (rce == NULL || + if (!rce || (rce->rce_ops != RMT_LSETFACL && rce->rce_ops != RMT_LGETFACL && rce->rce_ops != RMT_RSETFACL && @@ -366,7 +365,7 @@ do_getxattr: goto out_xattr; /* Add "system.posix_acl_access" to the list */ - if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) { + if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) { if (size == 0) { rc += sizeof(XATTR_NAME_ACL_ACCESS); } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) { @@ -398,7 +397,7 @@ getxattr_nocache: if (size < body->eadatasize) { CERROR("server bug: replied size %u > %u\n", - body->eadatasize, (int)size); + body->eadatasize, (int)size); rc = -ERANGE; goto out; } @@ -410,7 +409,7 @@ getxattr_nocache: /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->eadatasize); + body->eadatasize); if (!xdata) { rc = -EFAULT; goto out; @@ -482,13 +481,14 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, if (size == 0 && S_ISDIR(inode->i_mode)) { /* XXX directory EA is fix for now, optimize to save - * RPC transfer */ + * RPC transfer + */ rc = sizeof(struct lov_user_md); goto out; } lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) { + if (!lsm) { if (S_ISDIR(inode->i_mode)) { rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request); @@ -497,7 +497,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, } } else { /* LSM is present already after lookup/getattr call. - * we need to grab layout lock once it is implemented */ + * we need to grab layout lock once it is implemented + */ rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm); lmmsize = rc; } @@ -510,7 +511,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, /* used to call ll_get_max_mdsize() forward to get * the maximum buffer size, while some apps (such as * rsync 3.0.x) care much about the exact xattr value - * size */ + * size + */ rc = lmmsize; goto out; } @@ -526,7 +528,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, memcpy(lump, lmm, lmmsize); /* do not return layout gen for getxattr otherwise it would * confuse tar --xattr by recognizing layout gen as stripe - * offset when the file is restored. See LU-2809. */ + * offset when the file is restored. See LU-2809. 
+ */ lump->lmm_layout_gen = 0; rc = lmmsize; @@ -560,7 +563,7 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) if (rc < 0) goto out; - if (buffer != NULL) { + if (buffer) { struct ll_sb_info *sbi = ll_i2sbi(inode); char *xattr_name = buffer; int xlen, rem = rc; @@ -598,12 +601,12 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) const size_t name_len = sizeof("lov") - 1; const size_t total_len = prefix_len + name_len + 1; - if (((rc + total_len) > size) && (buffer != NULL)) { + if (((rc + total_len) > size) && buffer) { ptlrpc_req_finished(request); return -ERANGE; } - if (buffer != NULL) { + if (buffer) { buffer += rc; memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len); memcpy(buffer + prefix_len, "lov", name_len); diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c index d1402762a..3480ce2bb 100644 --- a/drivers/staging/lustre/lustre/llite/xattr_cache.c +++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c @@ -23,7 +23,8 @@ */ struct ll_xattr_entry { struct list_head xe_list; /* protected with - * lli_xattrs_list_rwsem */ + * lli_xattrs_list_rwsem + */ char *xe_name; /* xattr name, \0-terminated */ char *xe_value; /* xattr value */ unsigned xe_namelen; /* strlen(xe_name) + 1 */ @@ -59,9 +60,6 @@ void ll_xattr_fini(void) */ static void ll_xattr_cache_init(struct ll_inode_info *lli) { - - LASSERT(lli != NULL); - INIT_LIST_HEAD(&lli->lli_xattrs); lli->lli_flags |= LLIF_XATTR_CACHE; } @@ -83,8 +81,7 @@ static int ll_xattr_cache_find(struct list_head *cache, list_for_each_entry(entry, cache, xe_list) { /* xattr_name == NULL means look for any entry */ - if (xattr_name == NULL || - strcmp(xattr_name, entry->xe_name) == 0) { + if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) { *xattr = entry; CDEBUG(D_CACHE, "find: [%s]=%.*s\n", entry->xe_name, entry->xe_vallen, @@ -117,8 +114,8 @@ static int ll_xattr_cache_add(struct list_head *cache, return -EPROTO; } - xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO); - if (xattr == NULL) { + xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS); + if (!xattr) { CDEBUG(D_CACHE, "failed to allocate xattr\n"); return -ENOMEM; } @@ -136,8 +133,8 @@ static int ll_xattr_cache_add(struct list_head *cache, xattr->xe_vallen = xattr_val_len; list_add(&xattr->xe_list, cache); - CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, - xattr_val_len, xattr_val); + CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len, + xattr_val); return 0; err_value: @@ -194,7 +191,7 @@ static int ll_xattr_cache_list(struct list_head *cache, list_for_each_entry_safe(xattr, tmp, cache, xe_list) { CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n", - xld_buffer, xld_tail, xattr->xe_name); + xld_buffer, xld_tail, xattr->xe_name); if (xld_buffer) { xld_size -= xattr->xe_namelen; @@ -270,7 +267,7 @@ static int ll_xattr_find_get_lock(struct inode *inode, struct lookup_intent *oit, struct ptlrpc_request **req) { - ldlm_mode_t mode; + enum ldlm_mode mode; struct lustre_handle lockh = { 0 }; struct md_op_data *op_data; struct ll_inode_info *lli = ll_i2info(inode); @@ -284,7 +281,8 @@ static int ll_xattr_find_get_lock(struct inode *inode, mutex_lock(&lli->lli_xattrs_enq_lock); /* inode may have been shrunk and recreated, so data is gone, match lock - * only when data exists. */ + * only when data exists. + */ if (ll_xattr_cache_valid(lli)) { /* Try matching first. 
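
[Illustrative aside, not part of the patch: ll_xattr_cache_find() above encodes a match-any convention, where a NULL name matches the first entry. A minimal restatement with hypothetical types:]

    #include <linux/list.h>
    #include <linux/string.h>

    struct xent {                           /* hypothetical cache entry */
            struct list_head xe_list;
            const char *xe_name;
    };

    static struct xent *xent_find(struct list_head *cache, const char *name)
    {
            struct xent *entry;

            list_for_each_entry(entry, cache, xe_list)
                    /* NULL name means "any entry", as in the hunk above */
                    if (!name || strcmp(name, entry->xe_name) == 0)
                            return entry;
            return NULL;
    }
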
*/ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, @@ -359,7 +357,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) } /* Matched but no cache? Cancelled on error by a parallel refill. */ - if (unlikely(req == NULL)) { + if (unlikely(!req)) { CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n"); rc = -EIO; goto out_maybe_drop; @@ -376,19 +374,19 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { CERROR("no MDT BODY in the refill xattr reply\n"); rc = -EPROTO; goto out_destroy; } /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->eadatasize); + body->eadatasize); xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS, - body->aclsize); + body->aclsize); xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS, body->max_mdsize * sizeof(__u32)); - if (xdata == NULL || xval == NULL || xsizes == NULL) { + if (!xdata || !xval || !xsizes) { CERROR("wrong setxattr reply\n"); rc = -EPROTO; goto out_destroy; @@ -404,7 +402,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) for (i = 0; i < body->max_mdsize; i++) { CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval); /* Perform consistency checks: attr names and vals in pill */ - if (memchr(xdata, 0, xtail - xdata) == NULL) { + if (!memchr(xdata, 0, xtail - xdata)) { CERROR("xattr protocol violation (names are broken)\n"); rc = -EPROTO; } else if (xval + *xsizes > xvtail) { @@ -471,11 +469,8 @@ out_destroy: * \retval -ERANGE the buffer is not large enough * \retval -ENODATA no such attr or the list is empty */ -int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid) +int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer, + size_t size, __u64 valid) { struct lookup_intent oit = { .it_op = IT_GETXATTR }; struct ll_inode_info *lli = ll_i2info(inode); @@ -504,7 +499,7 @@ int ll_xattr_cache_get(struct inode *inode, if (size != 0) { if (size >= xattr->xe_vallen) memcpy(buffer, xattr->xe_value, - xattr->xe_vallen); + xattr->xe_vallen); else rc = -ERANGE; } diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c index ee235926f..378691b2a 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c @@ -58,7 +58,8 @@ int lmv_fld_lookup(struct lmv_obd *lmv, int rc; /* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and - * this fid_is_local check should be removed once LU-2240 is fixed */ + * this fid_is_local check should be removed once LU-2240 is fixed + */ LASSERTF((fid_seq_in_fldb(fid_seq(fid)) || fid_seq_is_local_file(fid_seq(fid))) && fid_is_sane(fid), DFID" is insane!\n", PFID(fid)); diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c index 66de27f1d..e0958eaed 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c @@ -69,7 +69,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm, int rc = 0; body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; LASSERT((body->valid & OBD_MD_MDS)); @@ -107,14 +107,16 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm, op_data->op_fid1 = 
body->fid1; /* Sent the parent FID to the remote MDT */ - if (parent_fid != NULL) { + if (parent_fid) { /* The parent fid is only for remote open to * check whether the open is from OBF, - * see mdt_cross_open */ + * see mdt_cross_open + */ LASSERT(it->it_op & IT_OPEN); op_data->op_fid2 = *parent_fid; /* Add object FID to op_fid3, in case it needs to check stale - * (M_CHECK_STALE), see mdc_finish_intent_lock */ + * (M_CHECK_STALE), see mdc_finish_intent_lock + */ op_data->op_fid3 = body->fid1; } @@ -173,7 +175,8 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, return PTR_ERR(tgt); /* If it is ready to open the file by FID, do not need - * allocate FID at all, otherwise it will confuse MDT */ + * allocate FID at all, otherwise it will confuse MDT + */ if ((it->it_op & IT_CREAT) && !(it->it_flags & MDS_OPEN_BY_FID)) { /* @@ -204,7 +207,7 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, return rc; body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* * Not cross-ref case, just get out of here. @@ -268,9 +271,9 @@ static int lmv_intent_lookup(struct obd_export *exp, op_data->op_bias &= ~MDS_CROSS_REF; rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, - flags, reqp, cb_blocking, extra_lock_flags); + flags, reqp, cb_blocking, extra_lock_flags); - if (rc < 0 || *reqp == NULL) + if (rc < 0 || !*reqp) return rc; /* @@ -278,7 +281,7 @@ static int lmv_intent_lookup(struct obd_export *exp, * remote inode. Let's check this. */ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* Not cross-ref case, just get out of here. */ if (likely(!(body->valid & OBD_MD_MDS))) @@ -299,7 +302,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; int rc; - LASSERT(it != NULL); LASSERT(fid_is_sane(&op_data->op_fid1)); CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n", diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h index eb8e673cb..8a0087190 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h +++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h @@ -66,7 +66,7 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req) struct mdt_body *body; struct lmv_stripe_md *mea; - LASSERT(req != NULL); + LASSERT(req); body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); @@ -75,13 +75,11 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req) mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, body->eadatasize); - LASSERT(mea != NULL); - if (mea->mea_count == 0) return NULL; if (mea->mea_magic != MEA_MAGIC_LAST_CHAR && - mea->mea_magic != MEA_MAGIC_ALL_CHARS && - mea->mea_magic != MEA_MAGIC_HASH_SEGMENT) + mea->mea_magic != MEA_MAGIC_ALL_CHARS && + mea->mea_magic != MEA_MAGIC_HASH_SEGMENT) return NULL; return mea; @@ -101,7 +99,7 @@ lmv_get_target(struct lmv_obd *lmv, u32 mds) int i; for (i = 0; i < count; i++) { - if (lmv->tgts[i] == NULL) + if (!lmv->tgts[i]) continue; if (lmv->tgts[i]->ltd_idx == mds) diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index bbafe0a71..9abb7c2b9 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -53,6 +53,7 @@ #include "../include/lprocfs_status.h" #include "../include/lustre_lite.h" 
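
[Illustrative aside, not part of the patch: the lmv_add_target() hunk below grows the target array by doubling, with kcalloc() of a larger zeroed array, memcpy() of the old slots, then freeing the old array. A minimal sketch under those assumptions; struct tgt is a placeholder element type:]

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct tgt;                             /* placeholder element type */

    static int grow_tgts(struct tgt ***tgts, u32 *size, u32 index)
    {
            u32 newsize = max_t(u32, *size, 4);
            struct tgt **newtgts;

            while (newsize < index + 1)
                    newsize <<= 1;          /* double until index fits */

            newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
            if (!newtgts)
                    return -ENOMEM;

            if (*size)
                    memcpy(newtgts, *tgts, *size * sizeof(*newtgts));
            kfree(*tgts);
            *tgts = newtgts;
            *size = newsize;
            return 0;
    }
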
#include "../include/lustre_fid.h" +#include "../include/lustre_kernelcomm.h" #include "lmv_internal.h" static void lmv_activate_target(struct lmv_obd *lmv, @@ -87,7 +88,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, spin_lock(&lmv->lmv_lock); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i, @@ -103,7 +104,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, } obd = class_exp2obd(tgt->ltd_exp); - if (obd == NULL) { + if (!obd) { rc = -ENOTCONN; goto out_lmv_lock; } @@ -237,7 +238,7 @@ static int lmv_connect(const struct lu_env *env, * and MDC stuff will be called directly, for instance while reading * ../mdc/../kbytesfree procfs file, etc. */ - if (data->ocd_connect_flags & OBD_CONNECT_REAL) + if (data && data->ocd_connect_flags & OBD_CONNECT_REAL) rc = lmv_check_connect(obd); if (rc && lmv->lmv_tgts_kobj) @@ -261,7 +262,7 @@ static void lmv_set_timeouts(struct obd_device *obd) for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0) + if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) continue; obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS), @@ -301,8 +302,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, return 0; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) { CWARN("%s: NULL export for %d\n", obd->obd_name, i); continue; @@ -311,7 +311,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize, cookiesize, def_cookiesize); if (rc) { - CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n", + CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", obd->obd_name, i, rc); break; } @@ -339,9 +339,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) } CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, - cluuid->uuid); + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid); if (!mdc_obd->obd_set_up) { CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid); @@ -397,8 +396,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) lmv->max_cookiesize, lmv->max_def_cookiesize); CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - atomic_read(&obd->obd_refcount)); + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + atomic_read(&obd->obd_refcount)); if (lmv->lmv_tgts_kobj) /* Even if we failed to create the link, that's fine */ @@ -409,7 +408,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) static void lmv_del_target(struct lmv_obd *lmv, int index) { - if (lmv->tgts[index] == NULL) + if (!lmv->tgts[index]) return; kfree(lmv->tgts[index]); @@ -418,7 +417,7 @@ static void lmv_del_target(struct lmv_obd *lmv, int index) } static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, - __u32 index, int gen) + __u32 index, int gen) { struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; @@ -441,7 +440,7 @@ static int lmv_add_target(struct obd_device *obd, struct 
obd_uuid *uuidp, } } - if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) { + if ((index < lmv->tgts_size) && lmv->tgts[index]) { tgt = lmv->tgts[index]; CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", obd->obd_name, @@ -459,7 +458,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, while (newsize < index + 1) newsize <<= 1; newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (newtgts == NULL) { + if (!newtgts) { lmv_init_unlock(lmv); return -ENOMEM; } @@ -538,11 +537,9 @@ int lmv_check_connect(struct obd_device *obd) CDEBUG(D_CONFIG, "Time to connect %s to %s\n", lmv->cluuid.uuid, obd->obd_name); - LASSERT(lmv->tgts != NULL); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; rc = lmv_connect_mdc(obd, tgt); if (rc) @@ -562,7 +559,7 @@ int lmv_check_connect(struct obd_device *obd) int rc2; tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; tgt->ltd_active = 0; if (tgt->ltd_exp) { @@ -585,9 +582,6 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) struct obd_device *mdc_obd; int rc; - LASSERT(tgt != NULL); - LASSERT(obd != NULL); - mdc_obd = class_exp2obd(tgt->ltd_exp); if (mdc_obd) { @@ -640,7 +634,7 @@ static int lmv_disconnect(struct obd_export *exp) goto out_local; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; lmv_disconnect_mdc(obd, lmv->tgts[i]); @@ -662,7 +656,8 @@ out_local: return rc; } -static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg) +static int lmv_fid2path(struct obd_export *exp, int len, void *karg, + void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lmv_obd *lmv = &obddev->u.lmv; @@ -683,8 +678,9 @@ repeat_fid2path: goto out_fid2path; /* If remote_gf != NULL, it means just building the - * path on the remote MDT, copy this path segment to gf */ - if (remote_gf != NULL) { + * path on the remote MDT, copy this path segment to gf + */ + if (remote_gf) { struct getinfo_fid2path *ori_gf; char *ptr; @@ -714,7 +710,7 @@ repeat_fid2path: goto out_fid2path; /* sigh, has to go to another MDT to do path building further */ - if (remote_gf == NULL) { + if (!remote_gf) { remote_gf_size = sizeof(*remote_gf) + PATH_MAX; remote_gf = kzalloc(remote_gf_size, GFP_NOFS); if (!remote_gf) { @@ -779,7 +775,7 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv, nr_out = 0; for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) { curr_tgt = lmv_find_target(lmv, - &hur_in->hur_user_item[i].hui_fid); + &hur_in->hur_user_item[i].hui_fid); if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) { hur_out->hur_user_item[nr_out] = hur_in->hur_user_item[i]; @@ -792,14 +788,17 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv, } static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void *uarg) + struct lustre_kernelcomm *lk, + void __user *uarg) { - int i, rc = 0; + int rc = 0; + __u32 i; /* unregister request (call from llapi_hsm_copytool_fini) */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { /* best effort: try to clean as much as possible - * (continue on error) */ + * (continue on error) + */ obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); } @@ -808,23 +807,25 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, * and will unregister automatically. 
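
[Illustrative aside, not part of the patch: in the lmv_hsm_ct_register() hunk just below, the file reference taken with fget() is dropped with fput() only when registration fails, so the kuc group can keep the reference on success. A minimal sketch of that ownership pattern; do_registration() is a hypothetical consumer:]

    #include <linux/file.h>
    #include <linux/fs.h>

    static int do_registration(struct file *filp)   /* hypothetical consumer */
    {
            return 0;                       /* pretend it took ownership */
    }

    static int register_fd(int fd)
    {
            struct file *filp = fget(fd);
            int rc;

            if (!filp)
                    return -EBADF;          /* closed or invalid descriptor */

            rc = do_registration(filp);
            if (rc)
                    fput(filp);             /* drop the ref only on failure */
            return rc;
    }

[In the patched function the inner "if (filp)" before fput() appears redundant, since filp was already tested against NULL a few lines earlier.]
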
*/ rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group); + return rc; } static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void *uarg) + struct lustre_kernelcomm *lk, void __user *uarg) { - struct file *filp; - int i, j, err; - int rc = 0; - bool any_set = false; + struct file *filp; + __u32 i, j; + int err, rc = 0; + bool any_set = false; + struct kkuc_ct_data kcd = { 0 }; /* All or nothing: try to register to all MDS. * In case of failure, unregister from previous MDS, - * except if it because of inactive target. */ + * except if it because of inactive target. + */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, - len, lk, uarg); + err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); if (err) { if (lmv->tgts[i]->ltd_active) { /* permanent error */ @@ -836,13 +837,13 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, /* unregister from previous MDS */ for (j = 0; j < i; j++) obd_iocontrol(cmd, - lmv->tgts[j]->ltd_exp, - len, lk, uarg); + lmv->tgts[j]->ltd_exp, + len, lk, uarg); return rc; } /* else: transient error. - * kuc will register to the missing MDT - * when it is back */ + * kuc will register to the missing MDT when it is back + */ } else { any_set = true; } @@ -854,17 +855,25 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, /* at least one registration done, with no failure */ filp = fget(lk->lk_wfd); - if (filp == NULL) { + if (!filp) return -EBADF; + + kcd.kcd_magic = KKUC_CT_DATA_MAGIC; + kcd.kcd_uuid = lmv->cluuid; + kcd.kcd_archive = lk->lk_data; + + rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, + &kcd, sizeof(kcd)); + if (rc) { + if (filp) + fput(filp); } - rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data); - if (rc != 0 && filp != NULL) - fput(filp); + return rc; } static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lmv_obd *lmv = &obddev->u.lmv; @@ -887,8 +896,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (index >= count) return -ENODEV; - if (lmv->tgts[index] == NULL || - lmv->tgts[index]->ltd_active == 0) + if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0) return -ENODATA; mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp); @@ -897,8 +905,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) return -EFAULT; rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf, @@ -907,8 +915,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) + min((int)data->ioc_plen1, + (int)sizeof(stat_buf)))) return -EFAULT; break; } @@ -922,18 +930,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, return -EINVAL; tgt = lmv->tgts[qctl->qc_idx]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) return -EINVAL; } else if (qctl->qc_valid == QC_UUID) { for (i = 0; i < count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; if (!obd_uuid_equals(&tgt->ltd_uuid, &qctl->obd_uuid)) 
continue; - if (tgt->ltd_exp == NULL) + if (!tgt->ltd_exp) return -EINVAL; break; @@ -967,8 +975,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (icc->icc_mdtindex >= count) return -ENODEV; - if (lmv->tgts[icc->icc_mdtindex] == NULL || - lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL || + if (!lmv->tgts[icc->icc_mdtindex] || + !lmv->tgts[icc->icc_mdtindex]->ltd_exp || lmv->tgts[icc->icc_mdtindex]->ltd_active == 0) return -ENODEV; rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp, @@ -976,7 +984,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, break; } case LL_IOC_GET_CONNECT_FLAGS: { - if (lmv->tgts[0] == NULL) + if (!lmv->tgts[0]) return -ENODATA; rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg); break; @@ -993,10 +1001,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, tgt = lmv_find_target(lmv, &op_data->op_fid1); if (IS_ERR(tgt)) - return PTR_ERR(tgt); + return PTR_ERR(tgt); - if (tgt->ltd_exp == NULL) - return -EINVAL; + if (!tgt->ltd_exp) + return -EINVAL; rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); break; @@ -1021,7 +1029,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, /* if the request is about a single fid * or if there is a single MDS, no need to split - * the request. */ + * the request. + */ if (reqcount == 1 || count == 1) { tgt = lmv_find_target(lmv, &hur->hur_user_item[0].hui_fid); @@ -1044,7 +1053,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, hur_user_item[nr]) + hur->hur_request.hr_data_len; req = libcfs_kvzalloc(reqlen, GFP_NOFS); - if (req == NULL) + if (!req) return -ENOMEM; lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req); @@ -1070,7 +1079,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (IS_ERR(tgt2)) return PTR_ERR(tgt2); - if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL)) + if (!tgt1->ltd_exp || !tgt2->ltd_exp) return -EINVAL; /* only files on same MDT can have their layouts swapped */ @@ -1094,11 +1103,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, struct obd_device *mdc_obd; int err; - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; /* ll_umount_begin() sets force flag but for lmv, not - * mdc. Let's pass it through */ + * mdc. Let's pass it through + */ mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp); mdc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, @@ -1122,51 +1131,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, return rc; } -#if 0 -static int lmv_all_chars_policy(int count, const char *name, - int len) -{ - unsigned int c = 0; - - while (len > 0) - c += name[--len]; - c = c % count; - return c; -} - -static int lmv_nid_policy(struct lmv_obd *lmv) -{ - struct obd_import *imp; - __u32 id; - - /* - * XXX: To get nid we assume that underlying obd device is mdc. 
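
[Illustrative aside, not part of the patch: the IOC_OBD_STATFS hunks above clamp the copy length with min() before copy_to_user(), so a short user buffer truncates the payload instead of being overrun. A minimal sketch with a hypothetical stats structure:]

    #include <linux/types.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    struct mini_stats {                     /* hypothetical payload */
            __u64 blocks;
            __u64 bfree;
    };

    static int stats_to_user(void __user *ubuf, size_t ulen,
                             const struct mini_stats *st)
    {
            if (copy_to_user(ubuf, st, min(ulen, sizeof(*st))))
                    return -EFAULT;         /* some bytes were not copied */
            return 0;
    }
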
- */ - imp = class_exp2cliimp(lmv->tgts[0].ltd_exp); - id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32); - return id % lmv->desc.ld_tgt_count; -} - -static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data, - enum placement_policy placement) -{ - switch (placement) { - case PLACEMENT_CHAR_POLICY: - return lmv_all_chars_policy(lmv->desc.ld_tgt_count, - op_data->op_name, - op_data->op_namelen); - case PLACEMENT_NID_POLICY: - return lmv_nid_policy(lmv); - - default: - break; - } - - CERROR("Unsupported placement policy %x\n", placement); - return -EINVAL; -} -#endif - /** * This is _inode_ placement policy function (not name). */ @@ -1175,7 +1139,7 @@ static int lmv_placement_policy(struct obd_device *obd, { struct lmv_obd *lmv = &obd->u.lmv; - LASSERT(mds != NULL); + LASSERT(mds); if (lmv->desc.ld_tgt_count == 1) { *mds = 0; @@ -1205,7 +1169,8 @@ static int lmv_placement_policy(struct obd_device *obd, } /* Allocate new fid on target according to operation type and parent - * home mds. */ + * home mds. + */ *mds = op_data->op_mds; return 0; } @@ -1225,7 +1190,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds) */ mutex_lock(&tgt->ltd_fid_mutex); - if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) { + if (tgt->ltd_active == 0 || !tgt->ltd_exp) { rc = -ENODEV; goto out; } @@ -1252,8 +1217,8 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid, u32 mds = 0; int rc; - LASSERT(op_data != NULL); - LASSERT(fid != NULL); + LASSERT(op_data); + LASSERT(fid); rc = lmv_placement_policy(obd, op_data, &mds); if (rc) { @@ -1291,7 +1256,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) } lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS); - if (lmv->tgts == NULL) + if (!lmv->tgts) return -ENOMEM; lmv->tgts_size = 32; @@ -1332,11 +1297,11 @@ static int lmv_cleanup(struct obd_device *obd) struct lmv_obd *lmv = &obd->u.lmv; fld_client_fini(&lmv->lmv_fld); - if (lmv->tgts != NULL) { + if (lmv->tgts) { int i; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL) + if (!lmv->tgts[i]) continue; lmv_del_target(lmv, i); } @@ -1357,7 +1322,8 @@ static int lmv_process_config(struct obd_device *obd, u32 len, void *buf) switch (lcfg->lcfg_command) { case LCFG_ADD_MDC: /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID - * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */ + * 2:0 3:1 4:lustre-MDT0000-mdc_UUID + */ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) { rc = -EINVAL; goto out; @@ -1402,7 +1368,7 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, return -ENOMEM; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp, @@ -1421,7 +1387,8 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, * i.e. mount does not need the merged osfs * from all of MDT. * And also clients can be mounted as long as - * MDT0 is in service*/ + * MDT0 is in service + */ if (flags & OBD_STATFS_FOR_MDT0) goto out_free_temp; } else { @@ -1547,7 +1514,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid) * space of MDT storing inode. 
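
[Illustrative aside, not part of the patch: __lmv_fid_alloc() above serializes FID assignment per target with ltd_fid_mutex. The shape of that pattern, reduced to a hypothetical counter:]

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct seq_alloc {                      /* hypothetical per-target state */
            struct mutex lock;
            u64 next;
    };

    static u64 seq_next(struct seq_alloc *s)
    {
            u64 v;

            mutex_lock(&s->lock);           /* mirrors ltd_fid_mutex */
            v = s->next++;
            mutex_unlock(&s->lock);
            return v;
    }
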
*/ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; md_null_inode(lmv->tgts[i]->ltd_exp, fid); } @@ -1575,7 +1542,7 @@ static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, * space of MDT storing inode. */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data); if (rc) @@ -1655,7 +1622,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data, cap_effective, rdev, request); if (rc == 0) { - if (*request == NULL) + if (!*request) return rc; CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2)); } @@ -1701,7 +1668,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo, int pmode; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if (!(body->valid & OBD_MD_MDS)) return 0; @@ -1808,7 +1774,6 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if (body->valid & OBD_MD_MDS) { struct lu_fid rid = body->fid1; @@ -1842,7 +1807,8 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, NULL) static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data, - int op_tgt, ldlm_mode_t mode, int bits, int flag) + int op_tgt, enum ldlm_mode mode, int bits, + int flag) { struct lu_fid *fid = md_op_data_fid(op_data, flag); struct obd_device *obd = exp->exp_obd; @@ -2051,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * |s|e|f|p|ent| 0 | ... | 0 | * '----------------- -----' * - * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is + * However, on hosts where the native VM page size (PAGE_SIZE) is * larger than LU_PAGE_SIZE, a single host page may contain multiple * lu_dirpages. After reading the lu_dirpages from the MDS, the * ldp_hash_end of the first lu_dirpage refers to the one immediately @@ -2082,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span * to the first entry of the next lu_dirpage. */ -#if PAGE_CACHE_SIZE > LU_PAGE_SIZE +#if PAGE_SIZE > LU_PAGE_SIZE static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) { int i; @@ -2097,7 +2063,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) while (--nlupgs > 0) { ent = lu_dirent_start(dp); - for (end_dirent = ent; ent != NULL; + for (end_dirent = ent; ent; end_dirent = ent, ent = lu_dirent_next(ent)) ; @@ -2117,7 +2083,8 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) break; /* Enlarge the end entry lde_reclen from 0 to - * first entry of next lu_dirpage. */ + * first entry of next lu_dirpage. 
+ */ LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0); end_dirent->lde_reclen = cpu_to_le16((char *)(dp->ldp_entries) - @@ -2134,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) } #else #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) -#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ +#endif /* PAGE_SIZE > LU_PAGE_SIZE */ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct page **pages, struct ptlrpc_request **request) @@ -2143,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct lmv_obd *lmv = &obd->u.lmv; __u64 offset = op_data->op_offset; int rc; - int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ + int ncfspgs; /* pages read in PAGE_SIZE */ int nlupgs; /* pages read in LU_PAGE_SIZE */ struct lmv_tgt_desc *tgt; @@ -2162,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) return rc; - ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1) - >> PAGE_CACHE_SHIFT; + ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1) + >> PAGE_SHIFT; nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages); @@ -2227,7 +2194,7 @@ retry: return rc; body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* Not cross-ref case, just get out of here. */ @@ -2255,7 +2222,8 @@ retry: * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times). * * In theory, it might try unlimited time here, but it should - * be very rare case. */ + * be very rare case. + */ op_data->op_fid2 = body->fid1; ptlrpc_req_finished(*request); *request = NULL; @@ -2270,7 +2238,8 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) switch (stage) { case OBD_CLEANUP_EARLY: /* XXX: here should be calling obd_precleanup() down to - * stack. */ + * stack. + */ break; case OBD_CLEANUP_EXPORTS: fld_client_debugfs_fini(&lmv->lmv_fld); @@ -2291,7 +2260,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, int rc = 0; obd = class_exp2obd(exp); - if (obd == NULL) { + if (!obd) { CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", exp->exp_handle.h_cookie); return -EINVAL; @@ -2312,7 +2281,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, /* * All tgts should be connected when this gets called. 
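
[Illustrative aside, not part of the patch: lmv_readpage() above converts the bulk byte count to whole VM pages with (nob + PAGE_SIZE - 1) >> PAGE_SHIFT; DIV_ROUND_UP() expresses the same rounding:]

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static inline unsigned int bytes_to_pages(size_t nob)
    {
            /* same value as (nob + PAGE_SIZE - 1) >> PAGE_SHIFT above */
            return DIV_ROUND_UP(nob, PAGE_SIZE);
    }
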
*/ - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; if (!obd_get_info(env, tgt->ltd_exp, keylen, key, @@ -2355,7 +2324,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, int rc = 0; obd = class_exp2obd(exp); - if (obd == NULL) { + if (!obd) { CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", exp->exp_handle.h_cookie); return -EINVAL; @@ -2368,7 +2337,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; err = obd_set_info_async(env, tgt->ltd_exp, @@ -2403,9 +2372,9 @@ static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, return 0; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS); - if (*lmmp == NULL) + if (!*lmmp) return -ENOMEM; } @@ -2443,10 +2412,10 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, __u32 magic; mea_size = lmv_get_easize(lmv); - if (lsmp == NULL) + if (!lsmp) return mea_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { kvfree(*tmea); *lsmp = NULL; return 0; @@ -2455,7 +2424,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, LASSERT(mea_size == lmm_size); *tmea = libcfs_kvzalloc(mea_size, GFP_NOFS); - if (*tmea == NULL) + if (!*tmea) return -ENOMEM; if (!lmm) @@ -2485,8 +2454,8 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - ldlm_cancel_flags_t flags, void *opaque) + ldlm_policy_data_t *policy, enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; @@ -2494,10 +2463,10 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, int err; int i; - LASSERT(fid != NULL); + LASSERT(fid); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) continue; @@ -2519,14 +2488,16 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, return rc; } -static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; - ldlm_mode_t rc; + enum ldlm_mode rc; int i; CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid)); @@ -2538,8 +2509,7 @@ static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, * one fid was created in. 
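 *
 * A hedged usage sketch of the now enum-typed lmv_lock_match(); the
 * flag and mode values here are placeholders, not taken from this
 * file, and 0 still means no target holds a matching lock:
 *
 *	enum ldlm_mode mode;
 *
 *	mode = lmv_lock_match(exp, 0, fid, LDLM_IBITS, &policy,
 *			      LCK_CR, &lockh);
 *	if (mode == 0)
 *		return -ENOLCK;	// hypothetical caller policy
 *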
*/ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) continue; @@ -2695,7 +2665,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0) + if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) continue; if (!tgt->ltd_active) { CDEBUG(D_HA, "mdt %d is inactive.\n", i); @@ -2730,7 +2700,7 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp, int err; tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) { + if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) { CERROR("lmv idx %d inactive\n", i); return -EIO; } @@ -2813,7 +2783,8 @@ static void lmv_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. "); -MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver"); +MODULE_DESCRIPTION("Lustre Logical Metadata Volume"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(lmv_init); diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c index 40cf4d9f0..b39e364a2 100644 --- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c +++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c @@ -138,7 +138,7 @@ static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lmv_obd *lmv; - LASSERT(dev != NULL); + LASSERT(dev); lmv = &dev->u.lmv; seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid); return 0; @@ -171,7 +171,7 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v) { struct lmv_tgt_desc *tgt = v; - if (tgt == NULL) + if (!tgt) return 0; seq_printf(p, "%d: %s %sACTIVE\n", tgt->ltd_idx, tgt->ltd_uuid.uuid, diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h index 66a2492c1..7dd3162b5 100644 --- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h @@ -579,51 +579,49 @@ extern struct kmem_cache *lovsub_req_kmem; extern struct kmem_cache *lov_lock_link_kmem; -int lov_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lov_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); - -int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, - struct lovsub_lock *sub); +int lov_object_init(const struct lu_env *env, struct lu_object *obj, + const struct lu_object_conf *conf); +int lovsub_object_init(const struct lu_env 
*env, struct lu_object *obj, + const struct lu_object_conf *conf); +int lov_lock_init(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_io_init(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); + +int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, + struct lovsub_lock *sub); struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio, int stripe); -void lov_sub_put(struct lov_io_sub *sub); -int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, - struct lovsub_lock *sublock, - const struct cl_lock_descr *d, int idx); - -int lov_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, struct page *vmpage); -int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, struct page *vmpage); - -int lov_page_init_empty(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); -int lov_page_init_raid0(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); +void lov_sub_put(struct lov_io_sub *sub); +int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, + struct lovsub_lock *sublock, + const struct cl_lock_descr *d, int idx); + +int lov_page_init(const struct lu_env *env, struct cl_object *ob, + struct cl_page *page, struct page *vmpage); +int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, + struct cl_page *page, struct page *vmpage); + +int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); +int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); struct lu_object *lov_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); + const struct lu_object_header *hdr, + struct lu_device *dev); struct lu_object *lovsub_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); @@ -631,9 +629,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lov_lock_link *lov_lock_link_find(const struct lu_env *env, struct lov_lock *lck, struct lovsub_lock *sub); -struct lov_io_sub *lov_page_subio(const struct lu_env *env, - struct lov_io *lio, - const struct cl_page_slice *slice); +struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, + const struct cl_page_slice *slice); #define lov_foreach_target(lov, var) \ for (var = 0; var < lov_targets_nr(lov); ++var) @@ -651,7 +648,7 @@ static inline struct lov_session *lov_env_session(const struct lu_env *env) struct lov_session *ses; ses = lu_context_key_get(env->le_ses, &lov_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -759,7 +756,7 @@ static inline struct 
lovsub_lock *cl2sub_lock(const struct cl_lock *lock) const struct cl_lock_slice *slice; slice = cl_lock_at(lock, &lovsub_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2lovsub_lock(slice); } @@ -798,7 +795,7 @@ static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice) } static inline struct lov_io *cl2lov_io(const struct lu_env *env, - const struct cl_io_slice *ios) + const struct cl_io_slice *ios) { struct lov_io *lio; @@ -817,7 +814,7 @@ static inline struct lov_thread_info *lov_env_info(const struct lu_env *env) struct lov_thread_info *info; info = lu_context_key_get(&env->le_ctx, &lov_key); - LASSERT(info != NULL); + LASSERT(info); return info; } diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c index 3733fdc88..532ef87df 100644 --- a/drivers/staging/lustre/lustre/lov/lov_dev.c +++ b/drivers/staging/lustre/lustre/lov/lov_dev.c @@ -142,8 +142,8 @@ static void *lov_key_init(const struct lu_context *ctx, { struct lov_thread_info *info; - info = kmem_cache_alloc(lov_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info != NULL) + info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS); + if (info) INIT_LIST_HEAD(&info->lti_closure.clc_list); else info = ERR_PTR(-ENOMEM); @@ -170,8 +170,8 @@ static void *lov_session_key_init(const struct lu_context *ctx, { struct lov_session *info; - info = kmem_cache_alloc(lov_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -199,15 +199,15 @@ static struct lu_device *lov_device_fini(const struct lu_env *env, int i; struct lov_device *ld = lu2lov_dev(d); - LASSERT(ld->ld_lov != NULL); - if (ld->ld_target == NULL) + LASSERT(ld->ld_lov); + if (!ld->ld_target) return NULL; lov_foreach_target(ld, i) { struct lovsub_device *lsd; lsd = ld->ld_target[i]; - if (lsd != NULL) { + if (lsd) { cl_stack_fini(env, lovsub2cl_dev(lsd)); ld->ld_target[i] = NULL; } @@ -222,8 +222,8 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, int i; int rc = 0; - LASSERT(d->ld_site != NULL); - if (ld->ld_target == NULL) + LASSERT(d->ld_site); + if (!ld->ld_target) return rc; lov_foreach_target(ld, i) { @@ -232,7 +232,7 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, struct lov_tgt_desc *desc; desc = ld->ld_lov->lov_tgts[i]; - if (desc == NULL) + if (!desc) continue; cl = cl_type_setup(env, d->ld_site, &lovsub_device_type, @@ -261,8 +261,8 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev, struct lov_req *lr; int result; - lr = kmem_cache_alloc(lov_req_kmem, GFP_NOFS | __GFP_ZERO); - if (lr != NULL) { + lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS); + if (lr) { cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops); result = 0; } else @@ -282,9 +282,9 @@ static void lov_emerg_free(struct lov_device_emerg **emrg, int nr) struct lov_device_emerg *em; em = emrg[i]; - if (em != NULL) { + if (em) { LASSERT(em->emrg_page_list.pl_nr == 0); - if (em->emrg_env != NULL) + if (em->emrg_env) cl_env_put(em->emrg_env, &em->emrg_refcheck); kfree(em); } @@ -300,7 +300,7 @@ static struct lu_device *lov_device_free(const struct lu_env *env, cl_device_fini(lu2cl_dev(d)); kfree(ld->ld_target); - if (ld->ld_emrg != NULL) + if (ld->ld_emrg) lov_emerg_free(ld->ld_emrg, nr); kfree(ld); return NULL; @@ -311,7 +311,7 @@ static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev, { struct lov_device *ld = 
lu2lov_dev(dev); - if (ld->ld_target[index] != NULL) { + if (ld->ld_target[index]) { cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index])); ld->ld_target[index] = NULL; } @@ -324,17 +324,17 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr) int result; emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS); - if (emerg == NULL) + if (!emerg) return ERR_PTR(-ENOMEM); for (result = i = 0; i < nr && result == 0; i++) { struct lov_device_emerg *em; em = kzalloc(sizeof(*em), GFP_NOFS); - if (em != NULL) { + if (em) { emerg[i] = em; cl_page_list_init(&em->emrg_page_list); em->emrg_env = cl_env_alloc(&em->emrg_refcheck, - LCT_REMEMBER|LCT_NOREF); + LCT_REMEMBER | LCT_NOREF); if (!IS_ERR(em->emrg_env)) em->emrg_env->le_ctx.lc_cookie = 0x2; else { @@ -370,7 +370,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) return PTR_ERR(emerg); newd = kcalloc(tgt_size, sz, GFP_NOFS); - if (newd != NULL) { + if (newd) { mutex_lock(&dev->ld_mutex); if (sub_size > 0) { memcpy(newd, dev->ld_target, sub_size * sz); @@ -379,7 +379,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) dev->ld_target = newd; dev->ld_target_nr = tgt_size; - if (dev->ld_emrg != NULL) + if (dev->ld_emrg) lov_emerg_free(dev->ld_emrg, sub_size); dev->ld_emrg = emerg; mutex_unlock(&dev->ld_mutex); @@ -404,8 +404,6 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, obd_getref(obd); tgt = obd->u.lov.lov_tgts[index]; - LASSERT(tgt != NULL); - LASSERT(tgt->ltd_obd != NULL); if (!tgt->ltd_obd->obd_set_up) { CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid)); @@ -414,7 +412,7 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, rc = lov_expand_targets(env, ld); if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) { - LASSERT(dev->ld_site != NULL); + LASSERT(dev->ld_site); cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type, tgt->ltd_obd->obd_lu_dev); @@ -492,7 +490,7 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env, /* setup the LOV OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = lov_setup(obd, cfg); if (rc) { lov_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c index b3c9c85aa..b6529401c 100644 --- a/drivers/staging/lustre/lustre/lov/lov_ea.c +++ b/drivers/staging/lustre/lustre/lov/lov_ea.c @@ -100,8 +100,8 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size) return NULL; for (i = 0; i < stripe_count; i++) { - loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO); - if (loi == NULL) + loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS); + if (!loi) goto err; lsm->lsm_oinfo[i] = loi; } @@ -141,7 +141,7 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm, static void lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, - u64 *lov_off, u64 *swidth) + u64 *lov_off, u64 *swidth) { if (swidth) *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; @@ -162,12 +162,13 @@ static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa, } /* Find minimum stripe maxbytes value. For inactive or - * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */ + * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. 
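 *
 * The allocator conversions in the hunks above lean on a strict
 * equivalence: both forms hand back zeroed slab memory, and the second
 * is just the preferred spelling of the first:
 *
 *	loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO);
 *
 *	loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS);
 *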
+ */ static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes) { struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import; - if (imp == NULL || !tgt->ltd_active) { + if (!imp || !tgt->ltd_active) { *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES; return; } diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h index 2d00bad58..590f9326a 100644 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_internal.h @@ -43,7 +43,8 @@ /* lov_do_div64(a, b) returns a % b, and a = a / b. * The 32-bit code is LOV-specific due to knowing about stripe limits in * order to reduce the divisor to a 32-bit number. If the divisor is - * already a 32-bit value the compiler handles this directly. */ + * already a 32-bit value the compiler handles this directly. + */ #if BITS_PER_LONG == 64 # define lov_do_div64(n, base) ({ \ uint64_t __base = (base); \ @@ -92,7 +93,8 @@ struct lov_request_set { atomic_t set_refcount; struct obd_export *set_exp; /* XXX: There is @set_exp already, however obd_statfs gets obd_device - only. */ + * only. + */ struct obd_device *set_obd; int set_count; atomic_t set_completes; @@ -114,7 +116,6 @@ void lov_finish_set(struct lov_request_set *set); static inline void lov_get_reqset(struct lov_request_set *set) { - LASSERT(set != NULL); LASSERT(atomic_read(&set->set_refcount) > 0); atomic_inc(&set->set_refcount); } @@ -137,12 +138,10 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm, struct ost_lvb *lvb, __u64 *kms_place); /* lov_offset.c */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, - int stripeno); +u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno); int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, int stripeno, u64 *u64); -u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, - int stripeno); +u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, int stripeno); int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, u64 start, u64 end, u64 *obd_start, u64 *obd_end); @@ -197,7 +196,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm, int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, struct lov_mds_md *lmm, int lmm_bytes); int lov_getstripe(struct obd_export *exp, - struct lov_stripe_md *lsm, struct lov_user_md *lump); + struct lov_stripe_md *lsm, struct lov_user_md __user *lump); int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count, int pattern, int magic); int lov_free_memmd(struct lov_stripe_md **lsmp); diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c index 93fe69eb2..4296aacd8 100644 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ b/drivers/staging/lustre/lustre/lov/lov_io.c @@ -60,7 +60,7 @@ static inline void lov_sub_exit(struct lov_io_sub *sub) static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, struct lov_io_sub *sub) { - if (sub->sub_io != NULL) { + if (sub->sub_io) { if (sub->sub_io_initialized) { lov_sub_enter(sub); cl_io_fini(sub->sub_env, sub->sub_io); @@ -74,7 +74,7 @@ static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, kfree(sub->sub_io); sub->sub_io = NULL; } - if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) { + if (!IS_ERR_OR_NULL(sub->sub_env)) { if (!sub->sub_borrowed) cl_env_put(sub->sub_env, &sub->sub_refcheck); sub->sub_env = NULL; @@ -143,11 +143,11 @@ static int lov_io_sub_init(const struct lu_env *env, 
struct lov_io *lio, int stripe = sub->sub_stripe; int result; - LASSERT(sub->sub_io == NULL); - LASSERT(sub->sub_env == NULL); + LASSERT(!sub->sub_io); + LASSERT(!sub->sub_env); LASSERT(sub->sub_stripe < lio->lis_stripe_count); - if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL)) + if (unlikely(!lov_r0(lov)->lo_sub[stripe])) return -EIO; result = 0; @@ -252,7 +252,6 @@ static int lov_page_stripe(const struct cl_page *page) subobj = lu2lovsub( lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header, &lovsub_device_type)); - LASSERT(subobj != NULL); return subobj->lso_index; } @@ -263,9 +262,9 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, struct cl_page *page = slice->cpl_page; int stripe; - LASSERT(lio->lis_cl.cis_io != NULL); + LASSERT(lio->lis_cl.cis_io); LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object); - LASSERT(lsm != NULL); + LASSERT(lsm); LASSERT(lio->lis_nr_subios > 0); stripe = lov_page_stripe(page); @@ -278,7 +277,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; int result; - LASSERT(lio->lis_object != NULL); + LASSERT(lio->lis_object); /* * Need to be optimized, we can't afford to allocate a piece of memory @@ -288,7 +287,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, libcfs_kvzalloc(lsm->lsm_stripe_count * sizeof(lio->lis_subs[0]), GFP_NOFS); - if (lio->lis_subs != NULL) { + if (lio->lis_subs) { lio->lis_nr_subios = lio->lis_stripe_count; lio->lis_single_subio_index = -1; lio->lis_active_subios = 0; @@ -304,7 +303,6 @@ static void lov_io_slice_init(struct lov_io *lio, io->ci_result = 0; lio->lis_object = obj; - LASSERT(obj->lo_lsm != NULL); lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count; switch (io->ci_type) { @@ -358,7 +356,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) struct lov_object *lov = cl2lov(ios->cis_obj); int i; - if (lio->lis_subs != NULL) { + if (lio->lis_subs) { for (i = 0; i < lio->lis_nr_subios; i++) lov_io_sub_fini(env, lio, &lio->lis_subs[i]); kvfree(lio->lis_subs); @@ -395,7 +393,7 @@ static int lov_io_iter_init(const struct lu_env *env, endpos, &start, &end)) continue; - if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) { + if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) { if (ios->cis_io->ci_type == CIT_READ || ios->cis_io->ci_type == CIT_WRITE || ios->cis_io->ci_type == CIT_FAULT) @@ -601,13 +599,13 @@ static int lov_io_submit(const struct lu_env *env, return rc; } - LASSERT(lio->lis_subs != NULL); + LASSERT(lio->lis_subs); if (alloc) { stripes_qin = libcfs_kvzalloc(sizeof(*stripes_qin) * lio->lis_nr_subios, GFP_NOFS); - if (stripes_qin == NULL) + if (!stripes_qin) return -ENOMEM; for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) @@ -949,13 +947,13 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, } int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) + struct cl_io *io) { struct lov_object *lov = cl2lov(obj); struct lov_io *lio = lov_env_io(env); int result; - LASSERT(lov->lo_lsm != NULL); + LASSERT(lov->lo_lsm); lio->lis_object = lov; switch (io->ci_type) { diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c index d866791d7..ae854bc25 100644 --- a/drivers/staging/lustre/lustre/lov/lov_lock.c +++ b/drivers/staging/lustre/lustre/lov/lov_lock.c @@ -115,7 +115,7 @@ static void lov_sublock_adopt(const struct lu_env 
*env, struct lov_lock *lck, /* * check that sub-lock doesn't have lock link to this top-lock. */ - LASSERT(lov_lock_link_find(env, lck, lsl) == NULL); + LASSERT(!lov_lock_link_find(env, lck, lsl)); LASSERT(idx < lck->lls_nr); lck->lls_sub[idx].sub_lock = lsl; @@ -144,8 +144,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, LASSERT(idx < lck->lls_nr); - link = kmem_cache_alloc(lov_lock_link_kmem, GFP_NOFS | __GFP_ZERO); - if (link != NULL) { + link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS); + if (link) { struct lov_sublock_env *subenv; struct lov_lock_sub *lls; struct cl_lock_descr *descr; @@ -160,7 +160,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, * to remember the subio. This is because lock is able * to be cached, but this is not true for IO. This * further means a sublock might be referenced in - * different io context. -jay */ + * different io context. -jay + */ sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io, descr, "lov-parent", parent); @@ -220,7 +221,7 @@ static int lov_sublock_lock(const struct lu_env *env, LASSERT(!(lls->sub_flags & LSF_HELD)); link = lov_lock_link_find(env, lck, sublock); - LASSERT(link != NULL); + LASSERT(link); lov_lock_unlink(env, link, sublock); lov_sublock_unlock(env, sublock, closure, NULL); lck->lls_cancel_race = 1; @@ -263,7 +264,7 @@ static int lov_subresult(int result, int rc) int rc_rank; LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT, - "result = %d", result); + "result = %d\n", result); LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT, "rc = %d\n", rc); CLASSERT(CLO_WAIT < CLO_REPEAT); @@ -309,14 +310,14 @@ static int lov_lock_sub_init(const struct lu_env *env, * XXX for wide striping smarter algorithm is desirable, * breaking out of the loop, early. */ - if (likely(r0->lo_sub[i] != NULL) && + if (likely(r0->lo_sub[i]) && lov_stripe_intersects(loo->lo_lsm, i, file_start, file_end, &start, &end)) nr++; } LASSERT(nr > 0); lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS); - if (lck->lls_sub == NULL) + if (!lck->lls_sub) return -ENOMEM; lck->lls_nr = nr; @@ -328,14 +329,14 @@ static int lov_lock_sub_init(const struct lu_env *env, * top-lock. 
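 *
 * The surrounding function is the usual two-pass shape: count the
 * stripes whose extent intersects the lock, allocate once, then fill
 * in the same order.  A condensed sketch, with a hypothetical
 * intersects(i) standing in for the lov_stripe_intersects() call:
 *
 *	nr = 0;
 *	for (i = 0; i < r0->lo_nr; ++i)
 *		if (r0->lo_sub[i] && intersects(i))
 *			nr++;
 *
 *	lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]),
 *				       GFP_NOFS);
 *	// second pass: fill lls_sub[0..nr-1]
 *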
*/ for (i = 0, nr = 0; i < r0->lo_nr; ++i) { - if (likely(r0->lo_sub[i] != NULL) && + if (likely(r0->lo_sub[i]) && lov_stripe_intersects(loo->lo_lsm, i, file_start, file_end, &start, &end)) { struct cl_lock_descr *descr; descr = &lck->lls_sub[nr].sub_descr; - LASSERT(descr->cld_obj == NULL); + LASSERT(!descr->cld_obj); descr->cld_obj = lovsub2cl(r0->lo_sub[i]); descr->cld_start = cl_index(descr->cld_obj, start); descr->cld_end = cl_index(descr->cld_obj, end); @@ -369,7 +370,6 @@ static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck, struct cl_lock *sublock; int dying; - LASSERT(lck->lls_sub[i].sub_lock != NULL); sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock; LASSERT(cl_lock_is_mutexed(sublock)); @@ -413,7 +413,6 @@ static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck, if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) { struct cl_lock *sublock; - LASSERT(lck->lls_sub[i].sub_lock != NULL); sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock; LASSERT(cl_lock_is_mutexed(sublock)); LASSERT(sublock->cll_state != CLS_FREEING); @@ -435,13 +434,13 @@ static void lov_lock_fini(const struct lu_env *env, lck = cl2lov_lock(slice); LASSERT(lck->lls_nr_filled == 0); - if (lck->lls_sub != NULL) { + if (lck->lls_sub) { for (i = 0; i < lck->lls_nr; ++i) /* * No sub-locks exists at this point, as sub-lock has * a reference on its parent. */ - LASSERT(lck->lls_sub[i].sub_lock == NULL); + LASSERT(!lck->lls_sub[i].sub_lock); kvfree(lck->lls_sub); } kmem_cache_free(lov_lock_kmem, lck); @@ -479,7 +478,8 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck, result = cl_enqueue_try(env, sublock, io, enqflags); if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) { /* if it is enqueued, try to `wait' on it---maybe it's already - * granted */ + * granted + */ result = cl_wait_try(env, sublock); if (result == CLO_REENQUEUED) result = CLO_WAIT; @@ -515,12 +515,13 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent, if (!IS_ERR(sublock)) { cl_lock_get_trust(sublock); if (parent->cll_state == CLS_QUEUING && - lck->lls_sub[idx].sub_lock == NULL) { + !lck->lls_sub[idx].sub_lock) { lov_sublock_adopt(env, lck, sublock, idx, link); } else { kmem_cache_free(lov_lock_link_kmem, link); /* other thread allocated sub-lock, or enqueue is no - * longer going on */ + * longer going on + */ cl_lock_mutex_put(env, parent); cl_lock_unhold(env, sublock, "lov-parent", parent); cl_lock_mutex_get(env, parent); @@ -574,10 +575,11 @@ static int lov_lock_enqueue(const struct lu_env *env, * Sub-lock might have been canceled, while top-lock was * cached. */ - if (sub == NULL) { + if (!sub) { result = lov_sublock_fill(env, lock, io, lck, i); /* lov_sublock_fill() released @lock mutex, - * restart. */ + * restart. + */ break; } sublock = sub->lss_cl.cls_lock; @@ -605,7 +607,8 @@ static int lov_lock_enqueue(const struct lu_env *env, /* take recursive mutex of sublock */ cl_lock_mutex_get(env, sublock); /* need to release all locks in closure - * otherwise it may deadlock. LU-2683.*/ + * otherwise it may deadlock. LU-2683. + */ lov_sublock_unlock(env, sub, closure, subenv); /* sublock and parent are held. 
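 *
 * The ordering above is the point of the LU-2683 note: take the
 * sublock's recursive mutex first, then drop every lock held in the
 * closure before blocking, otherwise it may deadlock.  Condensed:
 *
 *	cl_lock_mutex_get(env, sublock);
 *	lov_sublock_unlock(env, sub, closure, subenv);
 *	// only now is it safe to wait on the conflicting lock
 *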
*/ @@ -620,7 +623,7 @@ static int lov_lock_enqueue(const struct lu_env *env, break; } } else { - LASSERT(sublock->cll_conflict == NULL); + LASSERT(!sublock->cll_conflict); lov_sublock_unlock(env, sub, closure, subenv); } } @@ -649,11 +652,12 @@ static int lov_lock_unuse(const struct lu_env *env, /* top-lock state cannot change concurrently, because single * thread (one that released the last hold) carries unlocking - * to the completion. */ + * to the completion. + */ LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT); lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) + if (!sub) continue; sublock = sub->lss_cl.cls_lock; @@ -679,7 +683,7 @@ static int lov_lock_unuse(const struct lu_env *env, } static void lov_lock_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) + const struct cl_lock_slice *slice) { struct lov_lock *lck = cl2lov_lock(slice); struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock); @@ -695,10 +699,11 @@ static void lov_lock_cancel(const struct lu_env *env, /* top-lock state cannot change concurrently, because single * thread (one that released the last hold) carries unlocking - * to the completion. */ + * to the completion. + */ lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) + if (!sub) continue; sublock = sub->lss_cl.cls_lock; @@ -757,7 +762,6 @@ again: lls = &lck->lls_sub[i]; sub = lls->sub_lock; - LASSERT(sub != NULL); sublock = sub->lss_cl.cls_lock; rc = lov_sublock_lock(env, lck, lls, closure, &subenv); if (rc == 0) { @@ -776,8 +780,9 @@ again: if (result != 0) break; } - /* Each sublock only can be reenqueued once, so will not loop for - * ever. */ + /* Each sublock only can be reenqueued once, so will not loop + * forever. + */ if (result == 0 && reenqueued != 0) goto again; cl_lock_closure_fini(closure); @@ -805,7 +810,7 @@ static int lov_lock_use(const struct lu_env *env, lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) { + if (!sub) { /* * Sub-lock might have been canceled, while top-lock was * cached. @@ -826,7 +831,8 @@ static int lov_lock_use(const struct lu_env *env, i, 1, rc); } else if (sublock->cll_state == CLS_NEW) { /* Sub-lock might have been canceled, while - * top-lock was cached. */ + * top-lock was cached. 
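 *
 * A note on the pattern: a large share of the hunks in this patch
 * change nothing but this, keeping the comment text as-is and moving
 * the closing terminator onto a line of its own, the block-comment
 * style used across most of the kernel tree.
 *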
+ */ result = -ESTALE; lov_sublock_release(env, lck, i, 1, result); } @@ -852,45 +858,6 @@ static int lov_lock_use(const struct lu_env *env, return result; } -#if 0 -static int lock_lock_multi_match() -{ - struct cl_lock *lock = slice->cls_lock; - struct cl_lock_descr *subneed = &lov_env_info(env)->lti_ldescr; - struct lov_object *loo = cl2lov(lov->lls_cl.cls_obj); - struct lov_layout_raid0 *r0 = lov_r0(loo); - struct lov_lock_sub *sub; - struct cl_object *subobj; - u64 fstart; - u64 fend; - u64 start; - u64 end; - int i; - - fstart = cl_offset(need->cld_obj, need->cld_start); - fend = cl_offset(need->cld_obj, need->cld_end + 1) - 1; - subneed->cld_mode = need->cld_mode; - cl_lock_mutex_get(env, lock); - for (i = 0; i < lov->lls_nr; ++i) { - sub = &lov->lls_sub[i]; - if (sub->sub_lock == NULL) - continue; - subobj = sub->sub_descr.cld_obj; - if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe, - fstart, fend, &start, &end)) - continue; - subneed->cld_start = cl_index(subobj, start); - subneed->cld_end = cl_index(subobj, end); - subneed->cld_obj = subobj; - if (!cl_lock_ext_match(&sub->sub_got, subneed)) { - result = 0; - break; - } - } - cl_lock_mutex_put(env, lock); -} -#endif - /** * Check if the extent region \a descr is covered by \a child against the * specific \a stripe. @@ -922,10 +889,10 @@ static int lov_lock_stripe_is_matching(const struct lu_env *env, idx = lov_stripe_number(lsm, start); if (idx == stripe || - unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) { + unlikely(!lov_r0(lov)->lo_sub[idx])) { idx = lov_stripe_number(lsm, end); if (idx == stripe || - unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) + unlikely(!lov_r0(lov)->lo_sub[idx])) result = 1; } } @@ -970,7 +937,8 @@ static int lov_lock_fits_into(const struct lu_env *env, LASSERT(lov->lls_nr > 0); /* for top lock, it's necessary to match enq flags otherwise it will - * run into problem if a sublock is missing and reenqueue. */ + * run into problem if a sublock is missing and reenqueue. 
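 *
 * Concretely, "fits" starts from an exact comparison on the enqueue
 * flags the top-lock was created with, not a subset test:
 *
 *	if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
 *		return 0;	// cached top-lock cannot be reused
 *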
+ */ if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags) return 0; @@ -1074,7 +1042,7 @@ static void lov_lock_delete(const struct lu_env *env, struct lov_lock_sub *lls = &lck->lls_sub[i]; struct lovsub_lock *lsl = lls->sub_lock; - if (lsl == NULL) /* already removed */ + if (!lsl) /* already removed */ continue; rc = lov_sublock_lock(env, lck, lls, closure, NULL); @@ -1090,9 +1058,9 @@ static void lov_lock_delete(const struct lu_env *env, lov_sublock_release(env, lck, i, 1, 0); link = lov_lock_link_find(env, lck, lsl); - LASSERT(link != NULL); + LASSERT(link); lov_lock_unlink(env, link, lsl); - LASSERT(lck->lls_sub[i].sub_lock == NULL); + LASSERT(!lck->lls_sub[i].sub_lock); lov_sublock_unlock(env, lsl, closure, NULL); } @@ -1112,7 +1080,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie, sub = &lck->lls_sub[i]; (*p)(env, cookie, " %d %x: ", i, sub->sub_flags); - if (sub->sub_lock != NULL) + if (sub->sub_lock) cl_lock_print(env, cookie, p, sub->sub_lock->lss_cl.cls_lock); else @@ -1139,8 +1107,8 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, struct lov_lock *lck; int result; - lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lck != NULL) { + lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); + if (lck) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops); result = lov_lock_sub_init(env, lck, io); } else @@ -1157,7 +1125,8 @@ static void lov_empty_lock_fini(const struct lu_env *env, } static int lov_empty_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_lock_slice *slice) + lu_printer_t p, + const struct cl_lock_slice *slice) { (*p)(env, cookie, "empty\n"); return 0; @@ -1170,13 +1139,13 @@ static const struct cl_lock_operations lov_empty_lock_ops = { }; int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) + struct cl_lock *lock, const struct cl_io *io) { struct lov_lock *lck; int result = -ENOMEM; - lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lck != NULL) { + lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); + if (lck) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops); lck->lls_orig = lock->cll_descr; result = 0; diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c index 97115bec7..029cd4d62 100644 --- a/drivers/staging/lustre/lustre/lov/lov_merge.c +++ b/drivers/staging/lustre/lustre/lov/lov_merge.c @@ -129,7 +129,8 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm, "stripe %d KMS %sing %llu->%llu\n", stripe, kms > loi->loi_kms ? "increase":"shrink", loi->loi_kms, kms); - loi_kms_set(loi, loi->loi_lvb.lvb_size = kms); + loi->loi_lvb.lvb_size = kms; + loi_kms_set(loi, loi->loi_lvb.lvb_size); } return 0; } diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c index 6c2bdfe9c..5daa7faf4 100644 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ b/drivers/staging/lustre/lustre/lov/lov_obd.c @@ -61,7 +61,8 @@ #include "lov_internal.h" /* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion. - Any function that expects lov_tgts to remain stationary must take a ref. */ + * Any function that expects lov_tgts to remain stationary must take a ref. 
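 *
 * The calling convention, sketched with the helpers used throughout
 * this file:
 *
 *	obd_getref(obd);	// pin lov->lov_tgts[] in place
 *	// ... walk lov->lov_tgts[] without it shifting underfoot ...
 *	obd_putref(obd);	// dropping the last ref reaps the
 *				// targets parked on the death row
 *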
+ */ static void lov_getref(struct obd_device *obd) { struct lov_obd *lov = &obd->u.lov; @@ -96,7 +97,8 @@ static void lov_putref(struct obd_device *obd) list_add(&tgt->ltd_kill, &kill); /* XXX - right now there is a dependency on ld_tgt_count * being the maximum tgt index for computing the - * mds_max_easize. So we can't shrink it. */ + * mds_max_easize. So we can't shrink it. + */ lov_ost_pool_remove(&lov->lov_packed, i); lov->lov_tgts[i] = NULL; lov->lov_death_row--; @@ -158,7 +160,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate, if (activate) { tgt_obd->obd_no_recov = 0; /* FIXME this is probably supposed to be - ptlrpc_set_import_active. Horrible naming. */ + * ptlrpc_set_import_active. Horrible naming. + */ ptlrpc_activate_import(imp); } @@ -262,7 +265,7 @@ static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) osc_obd = class_exp2obd(tgt->ltd_exp); CDEBUG(D_CONFIG, "%s: disconnecting target %s\n", - obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); + obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); if (tgt->ltd_active) { tgt->ltd_active = 0; @@ -315,7 +318,8 @@ static int lov_disconnect(struct obd_export *exp) } /* Let's hold another reference so lov_del_obd doesn't spin through - putref every time */ + * putref every time + */ obd_getref(obd); for (i = 0; i < lov->desc.ld_tgt_count; i++) { @@ -358,7 +362,7 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, * LU-642, initially inactive OSC could miss the obd_connect, * we make up for it here. */ - if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL && + if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp && obd_uuid_equals(uuid, &tgt->ltd_uuid)) { struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"}; @@ -399,10 +403,9 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, CDEBUG(D_INFO, "OSC %s already %sactive!\n", uuid->uuid, active ? "" : "in"); goto out; - } else { - CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", - obd_uuid2str(uuid), active ? "" : "in"); } + CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", + obd_uuid2str(uuid), active ? 
"" : "in"); lov->lov_tgts[index]->ltd_active = active; if (active) { @@ -481,7 +484,8 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched, continue; /* don't send sync event if target not - * connected/activated */ + * connected/activated + */ if (is_sync && !lov->lov_tgts[i]->ltd_active) continue; @@ -521,12 +525,12 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME, &obd->obd_uuid); - if (tgt_obd == NULL) + if (!tgt_obd) return -EINVAL; mutex_lock(&lov->lov_lock); - if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) { + if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) { tgt = lov->lov_tgts[index]; CERROR("UUID %s already assigned at LOV target index %d\n", obd_uuid2str(&tgt->ltd_uuid), index); @@ -543,7 +547,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, while (newsize < index + 1) newsize <<= 1; newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (newtgts == NULL) { + if (!newtgts) { mutex_unlock(&lov->lov_lock); return -ENOMEM; } @@ -590,14 +594,15 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, mutex_unlock(&lov->lov_lock); CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n", - index, tgt->ltd_gen, lov->desc.ld_tgt_count); + index, tgt->ltd_gen, lov->desc.ld_tgt_count); rc = obd_notify(obd, tgt_obd, OBD_NOTIFY_CREATE, &index); if (lov->lov_connects == 0) { /* lov_connect hasn't been called yet. We'll do the - lov_connect_obd on this target when that fn first runs, - because we don't know the connect flags yet. */ + * lov_connect_obd on this target when that fn first runs, + * because we don't know the connect flags yet. + */ return 0; } @@ -613,11 +618,11 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, goto out; } - if (lov->lov_cache != NULL) { + if (lov->lov_cache) { rc = obd_set_info_async(NULL, tgt->ltd_exp, - sizeof(KEY_CACHE_SET), KEY_CACHE_SET, - sizeof(struct cl_client_cache), lov->lov_cache, - NULL); + sizeof(KEY_CACHE_SET), KEY_CACHE_SET, + sizeof(struct cl_client_cache), + lov->lov_cache, NULL); if (rc < 0) goto out; } @@ -702,8 +707,9 @@ static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) kfree(tgt); /* Manual cleanup - no cleanup logs to clean up the osc's. We must - do it ourselves. And we can't do it from lov_cleanup, - because we just lost our only reference to it. */ + * do it ourselves. And we can't do it from lov_cleanup, + * because we just lost our only reference to it. 
+ */ if (osc_obd) class_manual_cleanup(osc_obd); } @@ -773,9 +779,9 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg) if (desc->ld_magic != LOV_DESC_MAGIC) { if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) { - CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", - obd->obd_name, desc); - lustre_swab_lov_desc(desc); + CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", + obd->obd_name, desc); + lustre_swab_lov_desc(desc); } else { CERROR("%s: Bad lov desc magic: %#x\n", obd->obd_name, desc->ld_magic); @@ -859,7 +865,8 @@ static int lov_cleanup(struct obd_device *obd) /* free pool structs */ CDEBUG(D_INFO, "delete pool %p\n", pool); /* In the function below, .hs_keycmp resolves to - * pool_hashkey_keycmp() */ + * pool_hashkey_keycmp() + */ /* coverity[overrun-buffer-val] */ lov_pool_del(obd, pool->pool_name); } @@ -879,8 +886,9 @@ static int lov_cleanup(struct obd_device *obd) if (lov->lov_tgts[i]->ltd_active || atomic_read(&lov->lov_refcount)) /* We should never get here - these - should have been removed in the - disconnect. */ + * should have been removed in the + * disconnect. + */ CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n", i, lov->lov_death_row, atomic_read(&lov->lov_refcount)); @@ -981,7 +989,7 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa, ost_idx = src_oa->o_nlink; lsm = *ea; - if (lsm == NULL) { + if (!lsm) { rc = -EINVAL; goto out; } @@ -1025,8 +1033,8 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, struct lov_obd *lov; int rc = 0; - LASSERT(ea != NULL); - if (exp == NULL) + LASSERT(ea); + if (!exp) return -EINVAL; if ((src_oa->o_valid & OBD_MD_FLFLAGS) && @@ -1043,7 +1051,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, /* Recreate a specific object id at the given OST index */ if ((src_oa->o_valid & OBD_MD_FLFLAGS) && (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) { - rc = lov_recreate(exp, src_oa, ea, oti); + rc = lov_recreate(exp, src_oa, ea, oti); } obd_putref(exp->exp_obd); @@ -1052,7 +1060,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, #define ASSERT_LSM_MAGIC(lsmp) \ do { \ - LASSERT((lsmp) != NULL); \ + LASSERT((lsmp)); \ LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \ (lsmp)->lsm_magic == LOV_MAGIC_V3), \ "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \ @@ -1065,7 +1073,6 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, struct lov_request_set *set; struct obd_info oinfo; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0, err = 0; @@ -1085,9 +1092,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, if (rc) goto out; - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (oa->o_valid & OBD_MD_FLCOOKIE) oti->oti_logcookies = set->set_cookies + req->rq_stripe; @@ -1105,10 +1110,9 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, } } - if (rc == 0) { - LASSERT(lsm_op_find(lsm->lsm_magic) != NULL); + if (rc == 0) rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp); - } + err = lov_fini_destroy_set(set); out: obd_putref(exp->exp_obd); @@ -1129,11 +1133,10 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset, } static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct ptlrpc_request_set *rqset) + struct ptlrpc_request_set *rqset) { struct lov_request_set *lovset; struct lov_obd *lov; - struct 
list_head *pos; struct lov_request *req; int rc = 0, err; @@ -1153,9 +1156,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count, oinfo->oi_md->lsm_stripe_size); - list_for_each(pos, &lovset->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &lovset->set_list, rq_link) { CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe, POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx); @@ -1174,7 +1175,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, if (!list_empty(&rqset->set_requests)) { LASSERT(rc == 0); - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_getattr_interpret; rqset->set_arg = (void *)lovset; return rc; @@ -1199,14 +1200,14 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset, } /* If @oti is given, the request goes from MDS and responses from OSTs are not - needed. Otherwise, a client is waiting for responses. */ + * needed. Otherwise, a client is waiting for responses. + */ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, struct ptlrpc_request_set *rqset) { struct lov_request_set *set; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0; @@ -1230,9 +1231,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, oinfo->oi_md->lsm_stripe_count, oinfo->oi_md->lsm_stripe_size); - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) oti->oti_logcookies = set->set_cookies + req->rq_stripe; @@ -1262,7 +1261,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, return rc ? rc : err; } - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_setattr_interpret; rqset->set_arg = (void *)set; @@ -1272,7 +1271,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, /* find any ldlm lock of the inode in lov * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int lov_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t it, void *data) @@ -1326,20 +1326,17 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_device *obd = class_exp2obd(exp); struct lov_request_set *set; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0; - LASSERT(oinfo != NULL); - LASSERT(oinfo->oi_osfs != NULL); + LASSERT(oinfo->oi_osfs); lov = &obd->u.lov; rc = lov_prep_statfs_set(obd, oinfo, &set); if (rc) return rc; - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); + list_for_each_entry(req, &set->set_list, rq_link) { rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp, &req->rq_oi, max_age, rqset); if (rc) @@ -1355,7 +1352,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, return rc ? 
rc : err; } - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_statfs_interpret; rqset->set_arg = (void *)set; return 0; @@ -1369,9 +1366,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; /* for obdclass we forbid using obd_statfs_rqset, but prefer using async - * statfs requests */ + * statfs requests + */ set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1385,7 +1383,7 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, } static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lov_obd *lov = &obddev->u.lov; @@ -1416,11 +1414,13 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) return -EFAULT; - flags = uarg ? *(__u32 *)uarg : 0; + memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32)); + flags = flags & LL_STATFS_NODELAY ? OBD_STATFS_NODELAY : 0; + /* got statfs data */ rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf, cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), @@ -1428,8 +1428,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) + min((int)data->ioc_plen1, + (int)sizeof(stat_buf)))) return -EFAULT; break; } @@ -1501,7 +1501,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, &qctl->obd_uuid)) continue; - if (tgt->ltd_exp == NULL) + if (!tgt->ltd_exp) return -EINVAL; break; @@ -1543,14 +1543,15 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, continue; /* ll_umount_begin() sets force flag but for lov, not - * osc. Let's pass it through */ + * osc. Let's pass it through + */ osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp); osc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp, len, karg, uarg); - if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { + if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) return err; - } else if (err) { + if (err) { if (lov->lov_tgts[i]->ltd_active) { CDEBUG(err == -ENOTTY ? D_IOCTL : D_WARNING, @@ -1620,7 +1621,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, return -EINVAL; /* If we have finished mapping on previous device, shift logical - * offset to start of next device */ + * offset to start of next device + */ if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end, &lun_start, &lun_end)) != 0 && local_end < lun_end) { @@ -1628,7 +1630,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, *start_stripe = stripe_no; } else { /* This is a special value to indicate that caller should - * calculate offset in next stripe. */ + * calculate offset in next stripe. 
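 *
 * The request-set loops earlier in this file drop the open-coded
 * cursor in favour of the typed iterator; both walk set->set_list
 * identically, the second without the struct list_head *pos cursor
 * (handle() is a stand-in, not a function in this file):
 *
 *	list_for_each(pos, &set->set_list) {		// old
 *		req = list_entry(pos, struct lov_request, rq_link);
 *		handle(req);
 *	}
 *
 *	list_for_each_entry(req, &set->set_list, rq_link)	// new
 *		handle(req);
 *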
+ */ fm_end_offset = 0; *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count; } @@ -1739,7 +1742,7 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count); fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS); - if (fm_local == NULL) { + if (!fm_local) { rc = -ENOMEM; goto out; } @@ -1759,7 +1762,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, fm_end = fm_key->oa.o_size; last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end, - actual_start_stripe, &stripe_count); + actual_start_stripe, + &stripe_count); fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end, &start_stripe); @@ -1796,7 +1800,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, /* If this is a continuation FIEMAP call and we are on * starting stripe then lun_start needs to be set to - * fm_end_offset */ + * fm_end_offset + */ if (fm_end_offset != 0 && cur_stripe == start_stripe) lun_start = fm_end_offset; @@ -1818,7 +1823,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, len_mapped_single_call = 0; /* If the output buffer is very large and the objects have many - * extents we may need to loop on a single OST repeatedly */ + * extents we may need to loop on a single OST repeatedly + */ ost_eof = 0; ost_done = 0; do { @@ -1874,7 +1880,8 @@ inactive_tgt: if (ext_count == 0) { ost_done = 1; /* If last stripe has hole at the end, - * then we need to return */ + * then we need to return + */ if (cur_stripe_wrap == last_stripe) { fiemap->fm_mapped_extents = 0; goto finish; @@ -1896,7 +1903,8 @@ inactive_tgt: ost_done = 1; /* Clear the EXTENT_LAST flag which can be present on - * last extent */ + * last extent + */ if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST) lcl_fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST; @@ -1925,7 +1933,8 @@ inactive_tgt: finish: /* Indicate that we are returning device offsets unless file just has - * single stripe */ + * single stripe + */ if (lsm->lsm_stripe_count > 1) fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER; @@ -1933,7 +1942,8 @@ finish: goto skip_last_device_calc; /* Check if we have reached the last stripe and whether mapping for that - * stripe is done. */ + * stripe is done. + */ if (cur_stripe_wrap == last_stripe) { if (ost_done || ost_eof) fiemap->fm_extents[current_extent - 1].fe_flags |= @@ -1978,10 +1988,12 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp, /* XXX This is another one of those bits that will need to * change if we ever actually support nested LOVs. It uses - * the lock's export to find out which stripe it is. */ + * the lock's export to find out which stripe it is. + */ /* XXX - it's assumed all the locks for deleted OSTs have * been cancelled. Also, the export for deleted OSTs will - * be NULL and won't match the lock's export. */ + * be NULL and won't match the lock's export. 
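 *
 * The ioctl-facing hunks above add the sparse __user annotation; a
 * minimal sketch with a hypothetical handler name, showing why such
 * pointers go through the copy helpers instead of being dereferenced:
 *
 *	static int ex_getflags(void __user *uarg)
 *	{
 *		__u32 flags;
 *
 *		if (copy_from_user(&flags, uarg, sizeof(flags)))
 *			return -EFAULT;
 *		return flags & LL_STATFS_NODELAY ? 1 : 0;
 *	}
 *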
+ */ for (i = 0; i < lsm->lsm_stripe_count; i++) { loi = lsm->lsm_oinfo[i]; if (lov_oinfo_is_dummy(loi)) @@ -2070,7 +2082,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, unsigned next_id = 0, mds_con = 0; incr = check_uuid = do_inactive = no_set = 0; - if (set == NULL) { + if (!set) { no_set = 1; set = ptlrpc_prep_set(); if (!set) @@ -2093,7 +2105,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, } else if (KEY_IS(KEY_MDS_CONN)) { mds_con = 1; } else if (KEY_IS(KEY_CACHE_SET)) { - LASSERT(lov->lov_cache == NULL); + LASSERT(!lov->lov_cache); lov->lov_cache = val; do_inactive = 1; } @@ -2119,12 +2131,12 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, /* Only want a specific OSC */ if (mgi->uuid && !obd_uuid_equals(mgi->uuid, - &tgt->ltd_uuid)) + &tgt->ltd_uuid)) continue; err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, sizeof(int), - &mgi->group, set); + keylen, key, sizeof(int), + &mgi->group, set); } else if (next_id) { err = obd_set_info_async(env, tgt->ltd_exp, keylen, key, vallen, @@ -2136,7 +2148,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, continue; err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, vallen, val, set); + keylen, key, vallen, val, set); } if (!rc) @@ -2187,7 +2199,7 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, oqctl->qc_cmd != Q_INITQUOTA && oqctl->qc_cmd != LUSTRE_Q_SETQUOTA && oqctl->qc_cmd != Q_FINVALIDATE) { - CERROR("bad quota opc %x for lov obd", oqctl->qc_cmd); + CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd); return -EFAULT; } @@ -2317,7 +2329,8 @@ static int __init lov_init(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. + */ CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches); rc = lu_kmem_init(lov_caches); @@ -2325,9 +2338,9 @@ static int __init lov_init(void) return rc; lov_oinfo_slab = kmem_cache_create("lov_oinfo", - sizeof(struct lov_oinfo), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (lov_oinfo_slab == NULL) { + sizeof(struct lov_oinfo), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!lov_oinfo_slab) { lu_kmem_fini(lov_caches); return -ENOMEM; } @@ -2353,7 +2366,7 @@ static void /*__exit*/ lov_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. 
"); -MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver"); +MODULE_DESCRIPTION("Lustre Logical Object Volume"); MODULE_LICENSE("GPL"); MODULE_VERSION(LUSTRE_VERSION_STRING); diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c index 3b79ebc8e..1f8ed95a6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ b/drivers/staging/lustre/lustre/lov/lov_object.c @@ -59,7 +59,7 @@ struct lov_layout_operations { const struct cl_object_conf *conf, union lov_layout_state *state); int (*llo_delete)(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state); + union lov_layout_state *state); void (*llo_fini)(const struct lu_env *env, struct lov_object *lov, union lov_layout_state *state); void (*llo_install)(const struct lu_env *env, struct lov_object *lov, @@ -67,7 +67,7 @@ struct lov_layout_operations { int (*llo_print)(const struct lu_env *env, void *cookie, lu_printer_t p, const struct lu_object *o); int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage); + struct cl_page *page, struct page *vmpage); int (*llo_lock_init)(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); @@ -135,7 +135,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, * Do not leave the object in cache to avoid accessing * freed memory. This is because osc_object is referring to * lov_oinfo of lsm_stripe_data which will be freed due to - * this failure. */ + * this failure. + */ cl_object_kill(env, stripe); cl_object_put(env, stripe); return -EIO; @@ -154,7 +155,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, /* reuse ->coh_attr_guard to protect coh_parent change */ spin_lock(&subhdr->coh_attr_guard); parent = subhdr->coh_parent; - if (parent == NULL) { + if (!parent) { subhdr->coh_parent = hdr; spin_unlock(&subhdr->coh_attr_guard); subhdr->coh_nesting = hdr->coh_nesting + 1; @@ -170,11 +171,12 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, spin_unlock(&subhdr->coh_attr_guard); old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type); - LASSERT(old_obj != NULL); + LASSERT(old_obj); old_lov = cl2lov(lu2cl(old_obj)); if (old_lov->lo_layout_invalid) { /* the object's layout has already changed but isn't - * refreshed */ + * refreshed + */ lu_object_unhash(env, &stripe->co_lu); result = -EAGAIN; } else { @@ -212,14 +214,14 @@ static int lov_init_raid0(const struct lu_env *env, LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic); } - LASSERT(lov->lo_lsm == NULL); + LASSERT(!lov->lo_lsm); lov->lo_lsm = lsm_addref(lsm); r0->lo_nr = lsm->lsm_stripe_count; LASSERT(r0->lo_nr <= lov_targets_nr(dev)); r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]), GFP_NOFS); - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { result = 0; subconf->coc_inode = conf->coc_inode; spin_lock_init(&r0->lo_sub_lock); @@ -241,9 +243,10 @@ static int lov_init_raid0(const struct lu_env *env, subdev = lovsub2cl_dev(dev->ld_target[ost_idx]); subconf->u.coc_oinfo = oinfo; - LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx); + LASSERTF(subdev, "not init ost %d\n", ost_idx); /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() */ + * lu_obj_hop_keycmp() + */ /* coverity[overrun-buffer-val] */ stripe = lov_sub_find(env, subdev, ofid, subconf); if (!IS_ERR(stripe)) { @@ -263,15 +266,15 @@ out: } static int lov_init_released(const struct lu_env *env, - 
struct lov_device *dev, struct lov_object *lov, - const struct cl_object_conf *conf, - union lov_layout_state *state) + struct lov_device *dev, struct lov_object *lov, + const struct cl_object_conf *conf, + union lov_layout_state *state) { struct lov_stripe_md *lsm = conf->u.coc_md->lsm; - LASSERT(lsm != NULL); + LASSERT(lsm); LASSERT(lsm_is_released(lsm)); - LASSERT(lov->lo_lsm == NULL); + LASSERT(!lov->lo_lsm); lov->lo_lsm = lsm_addref(lsm); return 0; @@ -310,7 +313,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, cl_object_put(env, sub); /* ... wait until it is actually destroyed---sub-object clears its - * ->lo_sub[] slot in lovsub_object_fini() */ + * ->lo_sub[] slot in lovsub_object_fini() + */ if (r0->lo_sub[idx] == los) { waiter = &lov_env_info(env)->lti_waiter; init_waitqueue_entry(waiter, current); @@ -318,7 +322,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, set_current_state(TASK_UNINTERRUPTIBLE); while (1) { /* this wait-queue is signaled at the end of - * lu_object_free(). */ + * lu_object_free(). + */ set_current_state(TASK_UNINTERRUPTIBLE); spin_lock(&r0->lo_sub_lock); if (r0->lo_sub[idx] == los) { @@ -332,7 +337,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, } remove_wait_queue(&bkt->lsb_marche_funebre, waiter); } - LASSERT(r0->lo_sub[idx] == NULL); + LASSERT(!r0->lo_sub[idx]); } static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, @@ -345,11 +350,11 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, dump_lsm(D_INODE, lsm); lov_layout_wait(env, lov); - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { for (i = 0; i < r0->lo_nr; ++i) { struct lovsub_object *los = r0->lo_sub[i]; - if (los != NULL) { + if (los) { cl_locks_prune(env, &los->lso_cl, 1); /* * If top-level object is to be evicted from @@ -374,7 +379,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, { struct lov_layout_raid0 *r0 = &state->raid0; - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { kvfree(r0->lo_sub); r0->lo_sub = NULL; } @@ -384,7 +389,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, } static void lov_fini_released(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) + union lov_layout_state *state) { dump_lsm(D_INODE, lov->lo_lsm); lov_free_memmd(&lov->lo_lsm); @@ -406,13 +411,13 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie, int i; (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n", - r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); + r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm, + lsm->lsm_magic, atomic_read(&lsm->lsm_refc), + lsm->lsm_stripe_count, lsm->lsm_layout_gen); for (i = 0; i < r0->lo_nr; ++i) { struct lu_object *sub; - if (r0->lo_sub[i] != NULL) { + if (r0->lo_sub[i]) { sub = lovsub2lu(r0->lo_sub[i]); lu_object_print(env, cookie, p, sub); } else { @@ -423,16 +428,16 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie, } static int lov_print_released(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) + lu_printer_t p, const struct lu_object *o) { struct lov_object *lov = lu2lov(o); struct lov_stripe_md *lsm = lov->lo_lsm; (*p)(env, cookie, - "released: %s, lsm{%p 0x%08X %d %u %u}:\n", - lov->lo_layout_invalid ? 
"invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); + "released: %s, lsm{%p 0x%08X %d %u %u}:\n", + lov->lo_layout_invalid ? "invalid" : "valid", lsm, + lsm->lsm_magic, atomic_read(&lsm->lsm_refc), + lsm->lsm_stripe_count, lsm->lsm_layout_gen); return 0; } @@ -465,7 +470,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, * context, and this function is called in ccc_lock_state(), it will * hit this assertion. * Anyway, it's still okay to call attr_get w/o type guard as layout - * can't go if locks exist. */ + * can't go if locks exist. + */ /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */ if (!r0->lo_attr_valid) { @@ -475,7 +481,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, memset(lvb, 0, sizeof(*lvb)); /* XXX: timestamps can be negative by sanity:test_39m, - * how can it be? */ + * how can it be? + */ lvb->lvb_atime = LLONG_MIN; lvb->lvb_ctime = LLONG_MIN; lvb->lvb_mtime = LLONG_MIN; @@ -569,7 +576,7 @@ static const struct lov_layout_operations lov_dispatch[] = { */ static enum lov_layout_type lov_type(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return LLT_EMPTY; if (lsm_is_released(lsm)) return LLT_RELEASED; @@ -624,7 +631,7 @@ static void lov_conf_lock(struct lov_object *lov) { LASSERT(lov->lo_owner != current); down_write(&lov->lo_type_guard); - LASSERT(lov->lo_owner == NULL); + LASSERT(!lov->lo_owner); lov->lo_owner = current; } @@ -639,9 +646,9 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov) struct l_wait_info lwi = { 0 }; while (atomic_read(&lov->lo_active_ios) > 0) { - CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n", - PFID(lu_object_fid(lov2lu(lov))), - atomic_read(&lov->lo_active_ios)); + CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n", + PFID(lu_object_fid(lov2lu(lov))), + atomic_read(&lov->lo_active_ios)); l_wait_event(lov->lo_waitq, atomic_read(&lov->lo_active_ios) == 0, &lwi); @@ -666,7 +673,7 @@ static int lov_layout_change(const struct lu_env *unused, LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch)); - if (conf->u.coc_md != NULL) + if (conf->u.coc_md) llt = lov_type(conf->u.coc_md->lsm); LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch)); @@ -689,7 +696,7 @@ static int lov_layout_change(const struct lu_env *unused, old_ops->llo_fini(env, lov, &lov->u); LASSERT(atomic_read(&lov->lo_active_ios) == 0); - LASSERT(hdr->coh_tree.rnode == NULL); + LASSERT(!hdr->coh_tree.rnode); LASSERT(hdr->coh_pages == 0); lov->lo_type = LLT_EMPTY; @@ -767,10 +774,10 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, LASSERT(conf->coc_opc == OBJECT_CONF_SET); - if (conf->u.coc_md != NULL) + if (conf->u.coc_md) lsm = conf->u.coc_md->lsm; - if ((lsm == NULL && lov->lo_lsm == NULL) || - ((lsm != NULL && lov->lo_lsm != NULL) && + if ((!lsm && !lov->lo_lsm) || + ((lsm && lov->lo_lsm) && (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) && (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) { /* same version of layout */ @@ -818,7 +825,7 @@ static int lov_object_print(const struct lu_env *env, void *cookie, } int lov_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page, vmpage); @@ -845,7 +852,8 @@ static int lov_attr_get(const struct lu_env *env, struct cl_object *obj, struct 
cl_attr *attr) { /* do not take lock, as this function is called under a - * spin-lock. Layout is protected from changing by ongoing IO. */ + * spin-lock. Layout is protected from changing by ongoing IO. + */ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr); } @@ -891,8 +899,8 @@ struct lu_object *lov_object_alloc(const struct lu_env *env, struct lov_object *lov; struct lu_object *obj; - lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO); - if (lov != NULL) { + lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS); + if (lov) { obj = lov2lu(lov); lu_object_init(obj, NULL, dev); lov->lo_cl.co_ops = &lov_ops; @@ -913,11 +921,11 @@ static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov) struct lov_stripe_md *lsm = NULL; lov_conf_freeze(lov); - if (lov->lo_lsm != NULL) { + if (lov->lo_lsm) { lsm = lsm_addref(lov->lo_lsm); CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n", - lsm, atomic_read(&lsm->lsm_refc), - lov->lo_layout_invalid, current); + lsm, atomic_read(&lsm->lsm_refc), + lov->lo_layout_invalid, current); } lov_conf_thaw(lov); return lsm; @@ -928,12 +936,12 @@ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj) struct lu_object *luobj; struct lov_stripe_md *lsm = NULL; - if (clobj == NULL) + if (!clobj) return NULL; luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu, &lov_device_type); - if (luobj != NULL) + if (luobj) lsm = lov_lsm_addref(lu2lov(luobj)); return lsm; } @@ -941,7 +949,7 @@ EXPORT_SYMBOL(lov_lsm_get); void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm) { - if (lsm != NULL) + if (lsm) lov_free_memmd(&lsm); } EXPORT_SYMBOL(lov_lsm_put); @@ -953,7 +961,7 @@ int lov_read_and_clear_async_rc(struct cl_object *clob) luobj = lu_object_locate(&cl_object_header(clob)->coh_lu, &lov_device_type); - if (luobj != NULL) { + if (luobj) { struct lov_object *lov = lu2lov(luobj); lov_conf_freeze(lov); @@ -963,7 +971,6 @@ int lov_read_and_clear_async_rc(struct cl_object *clob) int i; lsm = lov->lo_lsm; - LASSERT(lsm != NULL); for (i = 0; i < lsm->lsm_stripe_count; i++) { struct lov_oinfo *loi = lsm->lsm_oinfo[i]; diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c index aa520aa76..ae83eb0f6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_offset.c +++ b/drivers/staging/lustre/lustre/lov/lov_offset.c @@ -43,8 +43,7 @@ #include "lov_internal.h" /* compute object size given "stripeno" and the ost size */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, - int stripeno) +u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno) { unsigned long ssize = lsm->lsm_stripe_size; unsigned long stripe_size; @@ -55,7 +54,6 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, if (ost_size == 0) return 0; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth); /* lov_do_div64(a, b) returns a % b, and a = a / b */ @@ -115,7 +113,8 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, * this function returns < 0 when the offset was "before" the stripe and * was moved forward to the start of the stripe in question; 0 when it * falls in the stripe and no shifting was done; > 0 when the offset - * was outside the stripe and was pulled back to its final byte. */ + * was outside the stripe and was pulled back to its final byte. 
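The stripe arithmetic in this file leans on the lov_do_div64(a, b) helper described above, which evaluates to a % b while truncating a to a / b. A hedged sketch of the plain-RAID0 case (illustrative helper only; the real code dispatches through lsm_op_find() so each layout type can supply its own stripe width):

	/* Illustrative: map a file offset to (stripe index, offset within
	 * that stripe object) for simple RAID0 striping with stripe size
	 * ssize and n stripes.
	 */
	static inline void off_to_stripe(u64 off, u64 ssize, u32 n,
					 int *stripeno, u64 *obdoff)
	{
		u64 swidth = ssize * n;	/* one full row across all stripes */
		u64 row = off;
		u64 in_row, off_in;

		in_row = lov_do_div64(row, swidth);	/* row    = off / swidth   */
		off_in = lov_do_div64(in_row, ssize);	/* in_row = in_row / ssize */
		*stripeno = (int)in_row;		/* stripe index            */
		*obdoff = row * ssize + off_in;		/* offset in stripe object */
	}

For example, with ssize = 1 MiB and n = 4, file offset 9 MiB falls in stripe 1 at object offset 2 MiB.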
+ */ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, int stripeno, u64 *obdoff) { @@ -129,8 +128,6 @@ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, return 0; } - LASSERT(lsm_op_find(magic) != NULL); - lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off, &swidth); @@ -183,7 +180,6 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, if (file_size == OBD_OBJECT_EOF) return OBD_OBJECT_EOF; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size, &swidth); @@ -213,7 +209,8 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, /* given an extent in an lov and a stripe, calculate the extent of the stripe * that is contained within the lov extent. this returns true if the given - * stripe does intersect with the lov extent. */ + * stripe does intersect with the lov extent. + */ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, u64 start, u64 end, u64 *obd_start, u64 *obd_end) { @@ -227,7 +224,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, /* this stripe doesn't intersect the file extent when neither * start or the end intersected the stripe and obd_start and - * obd_end got rounded up to the save value. */ + * obd_end got rounded up to the same value. + */ if (start_side != 0 && end_side != 0 && *obd_start == *obd_end) return 0; @@ -238,7 +236,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, * in the wrong direction and touch it up. * interestingly, this can't underflow since end must be > start * if we passed through the previous check. - * (should we assert for that somewhere?) */ + * (should we assert for that somewhere?) + */ if (end_side != 0) (*obd_end)--; @@ -252,7 +251,6 @@ int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off) u64 stripe_off, swidth; int magic = lsm->lsm_magic; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth); stripe_off = lov_do_div64(lov_off, swidth); diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c index 6b2d10071..3925633a9 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pack.c +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c @@ -134,17 +134,18 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, if ((lmm_magic != LOV_MAGIC_V1) && (lmm_magic != LOV_MAGIC_V3)) { CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", - lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); + lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); return -EINVAL; } if (lsm) { /* If we are just sizing the EA, limit the stripe count - * to the actual number of OSTs in this filesystem. */ + * to the actual number of OSTs in this filesystem. + */ if (!lmmp) { stripe_count = lov_get_stripecnt(lov, lmm_magic, - lsm->lsm_stripe_count); + lsm->lsm_stripe_count); lsm->lsm_stripe_count = stripe_count; } else if (!lsm_is_released(lsm)) { stripe_count = lsm->lsm_stripe_count; @@ -155,7 +156,8 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, /* No need to allocate more than maximum supported stripes.
* Anyway, this is pretty inaccurate since ld_tgt_count now * represents max index and we should rely on the actual number - * of OSTs instead */ + * of OSTs instead + */ stripe_count = lov_mds_md_max_stripe_count( lov->lov_ocd.ocd_max_easize, lmm_magic); @@ -183,7 +185,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, return -ENOMEM; } - CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n", + CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n", lmm_magic, lmm_size); lmmv1 = *lmmp; @@ -241,7 +243,8 @@ __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count) stripe_count = 1; /* stripe count is based on whether ldiskfs can handle - * larger EA sizes */ + * larger EA sizes + */ if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE && lov->lov_ocd.ocd_max_easize) max_stripes = lov_mds_md_max_stripe_count( @@ -257,14 +260,15 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count) { int rc; - if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) { + if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) { CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n", le32_to_cpu(*(__u32 *)lmm), lmm_bytes); CERROR("%*phN\n", lmm_bytes, lmm); return -EINVAL; } rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm, - lmm_bytes, stripe_count); + lmm_bytes, + stripe_count); return rc; } @@ -306,10 +310,9 @@ int lov_free_memmd(struct lov_stripe_md **lsmp) *lsmp = NULL; LASSERT(atomic_read(&lsm->lsm_refc) > 0); refc = atomic_dec_return(&lsm->lsm_refc); - if (refc == 0) { - LASSERT(lsm_op_find(lsm->lsm_magic) != NULL); + if (refc == 0) lsm_op_find(lsm->lsm_magic)->lsm_free(lsm); - } + return refc; } @@ -359,7 +362,6 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, if (!lmm) return lsm_size; - LASSERT(lsm_op_find(magic) != NULL); rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm); if (rc) { lov_free_memmd(lsmp); @@ -376,7 +378,7 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, * lmm_magic must be LOV_USER_MAGIC. */ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, - struct lov_user_md *lump) + struct lov_user_md __user *lump) { /* * XXX huge struct allocated on stack. 
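The LASSERT(lsm_op_find(magic) != NULL) calls removed above and in the lov_offset.c hunks were redundant: any lsm that reaches these paths already had its magic validated when the metadata was unpacked. A hedged sketch of the lookup pattern being asserted on (names and table contents are illustrative, not the exact Lustre definitions):

	struct lsm_operations;	/* per-magic method table: verify, unpack, free, ... */

	static const struct {
		__u32 magic;
		const struct lsm_operations *ops;
	} lsm_ops_tbl[] = {
		/* { LOV_MAGIC_V1, &lsm_v1_ops }, { LOV_MAGIC_V3, &lsm_v3_ops } */
	};

	static const struct lsm_operations *lsm_op_find(__u32 magic)
	{
		size_t i;

		for (i = 0; i < ARRAY_SIZE(lsm_ops_tbl); i++)
			if (lsm_ops_tbl[i].magic == magic)
				return lsm_ops_tbl[i].ops;
		return NULL;	/* only reachable for a magic that never
				 * passed lov_verify_lmm()
				 */
	}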
@@ -399,13 +401,15 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, set_fs(KERNEL_DS); /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) */ + * lmm_stripe_count, (the header part is common to v1 and v3) + */ lum_size = sizeof(struct lov_user_md_v1); if (copy_from_user(&lum, lump, lum_size)) { rc = -EFAULT; goto out_set; - } else if ((lum.lmm_magic != LOV_USER_MAGIC) && - (lum.lmm_magic != LOV_USER_MAGIC_V3)) { + } + if ((lum.lmm_magic != LOV_USER_MAGIC) && + (lum.lmm_magic != LOV_USER_MAGIC_V3)) { rc = -EINVAL; goto out_set; } diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c index 037ae91b7..fdcaf8047 100644 --- a/drivers/staging/lustre/lustre/lov/lov_page.c +++ b/drivers/staging/lustre/lustre/lov/lov_page.c @@ -57,7 +57,7 @@ static int lov_page_invariant(const struct cl_page_slice *slice) const struct cl_page *page = slice->cpl_page; const struct cl_page *sub = lov_sub_page(slice); - return ergo(sub != NULL, + return ergo(sub, page->cp_child == sub && sub->cp_parent == page && page->cp_state == sub->cp_state); @@ -70,7 +70,7 @@ static void lov_page_fini(const struct lu_env *env, LINVRNT(lov_page_invariant(slice)); - if (sub != NULL) { + if (sub) { LASSERT(sub->cp_state == CPS_FREEING); lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent); sub->cp_parent = NULL; @@ -151,7 +151,7 @@ static const struct cl_page_operations lov_page_ops = { static void lov_empty_page_fini(const struct lu_env *env, struct cl_page_slice *slice) { - LASSERT(slice->cpl_page->cp_child == NULL); + LASSERT(!slice->cpl_page->cp_child); } int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, @@ -172,8 +172,7 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, offset = cl_offset(obj, page->cp_index); stripe = lov_stripe_number(loo->lo_lsm, offset); LASSERT(stripe < r0->lo_nr); - rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, - &suboff); + rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff); LASSERT(rc == 0); lpg->lps_invalid = 1; diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c index b43ce6cd6..9ae1d6f42 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pool.c +++ b/drivers/staging/lustre/lustre/lov/lov_pool.c @@ -64,7 +64,7 @@ void lov_pool_putref(struct pool_desc *pool) if (atomic_dec_and_test(&pool->pool_refcount)) { LASSERT(hlist_unhashed(&pool->pool_hash)); LASSERT(list_empty(&pool->pool_list)); - LASSERT(pool->pool_debugfs_entry == NULL); + LASSERT(!pool->pool_debugfs_entry); lov_ost_pool_free(&(pool->pool_rr.lqr_pool)); lov_ost_pool_free(&(pool->pool_obds)); kfree(pool); @@ -152,9 +152,8 @@ struct cfs_hash_ops pool_hash_operations = { }; -/* ifdef needed for liblustre support */ /* - * pool /proc seq_file methods + * pool debugfs seq_file methods */ /* * iterator is used to go through the target pool entries @@ -174,7 +173,7 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos) struct pool_iterator *iter = (struct pool_iterator *)s->private; int prev_idx; - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); + LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); /* test if end of file */ if (*pos >= pool_tgt_count(iter->pool)) @@ -204,7 +203,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) if ((pool_tgt_count(pool) == 0) || (*pos >= pool_tgt_count(pool))) { /* iter is not created, so 
stop() has no way to - * find pool to dec ref */ + * find pool to dec ref + */ lov_pool_putref(pool); return NULL; } @@ -217,7 +217,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) iter->idx = 0; /* we use seq_file private field to memorized iterator so - * we can free it at stop() */ + * we can free it at stop() + */ /* /!\ do not forget to restore it to pool before freeing it */ s->private = iter; if (*pos > 0) { @@ -226,8 +227,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) i = 0; do { - ptr = pool_proc_next(s, &iter, &i); - } while ((i < *pos) && (ptr != NULL)); + ptr = pool_proc_next(s, &iter, &i); + } while ((i < *pos) && ptr); return ptr; } return iter; @@ -239,15 +240,16 @@ static void pool_proc_stop(struct seq_file *s, void *v) /* in some cases stop() method is called 2 times, without * calling start() method (see seq_read() from fs/seq_file.c) - * we have to free only if s->private is an iterator */ + * we have to free only if s->private is an iterator + */ if ((iter) && (iter->magic == POOL_IT_MAGIC)) { /* we restore s->private so next call to pool_proc_start() - * will work */ + * will work + */ s->private = iter->pool; lov_pool_putref(iter->pool); kfree(iter); } - return; } static int pool_proc_show(struct seq_file *s, void *v) @@ -255,8 +257,8 @@ static int pool_proc_show(struct seq_file *s, void *v) struct pool_iterator *iter = (struct pool_iterator *)v; struct lov_tgt_desc *tgt; - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); - LASSERT(iter->pool != NULL); + LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); + LASSERT(iter->pool); LASSERT(iter->idx <= pool_tgt_count(iter->pool)); down_read(&pool_tgt_rw_sem(iter->pool)); @@ -305,7 +307,7 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count) init_rwsem(&op->op_rw_sem); op->op_size = count; op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS); - if (op->op_array == NULL) { + if (!op->op_array) { op->op_size = 0; return -ENOMEM; } @@ -325,7 +327,7 @@ int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count) new_size = max(min_count, 2 * op->op_size); new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS); - if (new == NULL) + if (!new) return -ENOMEM; /* copy old array to new one */ @@ -429,8 +431,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) INIT_HLIST_NODE(&new_pool->pool_hash); - /* we need this assert seq_file is not implemented for liblustre */ - /* get ref for /proc file */ + /* get ref for debugfs file */ lov_pool_getref(new_pool); new_pool->pool_debugfs_entry = ldebugfs_add_simple( lov->lov_pool_debugfs_entry, @@ -443,7 +444,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) lov_pool_putref(new_pool); } CDEBUG(D_INFO, "pool %p - proc %p\n", - new_pool, new_pool->pool_debugfs_entry); + new_pool, new_pool->pool_debugfs_entry); spin_lock(&obd->obd_dev_lock); list_add_tail(&new_pool->pool_list, &lov->lov_pool_list); @@ -487,7 +488,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname) /* lookup and kill hash reference */ pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) { @@ -518,7 +519,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname) lov = &(obd->u.lov); pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; obd_str2uuid(&ost_uuid, ostname); @@ -564,7 +565,7 @@ int lov_pool_remove(struct 
obd_device *obd, char *poolname, char *ostname) lov = &(obd->u.lov); pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; obd_str2uuid(&ost_uuid, ostname); @@ -632,12 +633,12 @@ struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname) pool = NULL; if (poolname[0] != '\0') { pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n", poolname); - if ((pool != NULL) && (pool_tgt_count(pool) == 0)) { + if (pool && (pool_tgt_count(pool) == 0)) { CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n", - poolname); + poolname); /* pool is ignored, so we remove ref on it */ lov_pool_putref(pool); pool = NULL; diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c index 42deda71f..7178a02d6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ b/drivers/staging/lustre/lustre/lov/lov_request.c @@ -156,7 +156,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) tgt = lov->lov_tgts[ost_idx]; - if (unlikely(tgt == NULL)) { + if (unlikely(!tgt)) { rc = 0; goto out; } @@ -178,7 +178,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi); - if (tgt != NULL && tgt->ltd_active) + if (tgt->ltd_active) return 1; return 0; @@ -190,28 +190,23 @@ out: static int common_attr_done(struct lov_request_set *set) { - struct list_head *pos; struct lov_request *req; struct obdo *tmp_oa; int rc = 0, attrset = 0; - LASSERT(set->set_oi != NULL); - - if (set->set_oi->oi_oa == NULL) + if (!set->set_oi->oi_oa) return 0; if (!atomic_read(&set->set_success)) return -EIO; - tmp_oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (tmp_oa == NULL) { + tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!tmp_oa) { rc = -ENOMEM; goto out; } - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (!req->rq_complete || req->rq_rc) continue; if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */ @@ -227,7 +222,8 @@ static int common_attr_done(struct lov_request_set *set) if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) && (set->set_oi->oi_md->lsm_stripe_count != attrset)) { /* When we take attributes of some epoch, we require all the - * ost to be active. */ + * ost to be active. + */ CERROR("Not all the stripes had valid attrs\n"); rc = -EIO; goto out; @@ -246,7 +242,7 @@ int lov_fini_getattr_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) @@ -258,7 +254,8 @@ int lov_fini_getattr_set(struct lov_request_set *set) } /* The callback for osc_getattr_async that finalizes a request info when a - * response is received. */ + * response is received. 
+ */ static int cb_getattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -310,9 +307,8 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -337,7 +333,7 @@ out_set: int lov_fini_destroy_set(struct lov_request_set *set) { - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { @@ -368,7 +364,7 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, set->set_oi->oi_md = lsm; set->set_oi->oi_oa = src_oa; set->set_oti = oti; - if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < lsm->lsm_stripe_count; i++) { @@ -393,9 +389,8 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -419,7 +414,7 @@ int lov_fini_setattr_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { @@ -460,7 +455,8 @@ int lov_update_setattr_set(struct lov_request_set *set, } /* The callback for osc_setattr_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_setattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -486,7 +482,7 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, set->set_exp = exp; set->set_oti = oti; set->set_oi = oinfo; - if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { @@ -509,9 +505,8 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -581,7 +576,7 @@ int lov_fini_statfs_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; if (atomic_read(&set->set_completes)) { @@ -648,7 +643,8 @@ static void lov_update_statfs(struct obd_statfs *osfs, } /* The callback for osc_statfs_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_statfs_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -668,7 +664,8 @@ static int cb_statfs_update(void *cookie, int rc) lov_sfs = oinfo->oi_osfs; success = atomic_read(&set->set_success); /* XXX: the same is done in lov_update_common_set, however - lovset->set_exp is not initialized. */ + * lovset->set_exp is not initialized. 
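The allocator conversions in this hunk (and in the lovsub_* files below) are behavior-preserving rather than functional changes: kmem_cache_zalloc() is, per include/linux/slab.h, essentially a thin wrapper that folds __GFP_ZERO into the flags:

	static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
	{
		return kmem_cache_alloc(k, flags | __GFP_ZERO);
	}

so kmem_cache_alloc(cachep, GFP_NOFS | __GFP_ZERO) and kmem_cache_zalloc(cachep, GFP_NOFS) return identically zeroed objects.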
+ */ lov_update_set(set, lovreq, rc); if (rc) goto out; @@ -718,7 +715,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, for (i = 0; i < lov->desc.ld_tgt_count; i++) { struct lov_request *req; - if (lov->lov_tgts[i] == NULL || + if (!lov->lov_tgts[i] || (!lov_check_and_wait_active(lov, i) && (oinfo->oi_flags & OBD_STATFS_NODELAY))) { CDEBUG(D_HA, "lov idx %d inactive\n", i); @@ -726,7 +723,8 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, } /* skip targets that have been explicitly disabled by the - * administrator */ + * administrator + */ if (!lov->lov_tgts[i]->ltd_exp) { CDEBUG(D_HA, "lov idx %d administratively disabled\n", i); continue; diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c index f1795c3e2..c335c020f 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c @@ -101,7 +101,6 @@ static int lovsub_device_init(const struct lu_env *env, struct lu_device *d, next->ld_site = d->ld_site; ldt = next->ld_type; - LASSERT(ldt != NULL); rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL); if (rc) { next->ld_site = NULL; @@ -148,8 +147,8 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev, struct lovsub_req *lsr; int result; - lsr = kmem_cache_alloc(lovsub_req_kmem, GFP_NOFS | __GFP_ZERO); - if (lsr != NULL) { + lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS); + if (lsr) { cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); result = 0; } else @@ -175,7 +174,7 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env, struct lovsub_device *lsd; lsd = kzalloc(sizeof(*lsd), GFP_NOFS); - if (lsd != NULL) { + if (lsd) { int result; result = cl_device_init(&lsd->acid_cl, t); diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c index 1a3e30a14..3bb0c9068 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c @@ -148,7 +148,8 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in, { pgoff_t size; /* stripe size in pages */ pgoff_t skip; /* how many pages in every stripe are occupied by - * "other" stripes */ + * "other" stripes + */ pgoff_t start; pgoff_t end; @@ -284,7 +285,8 @@ static int lovsub_lock_delete_one(const struct lu_env *env, switch (parent->cll_state) { case CLS_ENQUEUED: /* See LU-1355 for the case that a glimpse lock is - * interrupted by signal */ + * interrupted by signal + */ LASSERT(parent->cll_flags & CLF_CANCELLED); break; case CLS_QUEUING: @@ -402,7 +404,7 @@ static void lovsub_lock_delete(const struct lu_env *env, restart = 0; list_for_each_entry_safe(scan, temp, - &sub->lss_parents, lll_list) { + &sub->lss_parents, lll_list) { lov = scan->lll_super; subdata = &lov->lls_sub[scan->lll_idx]; lovsub_parent_lock(env, lov); @@ -429,7 +431,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie, list_for_each_entry(scan, &sub->lss_parents, lll_list) { lov = scan->lll_super; (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov); - if (lov != NULL) + if (lov) cl_lock_descr_print(env, cookie, p, &lov->lls_cl.cls_lock->cll_descr); (*p)(env, cookie, "] "); @@ -453,8 +455,8 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, struct lovsub_lock *lsk; int result; - lsk = kmem_cache_alloc(lovsub_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lsk != NULL) { + lsk = kmem_cache_zalloc(lovsub_lock_kmem, 
GFP_NOFS); + if (lsk) { INIT_LIST_HEAD(&lsk->lss_parents); cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); result = 0; diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c index 5ba5ee1b8..6c5430d93 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_object.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c @@ -63,7 +63,7 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, under = &dev->acid_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below != NULL) { + if (below) { lu_object_add(obj, below); cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page)); result = 0; @@ -143,8 +143,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lovsub_object *los; struct lu_object *obj; - los = kmem_cache_alloc(lovsub_object_kmem, GFP_NOFS | __GFP_ZERO); - if (los != NULL) { + los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS); + if (los) { struct cl_object_header *hdr; obj = lovsub2lu(los); diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c index 3f00ce967..2d945532b 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_page.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c @@ -60,7 +60,7 @@ static const struct cl_page_operations lovsub_page_ops = { }; int lovsub_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *unused) + struct cl_page *page, struct page *unused) { struct lovsub_page *lsb = cl_object_page_slice(obj, page); diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c index 337241d84..0dcb6b6a7 100644 --- a/drivers/staging/lustre/lustre/lov/lproc_lov.c +++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c @@ -46,22 +46,22 @@ static int lov_stripesize_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%llu\n", desc->ld_default_stripe_size); return 0; } static ssize_t lov_stripesize_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; __u64 val; int rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_u64_helper(buffer, count, &val); if (rc) @@ -79,22 +79,22 @@ static int lov_stripeoffset_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%llu\n", desc->ld_default_stripe_offset); return 0; } static ssize_t lov_stripeoffset_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; __u64 val; int rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_u64_helper(buffer, count, &val); if (rc) @@ -111,21 +111,21 @@ static int lov_stripetype_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; 
seq_printf(m, "%u\n", desc->ld_pattern); return 0; } static ssize_t lov_stripetype_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; int val, rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -143,21 +143,21 @@ static int lov_stripecount_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1); return 0; } static ssize_t lov_stripecount_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; int val, rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -199,7 +199,7 @@ static int lov_desc_uuid_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_obd *lov; - LASSERT(dev != NULL); + LASSERT(dev); lov = &dev->u.lov; seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid); return 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h index 3d2997a16..c5519aeb0 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h +++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h @@ -53,7 +53,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size, void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags, struct md_op_data *data, int ea_size); void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, int ealen, void *ea2, int ea2len); + void *ea, int ealen, void *ea2, int ea2len); void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const void *data, int datalen, __u32 mode, __u32 uid, __u32 gid, cfs_cap_t capability, __u64 rdev); @@ -90,7 +90,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct ptlrpc_request **req, __u64 extra_lock_flags); int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, ldlm_mode_t mode, + struct list_head *cancels, enum ldlm_mode mode, __u64 bits); /* mdc/mdc_request.c */ int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid, @@ -119,8 +119,8 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request); int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *policy, enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, struct lu_fid *fid, __u64 *bits); @@ -129,10 +129,10 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct md_enqueue_info *minfo, struct ldlm_enqueue_info *einfo); -ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, 
- struct lustre_handle *lockh); +enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, enum ldlm_type type, + ldlm_policy_data_t *policy, enum ldlm_mode mode, + struct lustre_handle *lockh); static inline int mdc_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, int opc, diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c index 7218532ff..b3bfdcb73 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c @@ -41,8 +41,6 @@ static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid) { - LASSERT(b != NULL); - b->suppgid = suppgid; b->uid = from_kuid(&init_user_ns, current_uid()); b->gid = from_kgid(&init_user_ns, current_gid()); @@ -83,7 +81,6 @@ void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid, { struct mdt_body *b = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(b != NULL); b->valid = valid; b->eadatasize = ea_size; b->flags = flags; @@ -323,7 +320,7 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, return; lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); - if (ea == NULL) { /* Remove LOV EA */ + if (!ea) { /* Remove LOV EA */ lum->lmm_magic = LOV_USER_MAGIC_V1; lum->lmm_stripe_size = 0; lum->lmm_stripe_count = 0; @@ -346,7 +343,6 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - LASSERT(rec != NULL); rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ? REINT_RMENTRY : REINT_UNLINK; @@ -362,7 +358,7 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) rec->ul_bias = op_data->op_bias; tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); - LASSERT(tmp != NULL); + LASSERT(tmp); LOGL0(op_data->op_name, op_data->op_namelen, tmp); } @@ -373,7 +369,6 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - LASSERT(rec != NULL); rec->lk_opcode = REINT_LINK; rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */ @@ -456,10 +451,9 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req, struct ldlm_lock *lock; data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); - LASSERT(data != NULL); lock = ldlm_handle2lock(&op_data->op_lease_handle); - if (lock != NULL) { + if (lock) { data->cd_handle = lock->l_remote_handle; ldlm_lock_put(lock); } @@ -495,7 +489,8 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw) /* We record requests in flight in cli->cl_r_in_flight here. * There is only one write rpc possible in mdc anyway. If this to change - * in the future - the code may need to be revisited. */ + * in the future - the code may need to be revisited. 
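The prototype churn here follows the kernel style rule discouraging typedefs for enums: ldlm_mode_t, ldlm_type_t and ldlm_cancel_flags_t become enum ldlm_mode, enum ldlm_type and enum ldlm_cancel_flags, with no change in representation. Schematically (the constant values shown are illustrative):

	/* before: the enum hides behind a typedef */
	typedef enum { LCK_EX = 1, LCK_PW = 2, LCK_PR = 4 /* ... */ } ldlm_mode_t;

	/* after: the enum tag is spelled out at every use */
	enum ldlm_mode { LCK_EX = 1, LCK_PW = 2, LCK_PR = 4 /* ... */ };

	int mdc_resource_get_unused(struct obd_export *exp,
				    const struct lu_fid *fid,
				    struct list_head *cancels,
				    enum ldlm_mode mode, __u64 bits);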
+ */ int mdc_enter_request(struct client_obd *cli) { int rc = 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c index ef9a1e124..958a164f6 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c @@ -129,7 +129,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, lock = ldlm_handle2lock((struct lustre_handle *)lockh); - LASSERT(lock != NULL); + LASSERT(lock); lock_res_and_lock(lock); if (lock->l_resource->lr_lvb_inode && lock->l_resource->lr_lvb_inode != data) { @@ -151,13 +151,13 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, return 0; } -ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, enum ldlm_type type, + ldlm_policy_data_t *policy, enum ldlm_mode mode, + struct lustre_handle *lockh) { struct ldlm_res_id res_id; - ldlm_mode_t rc; + enum ldlm_mode rc; fid_build_reg_res_name(fid, &res_id); /* LU-4405: Clear bits not supported by server */ @@ -170,8 +170,8 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_res_id res_id; @@ -191,12 +191,12 @@ int mdc_null_inode(struct obd_export *exp, struct ldlm_resource *res; struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace; - LASSERTF(ns != NULL, "no namespace passed\n"); + LASSERTF(ns, "no namespace passed\n"); fid_build_reg_res_name(fid, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; lock_res(res); @@ -210,7 +210,8 @@ int mdc_null_inode(struct obd_export *exp, /* find any ldlm lock of the inode in mdc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ int mdc_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, ldlm_iterator_t it, void *data) @@ -252,7 +253,8 @@ static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) * OOM here may cause recovery failure if lmm is needed (only for the * original open if the MDS crashed just when this client also OOM'd) * but this is incredibly unlikely, and questionable whether the client - * could do MDS recovery under OOM anyways... */ + * could do MDS recovery under OOM anyways... 
+ */ static void mdc_realloc_openmsg(struct ptlrpc_request *req, struct mdt_body *body) { @@ -317,7 +319,7 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_OPEN); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return ERR_PTR(-ENOMEM); } @@ -364,8 +366,8 @@ mdc_intent_getxattr_pack(struct obd_export *exp, LIST_HEAD(cancels); req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_GETXATTR); - if (req == NULL) + &RQF_LDLM_INTENT_GETXATTR); + if (!req) return ERR_PTR(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); @@ -384,14 +386,12 @@ mdc_intent_getxattr_pack(struct obd_export *exp, mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, maxdata, -1, 0); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, - RCL_SERVER, maxdata); + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, maxdata); - req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, - RCL_SERVER, maxdata); + req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, RCL_SERVER, maxdata); req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS, - RCL_SERVER, maxdata); + RCL_SERVER, maxdata); ptlrpc_request_set_replen(req); @@ -409,7 +409,7 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_UNLINK); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -437,8 +437,8 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, } static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *op_data) + struct lookup_intent *it, + struct md_op_data *op_data) { struct ptlrpc_request *req; struct obd_device *obddev = class_exp2obd(exp); @@ -453,7 +453,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_GETATTR); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -496,8 +496,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_LAYOUT); - if (req == NULL) + &RQF_LDLM_INTENT_LAYOUT); + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0); @@ -514,7 +514,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, /* pack the layout intent request */ layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT); /* LAYOUT_INTENT_ACCESS is generic, specific operation will be - * set for replication */ + * set for replication + */ layout->li_opc = LAYOUT_INTENT_ACCESS; req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, @@ -530,7 +531,7 @@ mdc_enqueue_pack(struct obd_export *exp, int lvb_len) int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); @@ -561,7 +562,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, LASSERT(rc >= 0); /* Similarly, if we're going to replay this request, we don't want to - * actually get a lock, just perform the intent. */ + * actually get a lock, just perform the intent. 
+ */ if (req->rq_transno || req->rq_replay) { lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ); lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY); @@ -573,10 +575,10 @@ static int mdc_finish_enqueue(struct obd_export *exp, rc = 0; } else { /* rc = 0 */ lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); /* If the server gave us back a different lock mode, we should - * fix up our variables. */ + * fix up our variables. + */ if (lock->l_req_mode != einfo->ei_mode) { ldlm_lock_addref(lockh, lock->l_req_mode); ldlm_lock_decref(lockh, einfo->ei_mode); @@ -586,7 +588,6 @@ static int mdc_finish_enqueue(struct obd_export *exp, } lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */ intent->it_disposition = (int)lockrep->lock_policy_res1; intent->it_status = (int)lockrep->lock_policy_res2; @@ -595,7 +596,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, intent->it_data = req; /* Technically speaking rq_transno must already be zero if - * it_status is in error, so the check is a bit redundant */ + * it_status is in error, so the check is a bit redundant + */ if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay) mdc_clear_replay_flag(req, intent->it_status); @@ -605,7 +607,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, * * It's important that we do this first! Otherwise we might exit the * function without doing so, and try to replay a failed create - * (bug 3440) */ + * (bug 3440) + */ if (it->it_op & IT_OPEN && req->rq_replay && (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0)) mdc_clear_replay_flag(req, intent->it_status); @@ -618,7 +621,7 @@ static int mdc_finish_enqueue(struct obd_export *exp, struct mdt_body *body; body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { CERROR("Can't swab mdt_body\n"); return -EPROTO; } @@ -645,11 +648,12 @@ static int mdc_finish_enqueue(struct obd_export *exp, */ eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, body->eadatasize); - if (eadata == NULL) + if (!eadata) return -EPROTO; /* save lvb data and length in case this is for layout - * lock */ + * lock + */ lvb_data = eadata; lvb_len = body->eadatasize; @@ -690,31 +694,32 @@ static int mdc_finish_enqueue(struct obd_export *exp, LASSERT(client_is_remote(exp)); perm = req_capsule_server_swab_get(pill, &RMF_ACL, lustre_swab_mdt_remote_perm); - if (perm == NULL) + if (!perm) return -EPROTO; } } else if (it->it_op & IT_LAYOUT) { /* maybe the lock was granted right away and layout - * is packed into RMF_DLM_LVB of req */ + * is packed into RMF_DLM_LVB of req + */ lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER); if (lvb_len > 0) { lvb_data = req_capsule_server_sized_get(pill, &RMF_DLM_LVB, lvb_len); - if (lvb_data == NULL) + if (!lvb_data) return -EPROTO; } } /* fill in stripe data for layout lock */ lock = ldlm_handle2lock(lockh); - if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) { + if (lock && ldlm_has_layout(lock) && lvb_data) { void *lmm; LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n", - ldlm_it2str(it->it_op), lvb_len); + ldlm_it2str(it->it_op), lvb_len); lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS); - if (lmm == NULL) { + if (!lmm) { LDLM_LOCK_PUT(lock); return -ENOMEM; } @@ -722,24 +727,25 @@ static int mdc_finish_enqueue(struct obd_export *exp, /* install lvb_data */ lock_res_and_lock(lock); - if (lock->l_lvb_data == NULL) { + if (!lock->l_lvb_data) { lock->l_lvb_type = LVB_T_LAYOUT; 
lock->l_lvb_data = lmm; lock->l_lvb_len = lvb_len; lmm = NULL; } unlock_res_and_lock(lock); - if (lmm != NULL) + if (lmm) kvfree(lmm); } - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return rc; } /* We always reserve enough space in the reply packet for a stripe MD, because - * we don't know in advance the file type. */ + * we don't know in advance the file type. + */ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, void *lmm, int lmmsize, @@ -782,14 +788,15 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, policy = &getxattr_policy; } - LASSERT(reqp == NULL); + LASSERT(!reqp); generation = obddev->u.cli.cl_import->imp_generation; resend: flags = saved_flags; if (!it) { /* The only way right now is FLOCK, in this case we hide flock - policy as lmm, but lmmsize is 0 */ + * policy as lmm, but lmmsize is 0 + */ LASSERT(lmm && lmmsize == 0); LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", einfo->ei_type); @@ -823,9 +830,10 @@ resend: if (IS_ERR(req)) return PTR_ERR(req); - if (req != NULL && it && it->it_op & IT_CREAT) + if (req && it && it->it_op & IT_CREAT) /* ask ptlrpc not to resend on EINPROGRESS since we have our own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; if (resends) { @@ -836,7 +844,8 @@ resend: /* It is important to obtain rpc_lock first (if applicable), so that * threads that are serialised with rpc_lock are not polluting our - * rpcs in flight counter. We do not do flock request limiting, though*/ + * rpcs in flight counter. We do not do flock request limiting, though + */ if (it) { mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); rc = mdc_enter_request(&obddev->u.cli); @@ -852,13 +861,14 @@ resend: 0, lvb_type, lockh, 0); if (!it) { /* For flock requests we immediately return without further - delay and let caller deal with the rest, since rest of - this function metadata processing makes no sense for flock - requests anyway. But in case of problem during comms with - Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we - can not rely on caller and this mainly for F_UNLCKs - (explicits or automatically generated by Kernel to clean - current FLocks upon exit) that can't be trashed */ + * delay and let the caller deal with the rest, since the rest + * of this function's metadata processing makes no sense for + * flock requests anyway. But if there is a problem during + * comms with the server (ETIMEDOUT) or any signal/kill attempt + * (EINTR), we cannot rely on the caller; this is mainly for + * F_UNLCKs (explicit, or generated automatically by the kernel + * to clean up current flocks upon exit) that can't be trashed + */ if ((rc == -EINTR) || (rc == -ETIMEDOUT)) goto resend; return rc; @@ -878,13 +888,13 @@ resend: } lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); /* Retry the create infinitely when we get -EINPROGRESS from - * server. This is required by the new quota design.
+ */ if (it->it_op & IT_CREAT && (int)lockrep->lock_policy_res2 == -EINPROGRESS) { mdc_clear_replay_flag(req, rc); @@ -930,13 +940,13 @@ static int mdc_finish_intent_lock(struct obd_export *exp, struct ldlm_lock *lock; int rc; - LASSERT(request != NULL); LASSERT(request != LP_POISON); LASSERT(request->rq_repmsg != LP_POISON); if (!it_disposition(it, DISP_IT_EXECD)) { /* The server failed before it even started executing the - * intent, i.e. because it couldn't unpack the request. */ + * intent, i.e. because it couldn't unpack the request. + */ LASSERT(it->d.lustre.it_status != 0); return it->d.lustre.it_status; } @@ -945,10 +955,11 @@ static int mdc_finish_intent_lock(struct obd_export *exp, return rc; mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(mdt_body != NULL); /* mdc_enqueue checked */ + LASSERT(mdt_body); /* mdc_enqueue checked */ /* If we were revalidating a fid/name pair, mark the intent in - * case we fail and get called again from lookup */ + * case we fail and get called again from lookup + */ if (fid_is_sane(&op_data->op_fid2) && it->it_create_mode & M_CHECK_STALE && it->it_op != IT_GETATTR) { @@ -957,7 +968,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp, /* sever can return one of two fids: * op_fid2 - new allocated fid - if file is created. * op_fid3 - existent fid - if file only open. - * op_fid3 is saved in lmv_intent_open */ + * op_fid3 is saved in lmv_intent_open + */ if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) && (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) { CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID @@ -1001,7 +1013,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp, * one. We have to set the data here instead of in * mdc_enqueue, because we need to use the child's inode as * the l_ast_data to match, and that's not available until - * intent_finish has performed the iget().) */ + * intent_finish has performed the iget().) + */ lock = ldlm_handle2lock(lockh); if (lock) { ldlm_policy_data_t policy = lock->l_policy_data; @@ -1036,11 +1049,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's - * verify that. */ + * verify that. + */ struct ldlm_res_id res_id; struct lustre_handle lockh; ldlm_policy_data_t policy; - ldlm_mode_t mode; + enum ldlm_mode mode; if (it->d.lustre.it_lock_handle) { lockh.cookie = it->d.lustre.it_lock_handle; @@ -1059,10 +1073,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, * Unfortunately, if the bits are split across multiple * locks, there's no easy way to match all of them here, * so an extra RPC would be performed to fetch all - * of those bits at once for now. */ + * of those bits at once for now. + */ /* For new MDTs(> 2.4), UPDATE|PERM should be enough, * but for old MDTs (< 2.4), permission is covered - * by LOOKUP lock, so it needs to match all bits here.*/ + * by LOOKUP lock, so it needs to match all bits here. 
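mdc_enqueue retries in two distinct ways: transient transport failures (-EINTR, -ETIMEDOUT) on flock requests jump back to resend:, and -EINPROGRESS from the server is retried indefinitely, per the quota-design comment above. A compact standalone sketch of that shape, with issue() as a hypothetical stand-in for the actual RPC:

        #include <errno.h>
        #include <stdio.h>

        /* @issue returns 0 or a negative errno, like the enqueue path. */
        static int enqueue_with_retry(int (*issue)(void))
        {
                int rc, resends = 0;

        resend:
                rc = issue();
                if (rc == -EINTR || rc == -ETIMEDOUT)
                        goto resend;            /* comms hiccup: redo request */
                if (rc == -EINPROGRESS) {
                        resends++;              /* server not ready yet */
                        fprintf(stderr, "resend #%d\n", resends);
                        goto resend;
                }
                return rc;
        }

The driver additionally records the import generation before the loop so a resend against a reconnected import can be detected.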
+ */ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM; @@ -1076,7 +1092,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, } mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid, - LDLM_IBITS, &policy, + LDLM_IBITS, &policy, LCK_CR | LCK_CW | LCK_PR | LCK_PW, &lockh); } @@ -1147,11 +1163,13 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data, (it->it_op & (IT_LOOKUP | IT_GETATTR))) { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's - * verify that. */ + * verify that. + */ it->d.lustre.it_lock_handle = 0; rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL); /* Only return failure if it was not GETATTR by cfid - (from inode_revalidate) */ + * (from inode_revalidate) + */ if (rc || op_data->op_namelen != 0) return rc; } @@ -1206,7 +1224,6 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env, } lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); @@ -1235,7 +1252,8 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct ldlm_res_id res_id; /*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed * for statahead currently. Consider CMD in future, such two bits - * maybe managed by different MDS, should be adjusted then. */ + * maybe managed by different MDS, should be adjusted then. + */ ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE } @@ -1244,9 +1262,9 @@ int mdc_intent_getattr_async(struct obd_export *exp, __u64 flags = LDLM_FL_HAS_INTENT; CDEBUG(D_DLMTRACE, - "name: %.*s in inode "DFID", intent: %s flags %#Lo\n", - op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), - ldlm_it2str(it->it_op), it->it_flags); + "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + ldlm_it2str(it->it_op), it->it_flags); fid_build_reg_res_name(&op_data->op_fid1, &res_id); req = mdc_intent_getattr_pack(exp, it, op_data); diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c index ac7695a10..4ef3db147 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c @@ -65,9 +65,10 @@ static int mdc_reint(struct ptlrpc_request *request, /* Find and cancel locally locks matched by inode @bits & @mode in the resource * found by @fid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, ldlm_mode_t mode, + struct list_head *cancels, enum ldlm_mode mode, __u64 bits) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; @@ -81,14 +82,15 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. 
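The bit mask built just above exists because pre-2.4 MDTs cover permission under the LOOKUP lock while newer ones grant UPDATE|PERM directly; a cached lock satisfies a revalidation only if its granted bits cover every wanted bit. Sketch of the cover test, with illustrative bit values only:

        #include <stdint.h>
        #include <stdbool.h>

        /* values illustrative; the real MDS_INODELOCK_* constants differ */
        #define INODELOCK_LOOKUP 0x01ULL
        #define INODELOCK_UPDATE 0x02ULL
        #define INODELOCK_PERM   0x10ULL

        /* a lock matches iff its granted bits cover every requested bit */
        static bool ibits_covers(uint64_t granted, uint64_t wanted)
        {
                return (granted & wanted) == wanted;
        }

        /* older MDTs need LOOKUP included, as in the hunk above */
        static uint64_t revalidate_bits(bool old_mdt)
        {
                uint64_t bits = INODELOCK_UPDATE | INODELOCK_PERM;

                return old_mdt ? bits | INODELOCK_LOOKUP : bits;
        }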
+ */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; fid_build_reg_res_name(fid, &res_id); res = ldlm_resource_get(exp->exp_obd->obd_namespace, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); /* Initialize ibits lock policy. */ @@ -111,8 +113,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, int count = 0, rc; __u64 bits; - LASSERT(op_data != NULL); - bits = MDS_INODELOCK_UPDATE; if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) bits |= MDS_INODELOCK_LOOKUP; @@ -123,7 +123,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, &cancels, LCK_EX, bits); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_SETATTR); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -151,10 +151,10 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, ptlrpc_request_set_replen(req); if (mod && (op_data->op_flags & MF_EPOCH_OPEN) && req->rq_import->imp_replayable) { - LASSERT(*mod == NULL); + LASSERT(!*mod); *mod = obd_mod_alloc(); - if (*mod == NULL) { + if (!*mod) { DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data"); } else { req->rq_replay = 1; @@ -181,8 +181,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(epoch != NULL); - LASSERT(body != NULL); epoch->handle = body->handle; epoch->ioepoch = body->ioepoch; req->rq_replay_cb = mdc_replay_open; @@ -195,7 +193,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, *request = req; if (rc && req->rq_commit_cb) { /* Put an extra reference on \var mod on error case. */ - if (mod != NULL && *mod != NULL) + if (mod && *mod) obd_mod_put(*mod); req->rq_commit_cb(req); } @@ -237,7 +235,7 @@ rebuild: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_CREATE_RMT_ACL); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -262,7 +260,8 @@ rebuild: ptlrpc_request_set_replen(req); /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry - * logic here */ + * logic here + */ req->rq_no_retry_einprogress = 1; if (resends) { @@ -280,7 +279,8 @@ rebuild: goto resend; } else if (rc == -EINPROGRESS) { /* Retry create infinitely until succeed or get other - * error code. */ + * error code. 
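mdc_resource_get_unused, completed above, walks a resource and moves idle matching locks onto the caller's cancel list so the cancels can ride along on the next RPC (early lock cancellation), returning how many were collected. A minimal singly-linked-list model of that contract, names hypothetical:

        #include <stdbool.h>
        #include <stddef.h>

        struct lk {
                struct lk *next;
                bool in_use;            /* still referenced by users */
                unsigned mode;
        };

        /* Move every idle lock matching @mode from *res onto *cancels and
         * return the count, mirroring the @cancels contract above. */
        static int get_unused(struct lk **res, struct lk **cancels, unsigned mode)
        {
                int count = 0;
                struct lk **pp = res;

                while (*pp) {
                        struct lk *l = *pp;

                        if (!l->in_use && l->mode == mode) {
                                *pp = l->next;          /* unlink */
                                l->next = *cancels;     /* push on cancel list */
                                *cancels = l;
                                count++;
                        } else {
                                pp = &l->next;
                        }
                }
                return count;
        }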
+ */ ptlrpc_req_finished(req); resends++; @@ -308,7 +308,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request *req = *request; int count = 0, rc; - LASSERT(req == NULL); + LASSERT(!req); if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && (fid_is_sane(&op_data->op_fid1)) && @@ -324,7 +324,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_UNLINK); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -373,7 +373,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_UPDATE); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -422,14 +422,14 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, &cancels, LCK_EX, MDS_INODELOCK_LOOKUP); if ((op_data->op_flags & MF_MDC_CANCEL_FID4) && - (fid_is_sane(&op_data->op_fid4))) + (fid_is_sane(&op_data->op_fid4))) count += mdc_resource_get_unused(exp, &op_data->op_fid4, &cancels, LCK_EX, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_RENAME); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index 57e0fc1e8..b91d3ff18 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -48,6 +48,7 @@ #include "../include/lprocfs_status.h" #include "../include/lustre_param.h" #include "../include/lustre_log.h" +#include "../include/lustre_kernelcomm.h" #include "mdc_internal.h" @@ -62,7 +63,8 @@ static inline int mdc_queue_wait(struct ptlrpc_request *req) /* mdc_enter_request() ensures that this client has no more * than cl_max_rpcs_in_flight RPCs simultaneously inf light - * against an MDT. */ + * against an MDT. 
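mdc_enter_request(), called in the line that follows, is what enforces the cl_max_rpcs_in_flight cap described in the reflowed comment. A pthread model of such a gate; the real implementation is interruptible and can fail, hence its int return, which this sketch omits:

        #include <pthread.h>

        struct rpc_gate {
                pthread_mutex_t mtx;
                pthread_cond_t  cv;
                int in_flight;
                int max_in_flight;      /* cl_max_rpcs_in_flight analogue */
        };

        static void enter_request(struct rpc_gate *g)
        {
                pthread_mutex_lock(&g->mtx);
                while (g->in_flight >= g->max_in_flight)
                        pthread_cond_wait(&g->cv, &g->mtx);
                g->in_flight++;
                pthread_mutex_unlock(&g->mtx);
        }

        static void exit_request(struct rpc_gate *g)
        {
                pthread_mutex_lock(&g->mtx);
                g->in_flight--;
                pthread_cond_signal(&g->cv);
                pthread_mutex_unlock(&g->mtx);
        }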
+ */ rc = mdc_enter_request(cli); if (rc != 0) return rc; @@ -82,7 +84,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETSTATUS, LUSTRE_MDS_VERSION, MDS_GETSTATUS); - if (req == NULL) + if (!req) return -ENOMEM; mdc_pack_body(req, NULL, 0, 0, -1, 0); @@ -95,7 +97,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -135,7 +137,7 @@ static int mdc_getattr_common(struct obd_export *exp, /* sanity check for the reply */ body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; CDEBUG(D_NET, "mode: %o\n", body->mode); @@ -145,7 +147,7 @@ static int mdc_getattr_common(struct obd_export *exp, eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, body->eadatasize); - if (eadata == NULL) + if (!eadata) return -EPROTO; } @@ -155,7 +157,7 @@ static int mdc_getattr_common(struct obd_export *exp, LASSERT(client_is_remote(exp)); perm = req_capsule_server_swab_get(pill, &RMF_ACL, lustre_swab_mdt_remote_perm); - if (perm == NULL) + if (!perm) return -EPROTO; } @@ -163,7 +165,7 @@ static int mdc_getattr_common(struct obd_export *exp, } static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) + struct ptlrpc_request **request) { struct ptlrpc_request *req; int rc; @@ -175,7 +177,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, } *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); @@ -205,7 +207,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, } static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) + struct ptlrpc_request **request) { struct ptlrpc_request *req; int rc; @@ -213,7 +215,7 @@ static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR_NAME); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -260,7 +262,7 @@ static int mdc_is_subdir(struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION, MDS_IS_SUBDIR); - if (req == NULL) + if (!req) return -ENOMEM; mdc_is_subdir_pack(req, pfid, cfid, 0); @@ -289,7 +291,7 @@ static int mdc_xattr_common(struct obd_export *exp, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt); - if (req == NULL) + if (!req) return -ENOMEM; if (xattr_name) { @@ -424,7 +426,7 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) return -EPROTO; acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize); - if (acl == NULL) + if (!acl) return 0; if (IS_ERR(acl)) { @@ -460,7 +462,6 @@ static int mdc_get_lustre_md(struct obd_export *exp, memset(md, 0, sizeof(*md)); md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); - LASSERT(md->body != NULL); if (md->body->valid & OBD_MD_FLEASIZE) { int lmmsize; @@ -592,17 +593,16 @@ void mdc_replay_open(struct ptlrpc_request *req) struct lustre_handle old; struct mdt_body *body; - if (mod == NULL) { + if (!mod) { DEBUG_REQ(D_ERROR, req, "Can't properly replay 
without open data."); return; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); och = mod->mod_och; - if (och != NULL) { + if (och) { struct lustre_handle *file_fh; LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC); @@ -614,7 +614,7 @@ void mdc_replay_open(struct ptlrpc_request *req) *file_fh = body->handle; } close_req = mod->mod_close_req; - if (close_req != NULL) { + if (close_req) { __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg); struct mdt_ioepoch *epoch; @@ -623,7 +623,7 @@ void mdc_replay_open(struct ptlrpc_request *req) &RMF_MDT_EPOCH); LASSERT(epoch); - if (och != NULL) + if (och) LASSERT(!memcmp(&old, &epoch->handle, sizeof(old))); DEBUG_REQ(D_HA, close_req, "updating close body with new fh"); epoch->handle = body->handle; @@ -634,7 +634,7 @@ void mdc_commit_open(struct ptlrpc_request *req) { struct md_open_data *mod = req->rq_cb_data; - if (mod == NULL) + if (!mod) return; /** @@ -674,15 +674,15 @@ int mdc_set_open_replay_data(struct obd_export *exp, rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT); body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); - LASSERT(rec != NULL); + LASSERT(rec); /* Incoming message in my byte order (it's been swabbed). */ /* Outgoing messages always in my byte order. */ - LASSERT(body != NULL); + LASSERT(body); /* Only if the import is replayable, we set replay_open data */ if (och && imp->imp_replayable) { mod = obd_mod_alloc(); - if (mod == NULL) { + if (!mod) { DEBUG_REQ(D_ERROR, open_req, "Can't allocate md_open_data"); return 0; @@ -748,11 +748,11 @@ static int mdc_clear_open_replay_data(struct obd_export *exp, * It is possible to not have \var mod in a case of eviction between * lookup and ll_file_open(). **/ - if (mod == NULL) + if (!mod) return 0; LASSERT(mod != LP_POISON); - LASSERT(mod->mod_open_req != NULL); + LASSERT(mod->mod_open_req); mdc_free_open(mod); mod->mod_och = NULL; @@ -803,7 +803,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE); @@ -814,13 +814,14 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a * portal whose threads are not taking any DLM locks and are therefore - * always progressing */ + * always progressing + */ req->rq_request_portal = MDS_READPAGE_PORTAL; ptlrpc_at_set_req_timeout(req); /* Ensure that this close's handle is fixed up during replay. */ - if (likely(mod != NULL)) { - LASSERTF(mod->mod_open_req != NULL && + if (likely(mod)) { + LASSERTF(mod->mod_open_req && mod->mod_open_req->rq_type != LI_POISON, "POISONED open %p!\n", mod->mod_open_req); @@ -828,7 +829,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, DEBUG_REQ(D_HA, mod->mod_open_req, "matched open"); /* We no longer want to preserve this open for replay even - * though the open was committed. b=3632, b=3633 */ + * though the open was committed. 
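The close path in the lines that follow clears rq_replay under the request's spinlock so the replay machinery never observes the flag mid-change; mdc_done_writing further down does the same. A minimal model, with a pthread mutex standing in for rq_lock:

        #include <pthread.h>
        #include <stdbool.h>

        struct request {
                pthread_mutex_t lock;   /* rq_lock analogue */
                bool replay;            /* rq_replay analogue */
        };

        /* once the open is committed it must not be replayed again (b=3632);
         * flip the flag under the lock so readers see a consistent request */
        static void clear_replay(struct request *req)
        {
                pthread_mutex_lock(&req->lock);
                req->replay = false;
                pthread_mutex_unlock(&req->lock);
        }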
b=3632, b=3633 + */ spin_lock(&mod->mod_open_req->rq_lock); mod->mod_open_req->rq_replay = 0; spin_unlock(&mod->mod_open_req->rq_lock); @@ -850,7 +852,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, rc = ptlrpc_queue_wait(req); mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL); - if (req->rq_repmsg == NULL) { + if (!req->rq_repmsg) { CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, req->rq_status); if (rc == 0) @@ -866,7 +868,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, rc = -rc; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) rc = -EPROTO; } else if (rc == -ESTALE) { /** @@ -876,7 +878,6 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, */ if (mod) { DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc); - LASSERT(mod->mod_open_req != NULL); if (mod->mod_open_req->rq_committed) rc = 0; } @@ -886,7 +887,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) mod->mod_close_req = NULL; /* Since now, mod is accessed through open_req only, - * thus close req does not keep a reference on mod anymore. */ + * thus close req does not keep a reference on mod anymore. + */ obd_mod_put(mod); } *request = req; @@ -903,7 +905,7 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_DONE_WRITING); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING); @@ -912,15 +914,16 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, return rc; } - if (mod != NULL) { - LASSERTF(mod->mod_open_req != NULL && + if (mod) { + LASSERTF(mod->mod_open_req && mod->mod_open_req->rq_type != LI_POISON, "POISONED setattr %p!\n", mod->mod_open_req); mod->mod_close_req = req; DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr"); /* We no longer want to preserve this setattr for replay even - * though the open was committed. b=3632, b=3633 */ + * though the open was committed. b=3632, b=3633 + */ spin_lock(&mod->mod_open_req->rq_lock); mod->mod_open_req->rq_replay = 0; spin_unlock(&mod->mod_open_req->rq_lock); @@ -940,7 +943,6 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, * Let's check if mod exists and return no error in that case */ if (mod) { - LASSERT(mod->mod_open_req != NULL); if (mod->mod_open_req->rq_committed) rc = 0; } @@ -949,11 +951,12 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, if (mod) { if (rc != 0) mod->mod_close_req = NULL; - LASSERT(mod->mod_open_req != NULL); + LASSERT(mod->mod_open_req); mdc_free_open(mod); /* Since now, mod is accessed through setattr req only, - * thus DW req does not keep a reference on mod anymore. */ + * thus DW req does not keep a reference on mod anymore. 
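md_open_data is shared between the open, close and setattr requests and released with obd_mod_put(), as in the hunks above; once the close or done-writing request drops its reference, only the open request pins it. A deliberately simplified single-threaded counter model (the real object is managed with atomic refcounts):

        #include <stdlib.h>

        struct open_data {
                int refcount;           /* allocation returns it at 1 */
                /* ... replay bookkeeping ... */
        };

        static struct open_data *mod_alloc(void)
        {
                struct open_data *mod = calloc(1, sizeof(*mod));

                if (mod)
                        mod->refcount = 1;
                return mod;
        }

        static void mod_put(struct open_data *mod)
        {
                if (mod && --mod->refcount == 0)
                        free(mod);
        }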
+ */ obd_mod_put(mod); } @@ -978,7 +981,7 @@ static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data, restart_bulk: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE); @@ -992,17 +995,17 @@ restart_bulk: desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK, MDS_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { ptlrpc_request_free(req); return -ENOMEM; } /* NB req now owns desc and will free it when it gets freed */ for (i = 0; i < op_data->op_npages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); mdc_readdir_pack(req, op_data->op_offset, - PAGE_CACHE_SIZE * op_data->op_npages, + PAGE_SIZE * op_data->op_npages, &op_data->op_fid1); ptlrpc_request_set_replen(req); @@ -1033,8 +1036,8 @@ restart_bulk: if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", - req->rq_bulk->bd_nob_transferred, - PAGE_CACHE_SIZE * op_data->op_npages); + req->rq_bulk->bd_nob_transferred, + PAGE_SIZE * op_data->op_npages); ptlrpc_req_finished(req); return -EPROTO; } @@ -1066,7 +1069,7 @@ static int mdc_statfs(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS, LUSTRE_MDS_VERSION, MDS_STATFS); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto output; } @@ -1088,7 +1091,7 @@ static int mdc_statfs(const struct lu_env *env, } msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -1161,7 +1164,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS, LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1170,7 +1173,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, /* Copy hsm_progress struct */ req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS); - if (req_hpk == NULL) { + if (!req_hpk) { rc = -EPROTO; goto out; } @@ -1195,7 +1198,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives) req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER, LUSTRE_MDS_VERSION, MDS_HSM_CT_REGISTER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1205,7 +1208,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives) /* Copy hsm_progress struct */ archive_mask = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_ARCHIVE); - if (archive_mask == NULL) { + if (!archive_mask) { rc = -EPROTO; goto out; } @@ -1230,7 +1233,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_ACTION); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION); @@ -1250,7 +1253,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp, req_hca = req_capsule_server_get(&req->rq_pill, &RMF_MDS_HSM_CURRENT_ACTION); - if (req_hca == NULL) { + if (!req_hca) { rc = -EPROTO; goto out; } @@ -1270,7 +1273,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp) req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER, LUSTRE_MDS_VERSION, MDS_HSM_CT_UNREGISTER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1295,7 +1298,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp, req 
= ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_STATE_GET); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET); @@ -1314,7 +1317,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp, goto out; req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE); - if (req_hus == NULL) { + if (!req_hus) { rc = -EPROTO; goto out; } @@ -1336,7 +1339,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_STATE_SET); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET); @@ -1350,7 +1353,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp, /* Copy states */ req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET); - if (req_hss == NULL) { + if (!req_hss) { rc = -EPROTO; goto out; } @@ -1375,7 +1378,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, int rc; req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1396,7 +1399,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy hsm_request struct */ req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST); - if (req_hr == NULL) { + if (!req_hr) { rc = -EPROTO; goto out; } @@ -1404,7 +1407,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy hsm_user_item structs */ req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM); - if (req_hui == NULL) { + if (!req_hui) { rc = -EPROTO; goto out; } @@ -1413,7 +1416,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy opaque field */ req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA); - if (req_opaque == NULL) { + if (!req_opaque) { rc = -EPROTO; goto out; } @@ -1512,7 +1515,7 @@ static int mdc_changelog_send_thread(void *csdata) /* Set up the remote catalog handle */ ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT); - if (ctxt == NULL) { + if (!ctxt) { rc = -ENOENT; goto out; } @@ -1553,6 +1556,7 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, struct ioc_changelog *icc) { struct changelog_show *cs; + struct task_struct *task; int rc; /* Freed in mdc_changelog_send_thread */ @@ -1570,15 +1574,20 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, * New thread because we should return to user app before * writing into our pipe */ - rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs, - "mdc_clg_send_thread")); - if (!IS_ERR_VALUE(rc)) { - CDEBUG(D_CHANGELOG, "start changelog thread\n"); - return 0; + task = kthread_run(mdc_changelog_send_thread, cs, + "mdc_clg_send_thread"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: can't start changelog thread: rc = %d\n", + obd->obd_name, rc); + kfree(cs); + } else { + rc = 0; + CDEBUG(D_CHANGELOG, "%s: started changelog thread\n", + obd->obd_name); } CERROR("Failed to start changelog thread: %d\n", rc); - kfree(cs); return rc; } @@ -1596,7 +1605,7 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION, MDS_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -1605,7 +1614,8 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); /* the next poll will find -ENODATA, that means 
quotacheck is - * going on */ + * going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) @@ -1640,7 +1650,7 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION, MDS_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -1694,7 +1704,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SWAP_LAYOUTS); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -1721,7 +1731,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp, } static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct obd_ioctl_data *data = karg; @@ -1729,7 +1739,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, int rc; if (!try_module_get(THIS_MODULE)) { - CERROR("Can't get module. Is it alive?"); + CERROR("%s: cannot get module '%s'\n", obd->obd_name, + module_name(THIS_MODULE)); return -EINVAL; } switch (cmd) { @@ -1805,7 +1816,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd), min_t(size_t, data->ioc_plen2, - sizeof(struct obd_uuid)))) { + sizeof(struct obd_uuid)))) { rc = -EFAULT; goto out; } @@ -1818,7 +1829,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (copy_to_user(data->ioc_pbuf1, &stat_buf, min_t(size_t, data->ioc_plen1, - sizeof(stat_buf)))) { + sizeof(stat_buf)))) { rc = -EFAULT; goto out; } @@ -1880,7 +1891,7 @@ static int mdc_get_info_rpc(struct obd_export *exp, int rc = -EINVAL; req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, @@ -1905,7 +1916,8 @@ static int mdc_get_info_rpc(struct obd_export *exp, rc = ptlrpc_queue_wait(req); /* -EREMOTE means the get_info result is partial, and it needs to - * continue on another MDT, see fid2path part in lmv_iocontrol */ + * continue on another MDT, see fid2path part in lmv_iocontrol + */ if (rc == 0 || rc == -EREMOTE) { tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL); memcpy(val, tmp, vallen); @@ -2013,21 +2025,27 @@ static int mdc_hsm_copytool_send(int len, void *val) /** * callback function passed to kuc for re-registering each HSM copytool * running on MDC, after MDT shutdown/recovery. 
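The changelog-thread hunk above is the substantive fix in this area: the old code ran PTR_ERR() over kthread_run()'s result unconditionally and tested it with IS_ERR_VALUE(), discarding the task handle and hiding the success path behind pointer-as-integer arithmetic. Keeping the struct task_struct pointer and testing IS_ERR() is the idiomatic kernel pattern; mgc_setup below receives the same treatment. A kernel-style sketch, function names hypothetical:

        #include <linux/kthread.h>
        #include <linux/err.h>

        static int my_thread_fn(void *data)
        {
                /* ... do work, then ... */
                return 0;
        }

        static int start_worker(void *arg)
        {
                struct task_struct *task;

                task = kthread_run(my_thread_fn, arg, "my_worker");
                if (IS_ERR(task))
                        return PTR_ERR(task);   /* e.g. -ENOMEM */
                return 0;
        }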
- * @param data archive id served by the copytool + * @param data copytool registration data * @param cb_arg callback argument (obd_import) */ -static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg) +static int mdc_hsm_ct_reregister(void *data, void *cb_arg) { + struct kkuc_ct_data *kcd = data; struct obd_import *imp = (struct obd_import *)cb_arg; - __u32 archive = data; int rc; - CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n", - archive); - rc = mdc_ioc_hsm_ct_register(imp, archive); + if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC) + return -EPROTO; + + if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid)) + return 0; + + CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n", + imp->imp_obd->obd_name, kcd->kcd_archive); + rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive); /* ignore error if the copytool is already registered */ - return ((rc != 0) && (rc != -EEXIST)) ? rc : 0; + return (rc == -EEXIST) ? 0 : rc; } static int mdc_set_info_async(const struct lu_env *env, @@ -2133,7 +2151,7 @@ static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC); @@ -2175,7 +2193,7 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp, * Flush current sequence to make client obtain new one * from server in case of disconnect/reconnect. */ - if (cli->cl_seq != NULL) + if (cli->cl_seq) seq_client_flush(cli->cl_seq); rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL); @@ -2238,7 +2256,8 @@ static int mdc_cancel_for_recovery(struct ldlm_lock *lock) /* FIXME: if we ever get into a situation where there are too many * opened files with open locks on a single node, then we really - * should replay these open locks to reget it */ + * should replay these open locks to reget it + */ if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN) return 0; @@ -2422,7 +2441,7 @@ static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); @@ -2519,6 +2538,7 @@ static void /*__exit*/ mdc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. "); MODULE_DESCRIPTION("Lustre Metadata Client"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(mdc_init); diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c index ab4800c20..3924b095b 100644 --- a/drivers/staging/lustre/lustre/mgc/mgc_request.c +++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c @@ -90,7 +90,8 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id, int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type) { /* fsname is at most 8 chars long, maybe contain "-". - * e.g. "lustre", "SUN-000" */ + * e.g. "lustre", "SUN-000" + */ return mgc_name2resid(fsname, strlen(fsname), res_id, type); } EXPORT_SYMBOL(mgc_fsname2resid); @@ -102,7 +103,8 @@ static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type /* logname consists of "fsname-nodetype". * e.g. 
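The re-registration callback now receives a full kkuc_ct_data record instead of a bare archive id, so it must validate the record's magic, skip records that belong to other imports, and still treat -EEXIST as success. A userspace model of those three checks; the magic value and field layout here are illustrative only:

        #include <errno.h>
        #include <string.h>
        #include <stdint.h>

        #define CT_DATA_MAGIC 0x00c7da7a  /* illustrative, not the real magic */

        struct ct_data {
                uint32_t magic;
                char     uuid[40];
                uint32_t archive;
        };

        /* hypothetical re-registration hook: reject corrupt records, skip
         * records meant for other imports, tolerate "already registered" */
        static int ct_reregister(const struct ct_data *kcd, const char *my_uuid,
                                 int (*reg)(uint32_t archive))
        {
                int rc;

                if (!kcd || kcd->magic != CT_DATA_MAGIC)
                        return -EPROTO;
                if (strcmp(kcd->uuid, my_uuid) != 0)
                        return 0;               /* not ours: nothing to do */

                rc = reg(kcd->archive);
                return rc == -EEXIST ? 0 : rc;
        }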
"lustre-MDT0001", "SUN-000-client" - * there is an exception: llog "params" */ + * there is an exception: llog "params" + */ name_end = strrchr(logname, '-'); if (!name_end) len = strlen(logname); @@ -125,7 +127,8 @@ static int config_log_get(struct config_llog_data *cld) } /* Drop a reference to a config log. When no longer referenced, - we can free the config log data */ + * we can free the config log data + */ static void config_log_put(struct config_llog_data *cld) { CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname, @@ -162,7 +165,7 @@ struct config_llog_data *config_log_find(char *logname, struct config_llog_data *found = NULL; void *instance; - LASSERT(logname != NULL); + LASSERT(logname); instance = cfg ? cfg->cfg_instance : NULL; spin_lock(&config_list_lock); @@ -242,17 +245,18 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd, return cld; } -static struct config_llog_data *config_recover_log_add(struct obd_device *obd, - char *fsname, - struct config_llog_instance *cfg, - struct super_block *sb) +static struct config_llog_data * +config_recover_log_add(struct obd_device *obd, char *fsname, + struct config_llog_instance *cfg, + struct super_block *sb) { struct config_llog_instance lcfg = *cfg; struct config_llog_data *cld; char logname[32]; /* we have to use different llog for clients and mdts for cmd - * where only clients are notified if one of cmd server restarts */ + * where only clients are notified if one of cmd server restarts + */ LASSERT(strlen(fsname) < sizeof(logname) / 2); strcpy(logname, fsname); LASSERT(lcfg.cfg_instance); @@ -262,8 +266,9 @@ static struct config_llog_data *config_recover_log_add(struct obd_device *obd, return cld; } -static struct config_llog_data *config_params_log_add(struct obd_device *obd, - struct config_llog_instance *cfg, struct super_block *sb) +static struct config_llog_data * +config_params_log_add(struct obd_device *obd, + struct config_llog_instance *cfg, struct super_block *sb) { struct config_llog_instance lcfg = *cfg; struct config_llog_data *cld; @@ -300,7 +305,7 @@ static int config_log_add(struct obd_device *obd, char *logname, * -sptlrpc. multiple regular logs may share one sptlrpc log. 
*/ ptr = strrchr(logname, '-'); - if (ptr == NULL || ptr - logname > 8) { + if (!ptr || ptr - logname > 8) { CERROR("logname %s is too long\n", logname); return -EINVAL; } @@ -309,7 +314,7 @@ static int config_log_add(struct obd_device *obd, char *logname, strcpy(seclogname + (ptr - logname), "-sptlrpc"); sptlrpc_cld = config_log_find(seclogname, NULL); - if (sptlrpc_cld == NULL) { + if (!sptlrpc_cld) { sptlrpc_cld = do_config_log_add(obd, seclogname, CONFIG_T_SPTLRPC, NULL, NULL); if (IS_ERR(sptlrpc_cld)) { @@ -339,7 +344,16 @@ static int config_log_add(struct obd_device *obd, char *logname, LASSERT(lsi->lsi_lmd); if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) { struct config_llog_data *recover_cld; - *strrchr(seclogname, '-') = 0; + + ptr = strrchr(seclogname, '-'); + if (ptr) { + *ptr = 0; + } else { + CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n", + obd->obd_name, seclogname, -EINVAL); + config_log_put(cld); + return -EINVAL; + } recover_cld = config_recover_log_add(obd, seclogname, cfg, sb); if (IS_ERR(recover_cld)) { rc = PTR_ERR(recover_cld); @@ -376,7 +390,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg) int rc = 0; cld = config_log_find(logname, cfg); - if (cld == NULL) + if (!cld) return -ENOENT; mutex_lock(&cld->cld_lock); @@ -450,16 +464,16 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data) ocd = &imp->imp_connect_data; seq_printf(m, "imperative_recovery: %s\n", - OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED"); + OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED"); seq_printf(m, "client_state:\n"); spin_lock(&config_list_lock); list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - if (cld->cld_recover == NULL) + if (!cld->cld_recover) continue; - seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", - cld->cld_logname, - cld->cld_recover->cld_cfg.cfg_last_idx); + seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", + cld->cld_logname, + cld->cld_recover->cld_cfg.cfg_last_idx); } spin_unlock(&config_list_lock); @@ -483,8 +497,9 @@ static void do_requeue(struct config_llog_data *cld) LASSERT(atomic_read(&cld->cld_refcount) > 0); /* Do not run mgc_process_log on a disconnected export or an - export which is being disconnected. Take the client - semaphore to make the check non-racy. */ + * export which is being disconnected. Take the client + * semaphore to make the check non-racy. + */ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem); if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) { CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname); @@ -529,8 +544,9 @@ static int mgc_requeue_thread(void *data) } /* Always wait a few seconds to allow the server who - caused the lock revocation to finish its setup, plus some - random so everyone doesn't try to reconnect at once. */ + * caused the lock revocation to finish its setup, plus some + * random so everyone doesn't try to reconnect at once. 
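The requeue delay computed in the lines that follow is a fixed settling time plus a random centi-second fuzz, so clients whose lock was revoked do not all reconnect in lockstep. The arithmetic in a standalone sketch; HZ and the jitter range are illustrative:

        #include <stdio.h>
        #include <stdlib.h>
        #include <time.h>

        #define HZ                      100     /* ticks/second, illustrative */
        #define MGC_TIMEOUT_MIN_SECONDS 5

        int main(void)
        {
                srand((unsigned)time(NULL));

                int rand_cs = rand() % 100;             /* centi-seconds */
                long to = MGC_TIMEOUT_MIN_SECONDS * HZ; /* settling time */

                to += rand_cs * HZ / 100;               /* spread the herd */
                printf("sleeping %ld ticks (%.2f s)\n", to, (double)to / HZ);
                return 0;
        }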
+ */ to = MGC_TIMEOUT_MIN_SECONDS * HZ; to += rand * HZ / 100; /* rand is centi-seconds */ lwi = LWI_TIMEOUT(to, NULL, NULL); @@ -549,8 +565,7 @@ static int mgc_requeue_thread(void *data) spin_lock(&config_list_lock); rq_state &= ~RQ_PRECLEANUP; - list_for_each_entry(cld, &config_llog_list, - cld_list_chain) { + list_for_each_entry(cld, &config_llog_list, cld_list_chain) { if (!cld->cld_lostlock) continue; @@ -559,7 +574,8 @@ static int mgc_requeue_thread(void *data) LASSERT(atomic_read(&cld->cld_refcount) > 0); /* Whether we enqueued again or not in mgc_process_log, - * we're done with the ref from the old enqueue */ + * we're done with the ref from the old enqueue + */ if (cld_prev) config_log_put(cld_prev); cld_prev = cld; @@ -575,7 +591,8 @@ static int mgc_requeue_thread(void *data) config_log_put(cld_prev); /* break after scanning the list so that we can drop - * refcount to losing lock clds */ + * refcount to losing lock clds + */ if (unlikely(stopped)) { spin_lock(&config_list_lock); break; @@ -598,7 +615,8 @@ static int mgc_requeue_thread(void *data) } /* Add a cld to the list to requeue. Start the requeue thread if needed. - We are responsible for dropping the config log reference from here on out. */ + * We are responsible for dropping the config log reference from here on out. + */ static void mgc_requeue_add(struct config_llog_data *cld) { CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n", @@ -635,7 +653,8 @@ static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd) int rc; /* setup only remote ctxt, the local disk context is switched per each - * filesystem during mgc_fs_setup() */ + * filesystem during mgc_fs_setup() + */ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd, &llog_client_ops); if (rc) @@ -697,7 +716,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) static int mgc_cleanup(struct obd_device *obd) { /* COMPAT_146 - old config logs may have added profiles we don't - know about */ + * know about + */ if (obd->obd_type->typ_refcnt <= 1) /* Only for the last mgc */ class_del_profiles(); @@ -711,6 +731,7 @@ static int mgc_cleanup(struct obd_device *obd) static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { struct lprocfs_static_vars lvars = { NULL }; + struct task_struct *task; int rc; ptlrpcd_addref(); @@ -734,10 +755,10 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) init_waitqueue_head(&rq_waitq); /* start requeue thread */ - rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL, - "ll_cfg_requeue")); - if (IS_ERR_VALUE(rc)) { - CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n", + task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n", obd->obd_name, rc); goto err_cleanup; } @@ -793,7 +814,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, break; } /* Make sure not to re-enqueue when the mgc is stopping - (we get called from client_disconnect_export) */ + * (we get called from client_disconnect_export) + */ if (!lock->l_conn_export || !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) { CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n", @@ -815,7 +837,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, /* Not sure where this should go... 
*/ /* This is the timeout value for MGS_CONNECT request plus a ping interval, such - * that we can have a chance to try the secondary MGS if any. */ + * that we can have a chance to try the secondary MGS if any. + */ #define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \ + PING_INTERVAL) #define MGC_TARGET_REG_LIMIT 10 @@ -879,11 +902,12 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, cld->cld_resid.name[0]); /* We need a callback for every lockholder, so don't try to - ldlm_lock_match (see rev 1.1.2.11.2.47) */ + * ldlm_lock_match (see rev 1.1.2.11.2.47) + */ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0); @@ -894,7 +918,8 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags, NULL, 0, LVB_T_NONE, lockh, 0); /* A failed enqueue should still call the mgc_blocking_ast, - where it will be requeued if needed ("grant failed"). */ + * where it will be requeued if needed ("grant failed"). + */ ptlrpc_req_finished(req); return rc; } @@ -921,7 +946,7 @@ static int mgc_target_register(struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION, MGS_TARGET_REG); - if (req == NULL) + if (!req) return -ENOMEM; req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO); @@ -950,8 +975,8 @@ static int mgc_target_register(struct obd_export *exp, } static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) + u32 keylen, void *key, u32 vallen, + void *val, struct ptlrpc_request_set *set) { int rc = -EINVAL; @@ -1088,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd, } enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), + CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT), CONFIG_READ_NRPAGES = 4 }; @@ -1109,22 +1134,22 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, int rc = 0; int off = 0; - LASSERT(cfg->cfg_instance != NULL); + LASSERT(cfg->cfg_instance); LASSERT(cfg->cfg_sb == cfg->cfg_instance); - inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + inst = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!inst) return -ENOMEM; - pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); - if (pos >= PAGE_CACHE_SIZE) { + pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance); + if (pos >= PAGE_SIZE) { kfree(inst); return -E2BIG; } ++pos; buf = inst + pos; - bufsz = PAGE_CACHE_SIZE - pos; + bufsz = PAGE_SIZE - pos; while (datalen > 0) { int entry_len = sizeof(*entry); @@ -1156,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* Keep this swab for normal mixed endian handling. 
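The PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT to PAGE_SIZE/PAGE_SHIFT substitutions in this file (and in mdc_readpage earlier) track the 4.6 kernel's removal of the PAGE_CACHE_* aliases, which had always been defined equal to their PAGE_* counterparts. The bookkeeping they guard is plain page arithmetic, as in this standalone sketch mirroring CONFIG_READ_NRPAGES_INIT and the reply-length check:

        #include <stdio.h>

        /* illustrative 4 KiB pages; the kernel takes these from asm headers */
        #define PAGE_SHIFT 12UL
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        int main(void)
        {
                unsigned long nrpages = 1UL << (20 - PAGE_SHIFT); /* 1 MiB */
                unsigned long ealen = 5000;             /* reply bytes */

                /* a reply must never exceed what the bulk pages can hold */
                if (ealen > (nrpages << PAGE_SHIFT))
                        return 1;
                printf("%lu bytes fit in %lu pages\n", ealen, nrpages);
                return 0;
        }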
LU-1644 */ if (mne_swab) lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > PAGE_CACHE_SIZE) { + if (entry->mne_length > PAGE_SIZE) { CERROR("MNE too large (%u)\n", entry->mne_length); break; } @@ -1195,7 +1220,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* lustre-OST0001-osc- */ strcpy(obdname, cld->cld_logname); cname = strrchr(obdname, '-'); - if (cname == NULL) { + if (!cname) { CERROR("mgc %s: invalid logname %s\n", mgc->obd_name, obdname); break; @@ -1212,7 +1237,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* find the obd by obdname */ obd = class_name2obd(obdname); - if (obd == NULL) { + if (!obd) { CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n", mgc->obd_name, obdname); rc = 0; @@ -1227,7 +1252,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, uuid = buf + pos; down_read(&obd->u.cli.cl_sem); - if (obd->u.cli.cl_import == NULL) { + if (!obd->u.cli.cl_import) { /* client does not connect to the OST yet */ up_read(&obd->u.cli.cl_sem); rc = 0; @@ -1257,7 +1282,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, rc = -ENOMEM; lcfg = lustre_cfg_new(LCFG_PARAM, &bufs); - if (lcfg == NULL) { + if (IS_ERR(lcfg)) { CERROR("mgc: cannot allocate memory\n"); break; } @@ -1309,14 +1334,14 @@ static int mgc_process_recover_log(struct obd_device *obd, nrpages = CONFIG_READ_NRPAGES_INIT; pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL); - if (pages == NULL) { + if (!pages) { rc = -ENOMEM; goto out; } for (i = 0; i < nrpages; i++) { pages[i] = alloc_page(GFP_KERNEL); - if (pages[i] == NULL) { + if (!pages[i]) { rc = -ENOMEM; goto out; } @@ -1327,7 +1352,7 @@ again: LASSERT(mutex_is_locked(&cld->cld_lock)); req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp), &RQF_MGS_CONFIG_READ); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1338,7 +1363,6 @@ again: /* pack request */ body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); - LASSERT(body != NULL); LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname)); if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name)) >= sizeof(body->mcb_name)) { @@ -1347,19 +1371,19 @@ again: } body->mcb_offset = cfg->cfg_last_idx + 1; body->mcb_type = cld->cld_type; - body->mcb_bits = PAGE_CACHE_SHIFT; + body->mcb_bits = PAGE_SHIFT; body->mcb_units = nrpages; /* allocate bulk transfer descriptor */ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK, MGS_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } for (i = 0; i < nrpages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1373,7 +1397,8 @@ again: } /* always update the index even though it might have errors with - * handling the recover logs */ + * handling the recover logs + */ cfg->cfg_last_idx = res->mcr_offset; eof = res->mcr_offset == res->mcr_size; @@ -1386,7 +1411,7 @@ again: goto out; } - if (ealen > nrpages << PAGE_CACHE_SHIFT) { + if (ealen > nrpages << PAGE_SHIFT) { rc = -EINVAL; goto out; } @@ -1400,7 +1425,8 @@ again: mne_swab = !!ptlrpc_rep_need_swab(req); #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0) /* This import flag means the server did an extra swab of IR MNE - * records (fixed in LU-1252), reverse it here if needed. LU-1644 */ + * records (fixed in LU-1252), reverse it here if needed. 
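mne_swab, set just above from ptlrpc_rep_need_swab() and conditionally inverted in the line that follows for servers with the LU-1252 double-swab bug, decides whether each nidtbl entry needs byte-swapping. A self-contained model of that decision, with a two-field entry standing in for the real record:

        #include <stdbool.h>
        #include <stdint.h>

        static inline uint32_t swab32(uint32_t x)
        {
                return (x >> 24) | ((x >> 8) & 0xff00) |
                       ((x << 8) & 0xff0000) | (x << 24);
        }

        struct nid_entry { uint32_t version; uint32_t length; };

        /* swab only when the sender's byte order differs; a peer that
         * already swabbed once (the LU-1252 bug) flips the decision */
        static void fixup_entry(struct nid_entry *e, bool rep_need_swab,
                                bool peer_double_swabbed)
        {
                bool do_swab = rep_need_swab;

                if (peer_double_swabbed)
                        do_swab = !do_swab;
                if (do_swab) {
                        e->version = swab32(e->version);
                        e->length = swab32(e->length);
                }
        }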
LU-1644 + */ if (unlikely(req->rq_import->imp_need_mne_swab)) mne_swab = !mne_swab; #else @@ -1413,7 +1439,7 @@ again: ptr = kmap(pages[i]); rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, - min_t(int, ealen, PAGE_CACHE_SIZE), + min_t(int, ealen, PAGE_SIZE), mne_swab); kunmap(pages[i]); if (rc2 < 0) { @@ -1422,7 +1448,7 @@ again: break; } - ealen -= PAGE_CACHE_SIZE; + ealen -= PAGE_SIZE; } out: @@ -1434,7 +1460,7 @@ out: if (pages) { for (i = 0; i < nrpages; i++) { - if (pages[i] == NULL) + if (!pages[i]) break; __free_page(pages[i]); } @@ -1489,7 +1515,8 @@ static int mgc_process_cfg_log(struct obd_device *mgc, /* logname and instance info should be the same, so use our * copy of the instance for the update. The cfg_last_idx will - * be updated here. */ + * be updated here. + */ rc = class_config_parse_llog(env, ctxt, cld->cld_logname, &cld->cld_cfg); @@ -1529,9 +1556,10 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) LASSERT(cld); /* I don't want multiple processes running process_log at once -- - sounds like badness. It actually might be fine, as long as - we're not trying to update from the same log - simultaneously (in which case we should use a per-log sem.) */ + * sounds like badness. It actually might be fine, as long as + * we're not trying to update from the same log + * simultaneously (in which case we should use a per-log sem.) + */ mutex_lock(&cld->cld_lock); if (cld->cld_stopping) { mutex_unlock(&cld->cld_lock); @@ -1556,7 +1584,8 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl); /* mark cld_lostlock so that it will requeue - * after MGC becomes available. */ + * after MGC becomes available. + */ cld->cld_lostlock = 1; /* Get extra reference, it will be put in requeue thread */ config_log_get(cld); @@ -1635,18 +1664,19 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) if (rc) break; cld = config_log_find(logname, cfg); - if (cld == NULL) { + if (!cld) { rc = -ENOENT; break; } /* COMPAT_146 */ /* FIXME only set this for old logs! Right now this forces - us to always skip the "inside markers" check */ + * us to always skip the "inside markers" check + */ cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146; rc = mgc_process_log(obd, cld); - if (rc == 0 && cld->cld_recover != NULL) { + if (rc == 0 && cld->cld_recover) { if (OCD_HAS_FLAG(&obd->u.cli.cl_import-> imp_connect_data, IMP_RECOV)) { rc = mgc_process_log(obd, cld->cld_recover); @@ -1660,7 +1690,7 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) CERROR("Cannot process recover llog %d\n", rc); } - if (rc == 0 && cld->cld_params != NULL) { + if (rc == 0 && cld->cld_params) { rc = mgc_process_log(obd, cld->cld_params); if (rc == -ENOENT) { CDEBUG(D_MGC, @@ -1727,6 +1757,7 @@ static void /*__exit*/ mgc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. 
"); MODULE_DESCRIPTION("Lustre Management Client"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(mgc_init); diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile index acc685712..c404eb386 100644 --- a/drivers/staging/lustre/lustre/obdclass/Makefile +++ b/drivers/staging/lustre/lustre/obdclass/Makefile @@ -2,8 +2,8 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \ llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \ - genops.o uuid.o lprocfs_status.o \ - lustre_handles.o lustre_peer.o \ - statfs_pack.o obdo.o obd_config.o obd_mount.o \ - lu_object.o cl_object.o \ - cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o lprocfs_counters.o + genops.o uuid.o lprocfs_status.o lprocfs_counters.o \ + lustre_handles.o lustre_peer.o statfs_pack.o \ + obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \ + cl_object.o cl_page.o cl_lock.o cl_io.o \ + acl.o kernelcomm.o diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c index 49ba8851c..0e02ae97b 100644 --- a/drivers/staging/lustre/lustre/obdclass/acl.c +++ b/drivers/staging/lustre/lustre/obdclass/acl.c @@ -104,7 +104,7 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header, return old_size; new = kmemdup(*header, new_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; kfree(*header); @@ -124,7 +124,7 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header, return 0; new = kmemdup(*header, ext_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; kfree(*header); @@ -149,7 +149,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size) count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr); esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr); new = kzalloc(esize, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return ERR_PTR(-ENOMEM); new->a_count = cpu_to_le32(count); @@ -180,7 +180,7 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size, return -EINVAL; new = kzalloc(size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION); @@ -300,7 +300,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size, ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr); new = kzalloc(ext_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return ERR_PTR(-ENOMEM); for (i = 0, j = 0; i < posix_count; i++) { diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c index 63246ba36..f5128b4f1 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_io.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c @@ -44,6 +44,7 @@ #include "../include/obd_support.h" #include "../include/lustre_fid.h" #include +#include #include "../include/cl_object.h" #include "cl_internal.h" @@ -93,7 +94,7 @@ static int cl_io_invariant(const struct cl_io *io) * CIS_IO_GOING. 
*/ ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING || - (io->ci_state == CIS_LOCKED && up != NULL)); + (io->ci_state == CIS_LOCKED && up)); } /** @@ -111,7 +112,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) slice = container_of(io->ci_layers.prev, struct cl_io_slice, cis_linkage); list_del_init(&slice->cis_linkage); - if (slice->cis_iop->op[io->ci_type].cio_fini != NULL) + if (slice->cis_iop->op[io->ci_type].cio_fini) slice->cis_iop->op[io->ci_type].cio_fini(env, slice); /* * Invalidate slice to catch use after free. This assumes that @@ -138,7 +139,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) case CIT_MISC: /* Check ignore layout change conf */ LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout, - !io->ci_need_restart)); + !io->ci_need_restart)); break; default: LBUG(); @@ -164,7 +165,7 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io, result = 0; cl_object_for_each(scan, obj) { - if (scan->co_ops->coo_io_init != NULL) { + if (scan->co_ops->coo_io_init) { result = scan->co_ops->coo_io_init(env, scan, io); if (result != 0) break; @@ -186,7 +187,7 @@ int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, struct cl_thread_info *info = cl_env_info(env); LASSERT(obj != cl_object_top(obj)); - if (info->clt_current_io == NULL) + if (!info->clt_current_io) info->clt_current_io = io; return cl_io_init0(env, io, iot, obj); } @@ -208,7 +209,7 @@ int cl_io_init(const struct lu_env *env, struct cl_io *io, struct cl_thread_info *info = cl_env_info(env); LASSERT(obj == cl_object_top(obj)); - LASSERT(info->clt_current_io == NULL); + LASSERT(!info->clt_current_io); info->clt_current_io = io; return cl_io_init0(env, io, iot, obj); @@ -224,7 +225,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, enum cl_io_type iot, loff_t pos, size_t count) { LINVRNT(iot == CIT_READ || iot == CIT_WRITE); - LINVRNT(io->ci_obj != NULL); + LINVRNT(io->ci_obj); LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu, "io range: %u [%llu, %llu) %u %u\n", @@ -290,11 +291,11 @@ static void cl_io_locks_sort(struct cl_io *io) prev = NULL; list_for_each_entry_safe(curr, temp, - &io->ci_lockset.cls_todo, - cill_linkage) { - if (prev != NULL) { + &io->ci_lockset.cls_todo, + cill_linkage) { + if (prev) { switch (cl_lock_descr_sort(&prev->cill_descr, - &curr->cill_descr)) { + &curr->cill_descr)) { case 0: /* * IMPOSSIBLE: Identical locks are @@ -305,10 +306,11 @@ static void cl_io_locks_sort(struct cl_io *io) LBUG(); case 1: list_move_tail(&curr->cill_linkage, - &prev->cill_linkage); + &prev->cill_linkage); done = 0; continue; /* don't change prev: it's - * still "previous" */ + * still "previous" + */ case -1: /* already in order */ break; } @@ -327,32 +329,31 @@ static void cl_io_locks_sort(struct cl_io *io) int cl_queue_match(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; + struct cl_io_lock_link *scan; - list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_match(&scan->cill_descr, need)) - return 1; - } - return 0; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_match(&scan->cill_descr, need)) + return 1; + } + return 0; } EXPORT_SYMBOL(cl_queue_match); static int cl_queue_merge(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; - - list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_cmp(&scan->cill_descr, need)) - continue; - cl_lock_descr_merge(&scan->cill_descr, need); - 
CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", - scan->cill_descr.cld_mode, scan->cill_descr.cld_start, - scan->cill_descr.cld_end); - return 1; - } - return 0; + struct cl_io_lock_link *scan; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_cmp(&scan->cill_descr, need)) + continue; + cl_lock_descr_merge(&scan->cill_descr, need); + CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", + scan->cill_descr.cld_mode, scan->cill_descr.cld_start, + scan->cill_descr.cld_end); + return 1; + } + return 0; } static int cl_lockset_match(const struct cl_lockset *set, @@ -384,8 +385,7 @@ static int cl_lockset_lock_one(const struct lu_env *env, if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) { result = cl_wait(env, lock); if (result == 0) - list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); } else result = 0; } else @@ -399,11 +399,11 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io, struct cl_lock *lock = link->cill_lock; list_del_init(&link->cill_linkage); - if (lock != NULL) { + if (lock) { cl_lock_release(env, lock, "io", io); link->cill_lock = NULL; } - if (link->cill_fini != NULL) + if (link->cill_fini) link->cill_fini(env, link); } @@ -419,7 +419,8 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { if (!cl_lockset_match(set, &link->cill_descr)) { /* XXX some locking to guarantee that locks aren't - * expanded in between. */ + * expanded in between. + */ result = cl_lockset_lock_one(env, io, set, link); if (result != 0) break; @@ -428,12 +429,11 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, } if (result == 0) { list_for_each_entry_safe(link, temp, - &set->cls_curr, cill_linkage) { + &set->cls_curr, cill_linkage) { lock = link->cill_lock; result = cl_wait(env, lock); if (result == 0) - list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); else break; } @@ -458,7 +458,7 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_lock == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_lock) continue; result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan); if (result != 0) @@ -503,7 +503,7 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io) cl_lock_link_fini(env, io, link); } cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL) + if (scan->cis_iop->op[io->ci_type].cio_unlock) scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); } io->ci_state = CIS_UNLOCKED; @@ -529,7 +529,7 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io) result = 0; cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_iter_init) continue; result = scan->cis_iop->op[io->ci_type].cio_iter_init(env, scan); @@ -556,7 +556,7 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL) + if (scan->cis_iop->op[io->ci_type].cio_iter_fini) scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan); } io->ci_state = CIS_IT_ENDED; @@ -581,7 +581,7 @@ static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, /* layers have to be notified. 
*/ cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_advance != NULL) + if (scan->cis_iop->op[io->ci_type].cio_advance) scan->cis_iop->op[io->ci_type].cio_advance(env, scan, nob); } @@ -621,7 +621,7 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, int result; link = kzalloc(sizeof(*link), GFP_NOFS); - if (link != NULL) { + if (link) { link->cill_descr = *descr; link->cill_fini = cl_free_io_lock_link; result = cl_io_lock_add(env, io, link); @@ -648,7 +648,7 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io) io->ci_state = CIS_IO_GOING; cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_start == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_start) continue; result = scan->cis_iop->op[io->ci_type].cio_start(env, scan); if (result != 0) @@ -673,7 +673,7 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_end != NULL) + if (scan->cis_iop->op[io->ci_type].cio_end) scan->cis_iop->op[io->ci_type].cio_end(env, scan); /* TODO: error handling. */ } @@ -687,7 +687,7 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type); - LINVRNT(slice != NULL); + LINVRNT(slice); return slice; } @@ -759,11 +759,11 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io, * "parallel io" (see CLO_REPEAT loops in cl_lock.c). */ cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_read_page != NULL) { + if (scan->cis_iop->cio_read_page) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); - LINVRNT(slice != NULL); + LINVRNT(slice); result = scan->cis_iop->cio_read_page(env, scan, slice); if (result != 0) break; @@ -798,7 +798,7 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, LASSERT(cl_page_in_io(page, io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->cio_prepare_write != NULL) { + if (scan->cis_iop->cio_prepare_write) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); @@ -833,11 +833,11 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, * state. Better (and more general) way of dealing with such situation * is needed. */ - LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL); + LASSERT(cl_page_is_owned(page, io) || page->cp_parent); LASSERT(cl_page_in_io(page, io)); cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_commit_write != NULL) { + if (scan->cis_iop->cio_commit_write) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); @@ -872,7 +872,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op)); cl_io_for_each(scan, io) { - if (scan->cis_iop->req_op[crt].cio_submit == NULL) + if (!scan->cis_iop->req_op[crt].cio_submit) continue; result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt, queue); @@ -900,7 +900,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, int rc; cl_page_list_for_each(pg, &queue->c2_qin) { - LASSERT(pg->cp_sync_io == NULL); + LASSERT(!pg->cp_sync_io); pg->cp_sync_io = anchor; } @@ -913,14 +913,14 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, * clean pages), count them as completed to avoid infinite * wait. 
*/ - cl_page_list_for_each(pg, &queue->c2_qin) { + cl_page_list_for_each(pg, &queue->c2_qin) { pg->cp_sync_io = NULL; cl_sync_io_note(anchor, 1); - } + } - /* wait for the IO to be finished. */ - rc = cl_sync_io_wait(env, io, &queue->c2_qout, - anchor, timeout); + /* wait for the IO to be finished. */ + rc = cl_sync_io_wait(env, io, &queue->c2_qout, + anchor, timeout); } else { LASSERT(list_empty(&queue->c2_qout.pl_pages)); cl_page_list_for_each(pg, &queue->c2_qin) @@ -1026,7 +1026,7 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, { struct list_head *linkage = &slice->cis_linkage; - LASSERT((linkage->prev == NULL && linkage->next == NULL) || + LASSERT((!linkage->prev && !linkage->next) || list_empty(linkage)); list_add_tail(linkage, &io->ci_layers); @@ -1053,8 +1053,9 @@ EXPORT_SYMBOL(cl_page_list_init); void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page) { /* it would be better to check that page is owned by "current" io, but - * it is not passed here. */ - LASSERT(page->cp_owner != NULL); + * it is not passed here. + */ + LASSERT(page->cp_owner); LINVRNT(plist->pl_owner == current); lockdep_off(); @@ -1263,7 +1264,7 @@ EXPORT_SYMBOL(cl_2queue_init_page); */ struct cl_io *cl_io_top(struct cl_io *io) { - while (io->ci_parent != NULL) + while (io->ci_parent) io = io->ci_parent; return io; } @@ -1296,13 +1297,13 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req) LASSERT(list_empty(&req->crq_pages)); LASSERT(req->crq_nrpages == 0); LINVRNT(list_empty(&req->crq_layers)); - LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL)); + LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o)); - if (req->crq_o != NULL) { + if (req->crq_o) { for (i = 0; i < req->crq_nrobjs; ++i) { struct cl_object *obj = req->crq_o[i].ro_obj; - if (obj != NULL) { + if (obj) { lu_object_ref_del_at(&obj->co_lu, &req->crq_o[i].ro_obj_ref, "cl_req", req); @@ -1326,7 +1327,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req, do { list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev); - if (dev->cd_ops->cdo_req_init != NULL) { + if (dev->cd_ops->cdo_req_init) { result = dev->cd_ops->cdo_req_init(env, dev, req); if (result != 0) @@ -1334,7 +1335,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req, } } page = page->cp_child; - } while (page != NULL && result == 0); + } while (page && result == 0); return result; } @@ -1351,9 +1352,9 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc) */ while (!list_empty(&req->crq_layers)) { slice = list_entry(req->crq_layers.prev, - struct cl_req_slice, crs_linkage); + struct cl_req_slice, crs_linkage); list_del_init(&slice->crs_linkage); - if (slice->crs_ops->cro_completion != NULL) + if (slice->crs_ops->cro_completion) slice->crs_ops->cro_completion(env, slice, rc); } cl_req_free(env, req); @@ -1371,7 +1372,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, LINVRNT(nr_objects > 0); req = kzalloc(sizeof(*req), GFP_NOFS); - if (req != NULL) { + if (req) { int result; req->crq_type = crt; @@ -1380,7 +1381,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]), GFP_NOFS); - if (req->crq_o != NULL) { + if (req->crq_o) { req->crq_nrobjs = nr_objects; result = cl_req_init(env, req, page); } else @@ -1408,7 +1409,7 @@ void cl_req_page_add(const struct lu_env *env, page = cl_page_top(page); 
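/*
 * Illustrative aside, not part of the applied patch: the cl_io_submit_sync()
 * hunk above is easier to follow with the caller-side cl_sync_io protocol
 * spelled out.  A minimal sketch under the 4.6-era prototypes visible in
 * this patch (cl_sync_io_init()/cl_sync_io_wait()/cl_sync_io_note() and
 * cl_io_submit_rw()); the helper name is made up, pl_nr is assumed to be the
 * page count kept by struct cl_page_list, and the unsubmitted-page cleanup
 * that the real cl_io_submit_sync() performs is omitted for brevity.
 */
static int cl_sync_submit_sketch(const struct lu_env *env, struct cl_io *io,
				 enum cl_req_type crt, struct cl_2queue *queue,
				 long timeout)
{
	struct cl_sync_io anchor;
	struct cl_page *pg;
	int rc;

	/* arm the anchor with one pending transfer per queued page */
	cl_sync_io_init(&anchor, queue->c2_qin.pl_nr);

	/* each page records the anchor so its completion can be counted */
	cl_page_list_for_each(pg, &queue->c2_qin)
		pg->cp_sync_io = &anchor;

	rc = cl_io_submit_rw(env, io, crt, queue);
	if (rc == 0)
		/* blocks until every page has called cl_sync_io_note() */
		rc = cl_sync_io_wait(env, io, &queue->c2_qout, &anchor,
				     timeout);
	return rc;
}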
LASSERT(list_empty(&page->cp_flight)); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n", req, req->crq_type, req->crq_nrpages); @@ -1418,7 +1419,7 @@ void cl_req_page_add(const struct lu_env *env, page->cp_req = req; obj = cl_object_top(page->cp_obj); for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) { - if (rqo->ro_obj == NULL) { + if (!rqo->ro_obj) { rqo->ro_obj = obj; cl_object_get(obj); lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref, @@ -1463,11 +1464,11 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req) * of objects. */ for (i = 0; i < req->crq_nrobjs; ++i) - LASSERT(req->crq_o[i].ro_obj != NULL); + LASSERT(req->crq_o[i].ro_obj); result = 0; list_for_each_entry(slice, &req->crq_layers, crs_linkage) { - if (slice->crs_ops->cro_prep != NULL) { + if (slice->crs_ops->cro_prep) { result = slice->crs_ops->cro_prep(env, slice); if (result != 0) break; @@ -1501,9 +1502,8 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, scan = cl_page_at(page, slice->crs_dev->cd_lu_dev.ld_type); - LASSERT(scan != NULL); obj = scan->cpl_obj; - if (slice->crs_ops->cro_attr_set != NULL) + if (slice->crs_ops->cro_attr_set) slice->crs_ops->cro_attr_set(env, slice, obj, attr + i, flags); } @@ -1511,9 +1511,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, } EXPORT_SYMBOL(cl_req_attr_set); -/* XXX complete(), init_completion(), and wait_for_completion(), until they are - * implemented in libcfs. */ -# include <linux/sched.h> /** * Initialize synchronous io wait anchor, for transfer of \a nrpages pages. diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c index 1836dc014..aec644eb4 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c @@ -96,8 +96,8 @@ static int cl_lock_invariant(const struct lu_env *env, result = atomic_read(&lock->cll_ref) > 0 && cl_lock_invariant_trusted(env, lock); - if (!result && env != NULL) - CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken"); + if (!result && env) + CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n"); return result; } @@ -259,7 +259,7 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock) struct cl_lock_slice *slice; slice = list_entry(lock->cll_layers.next, - struct cl_lock_slice, cls_linkage); + struct cl_lock_slice, cls_linkage); list_del_init(lock->cll_layers.next); slice->cls_ops->clo_fini(env, slice); } @@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); obj = lock->cll_descr.cld_obj; - LINVRNT(obj != NULL); + LINVRNT(obj); CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n", atomic_read(&lock->cll_ref), lock, RETIP); @@ -361,8 +361,8 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, struct cl_lock *lock; struct lu_object_header *head; - lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lock != NULL) { + lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS); + if (lock) { atomic_set(&lock->cll_ref, 1); lock->cll_descr = *descr; lock->cll_state = CLS_NEW; @@ -382,8 +382,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, CS_LOCK_INC(obj, total); CS_LOCK_INC(obj, create); cl_lock_lockdep_init(lock); - list_for_each_entry(obj, &head->loh_layers, - co_lu.lo_linkage) { + list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) { int err; err = obj->co_ops->coo_lock_init(env, obj, lock, io); 
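/*
 * Illustrative aside, not part of the applied patch: this patch repeatedly
 * converts kmem_cache_alloc(cache, GFP_NOFS | __GFP_ZERO) to
 * kmem_cache_zalloc(cache, GFP_NOFS), as in cl_lock_alloc() above and in
 * cl_env_new() and obd_device_alloc() below.  The conversion is
 * behavior-preserving: kmem_cache_zalloc(c, f) is the <linux/slab.h>
 * shorthand for kmem_cache_alloc(c, f | __GFP_ZERO).  A self-contained
 * example against the stock slab API; the cache and structure names are
 * illustrative only.
 */
#include <linux/slab.h>

struct zalloc_demo {
	int state;			/* starts out zeroed */
};

static struct kmem_cache *demo_cachep;

static int demo_cache_init(void)
{
	demo_cachep = kmem_cache_create("zalloc_demo_cache",
					sizeof(struct zalloc_demo),
					0, 0, NULL);
	return demo_cachep ? 0 : -ENOMEM;
}

static struct zalloc_demo *demo_alloc(void)
{
	/* zeroed allocation; equivalent to GFP_NOFS | __GFP_ZERO */
	return kmem_cache_zalloc(demo_cachep, GFP_NOFS);
}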
@@ -461,7 +460,7 @@ static int cl_lock_fits_into(const struct lu_env *env, LINVRNT(cl_lock_invariant_trusted(env, lock)); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_fits_into != NULL && + if (slice->cls_ops->clo_fits_into && !slice->cls_ops->clo_fits_into(env, slice, need, io)) return 0; } @@ -524,17 +523,17 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env, lock = cl_lock_lookup(env, obj, io, need); spin_unlock(&head->coh_lock_guard); - if (lock == NULL) { + if (!lock) { lock = cl_lock_alloc(env, obj, io, need); if (!IS_ERR(lock)) { struct cl_lock *ghost; spin_lock(&head->coh_lock_guard); ghost = cl_lock_lookup(env, obj, io, need); - if (ghost == NULL) { + if (!ghost) { cl_lock_get_trust(lock); list_add_tail(&lock->cll_linkage, - &head->coh_locks); + &head->coh_locks); spin_unlock(&head->coh_lock_guard); CS_LOCK_INC(obj, busy); } else { @@ -572,7 +571,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, spin_lock(&head->coh_lock_guard); lock = cl_lock_lookup(env, obj, io, need); spin_unlock(&head->coh_lock_guard); - if (lock == NULL) + if (!lock) return NULL; cl_lock_mutex_get(env, lock); @@ -584,7 +583,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, cl_lock_put(env, lock); lock = NULL; } - } while (lock == NULL); + } while (!lock); cl_lock_hold_add(env, lock, scope, source); cl_lock_user_add(env, lock); @@ -774,8 +773,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock) lock->cll_flags |= CLF_CANCELLED; list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_cancel != NULL) + cls_linkage) { + if (slice->cls_ops->clo_cancel) slice->cls_ops->clo_cancel(env, slice); } } @@ -811,8 +810,8 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock) * by cl_lock_lookup(). */ list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_delete != NULL) + cls_linkage) { + if (slice->cls_ops->clo_delete) slice->cls_ops->clo_delete(env, slice); } /* @@ -935,7 +934,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) if (result == 0) { /* To avoid being interrupted by the 'non-fatal' signals * (SIGCHLD, for instance), we'd block them temporarily. 
- * LU-305 */ + * LU-305 + */ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); init_waitqueue_entry(&waiter, current); @@ -946,7 +946,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) LASSERT(cl_lock_nr_mutexed(env) == 0); /* Returning ERESTARTSYS instead of EINTR so syscalls - * can be restarted if signals are pending here */ + * can be restarted if signals are pending here + */ result = -ERESTARTSYS; if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) { schedule(); @@ -974,7 +975,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_invariant(env, lock)); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) - if (slice->cls_ops->clo_state != NULL) + if (slice->cls_ops->clo_state) slice->cls_ops->clo_state(env, slice, state); wake_up_all(&lock->cll_wq); } @@ -1038,8 +1039,8 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) result = -ENOSYS; list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_unuse != NULL) { + cls_linkage) { + if (slice->cls_ops->clo_unuse) { result = slice->cls_ops->clo_unuse(env, slice); if (result != 0) break; @@ -1072,7 +1073,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) result = -ENOSYS; state = cl_lock_intransit(env, lock); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_use != NULL) { + if (slice->cls_ops->clo_use) { result = slice->cls_ops->clo_use(env, slice); if (result != 0) break; @@ -1125,7 +1126,7 @@ static int cl_enqueue_kick(const struct lu_env *env, result = -ENOSYS; list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_enqueue != NULL) { + if (slice->cls_ops->clo_enqueue) { result = slice->cls_ops->clo_enqueue(env, slice, io, flags); if (result != 0) @@ -1170,7 +1171,8 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, /* kick layers. */ result = cl_enqueue_kick(env, lock, io, flags); /* For AGL case, the cl_lock::cll_state may - * become CLS_HELD already. */ + * become CLS_HELD already. + */ if (result == 0 && lock->cll_state == CLS_QUEUING) cl_lock_state_set(env, lock, CLS_ENQUEUED); break; @@ -1215,7 +1217,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LASSERT(lock->cll_state == CLS_QUEUING); - LASSERT(lock->cll_conflict != NULL); + LASSERT(lock->cll_conflict); conflict = lock->cll_conflict; lock->cll_conflict = NULL; @@ -1258,7 +1260,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock, do { result = cl_enqueue_try(env, lock, io, enqflags); if (result == CLO_WAIT) { - if (lock->cll_conflict != NULL) + if (lock->cll_conflict) result = cl_lock_enqueue_wait(env, lock, 1); else result = cl_lock_state_wait(env, lock); @@ -1300,7 +1302,8 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock) } /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold - * underlying resources. */ + * underlying resources. 
+ */ if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) { cl_lock_user_del(env, lock); return 0; @@ -1416,7 +1419,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock) result = -ENOSYS; list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_wait != NULL) { + if (slice->cls_ops->clo_wait) { result = slice->cls_ops->clo_wait(env, slice); if (result != 0) break; @@ -1449,7 +1452,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD, - "Wrong state %d \n", lock->cll_state); + "Wrong state %d\n", lock->cll_state); LASSERT(lock->cll_holds > 0); do { @@ -1487,7 +1490,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) pound = 0; list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_weigh != NULL) { + if (slice->cls_ops->clo_weigh) { ounce = slice->cls_ops->clo_weigh(env, slice); pound += ounce; if (pound < ounce) /* over-weight^Wflow */ @@ -1523,7 +1526,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_invariant(env, lock)); list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_modify != NULL) { + if (slice->cls_ops->clo_modify) { result = slice->cls_ops->clo_modify(env, slice, desc); if (result != 0) return result; @@ -1584,7 +1587,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, result = cl_lock_enclosure(env, lock, closure); if (result == 0) { list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_closure != NULL) { + if (slice->cls_ops->clo_closure) { result = slice->cls_ops->clo_closure(env, slice, closure); if (result != 0) @@ -1654,7 +1657,7 @@ void cl_lock_disclosure(const struct lu_env *env, cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); list_for_each_entry_safe(scan, temp, &closure->clc_list, - cll_inclosure){ + cll_inclosure) { list_del_init(&scan->cll_inclosure); cl_lock_mutex_put(env, scan); lu_ref_del(&scan->cll_reference, "closure", closure); @@ -1777,13 +1780,15 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, lock = NULL; need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but - * not PHANTOM */ + * not PHANTOM + */ need->cld_start = need->cld_end = index; need->cld_enq_flags = 0; spin_lock(&head->coh_lock_guard); /* It is fine to match any group lock since there could be only one - * with a uniq gid and it conflicts with all other lock modes too */ + * with a uniq gid and it conflicts with all other lock modes too + */ list_for_each_entry(scan, &head->coh_locks, cll_linkage) { if (scan != except && (scan->cll_descr.cld_mode == CLM_GROUP || @@ -1798,7 +1803,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, (canceld || !(scan->cll_flags & CLF_CANCELLED)) && (pending || !(scan->cll_flags & CLF_CANCELPEND))) { /* Don't increase cs_hit here since this - * is just a helper function. */ + * is just a helper function. 
+ */ cl_lock_get_trust(scan); lock = scan; break; @@ -1820,7 +1826,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock) dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type; slice = cl_page_at(page, dtype); - LASSERT(slice != NULL); return slice->cpl_page->cp_index; } @@ -1839,12 +1844,13 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, /* refresh non-overlapped index */ tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index, - lock, 1, 0); - if (tmp != NULL) { + lock, 1, 0); + if (tmp) { /* Cache the first-non-overlapped index so as to skip * all pages within [index, clt_fn_index). This * is safe because if tmp lock is canceled, it will - * discard these pages. */ + * discard these pages. + */ info->clt_fn_index = tmp->cll_descr.cld_end + 1; if (tmp->cll_descr.cld_end == CL_PAGE_EOF) info->clt_fn_index = CL_PAGE_EOF; @@ -1950,7 +1956,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel) * already destroyed (as otherwise they will be left unprotected). */ LASSERT(ergo(!cancel, - head->coh_tree.rnode == NULL && head->coh_pages == 0)); + !head->coh_tree.rnode && head->coh_pages == 0)); spin_lock(&head->coh_lock_guard); while (!list_empty(&head->coh_locks)) { @@ -2166,8 +2172,8 @@ EXPORT_SYMBOL(cl_lock_mode_name); * Prints human readable representation of a lock description. */ void cl_lock_descr_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_lock_descr *descr) + lu_printer_t printer, + const struct cl_lock_descr *descr) { const struct lu_fid *fid; @@ -2194,7 +2200,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie, (*printer)(env, cookie, " %s@%p: ", slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name, slice); - if (slice->cls_ops->clo_print != NULL) + if (slice->cls_ops->clo_print) slice->cls_ops->clo_print(env, cookie, printer, slice); (*printer)(env, cookie, "\n"); } diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c index 57c8d5412..43e299d4d 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_object.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c @@ -152,7 +152,7 @@ struct cl_object *cl_object_top(struct cl_object *o) struct cl_object_header *hdr = cl_object_header(o); struct cl_object *top; - while (hdr->coh_parent != NULL) + while (hdr->coh_parent) hdr = hdr->coh_parent; top = lu2cl(lu_object_top(&hdr->coh_lu)); @@ -217,7 +217,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_get != NULL) { + if (obj->co_ops->coo_attr_get) { result = obj->co_ops->coo_attr_get(env, obj, attr); if (result != 0) { if (result > 0) @@ -247,9 +247,8 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_set != NULL) { + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_attr_set) { result = obj->co_ops->coo_attr_set(env, obj, attr, v); if (result != 0) { if (result > 0) @@ -278,9 +277,8 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_glimpse != NULL) { + 
list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_glimpse) { result = obj->co_ops->coo_glimpse(env, obj, lvb); if (result != 0) break; @@ -306,7 +304,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_conf_set != NULL) { + if (obj->co_ops->coo_conf_set) { result = obj->co_ops->coo_conf_set(env, obj, conf); if (result != 0) break; @@ -328,7 +326,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj) struct cl_object_header *hdr; hdr = cl_object_header(obj); - LASSERT(hdr->coh_tree.rnode == NULL); + LASSERT(!hdr->coh_tree.rnode); LASSERT(hdr->coh_pages == 0); set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); @@ -362,7 +360,8 @@ void cache_stats_init(struct cache_stats *cs, const char *name) atomic_set(&cs->cs_stats[i], 0); } -int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h) +static int cache_stats_print(const struct cache_stats *cs, + struct seq_file *m, int h) { int i; /* @@ -456,13 +455,13 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......] seq_printf(m, " ["); for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i) seq_printf(m, "%s: %u ", pstate[i], - atomic_read(&site->cs_pages_state[i])); + atomic_read(&site->cs_pages_state[i])); seq_printf(m, "]\n"); cache_stats_print(&site->cs_locks, m, 0); seq_printf(m, " ["); for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i) seq_printf(m, "%s: %u ", lstate[i], - atomic_read(&site->cs_locks_state[i])); + atomic_read(&site->cs_locks_state[i])); seq_printf(m, "]\n"); cache_stats_print(&cl_env_stats, m, 0); seq_printf(m, "\n"); @@ -482,7 +481,6 @@ EXPORT_SYMBOL(cl_site_stats_print); * because Lustre code may call into other fs which has certain assumptions * about journal_info. Currently following fields in task_struct are identified * can be used for this purpose: - * - cl_env: for liblustre. * - tux_info: only on RedHat kernel. * - ... * \note As long as we use task_struct to store cl_env, we assume that once @@ -540,7 +538,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug) { LASSERT(cle->ce_ref == 0); LASSERT(cle->ce_magic == &cl_env_init0); - LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL); + LASSERT(!cle->ce_debug && !cle->ce_owner); cle->ce_ref = 1; cle->ce_debug = debug; @@ -575,7 +573,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn) { struct cl_env *cle = cl_env_hops_obj(hn); - LASSERT(cle->ce_owner != NULL); + LASSERT(cle->ce_owner); return (key == cle->ce_owner); } @@ -609,7 +607,7 @@ static inline void cl_env_attach(struct cl_env *cle) if (cle) { int rc; - LASSERT(cle->ce_owner == NULL); + LASSERT(!cle->ce_owner); cle->ce_owner = (void *) (long) current->pid; rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner, &cle->ce_node); @@ -637,7 +635,7 @@ static int cl_env_store_init(void) CFS_HASH_MAX_THETA, &cl_env_hops, CFS_HASH_RW_BKTLOCK); - return cl_env_hash != NULL ? 0 : -ENOMEM; + return cl_env_hash ? 
0 : -ENOMEM; } static void cl_env_store_fini(void) @@ -647,7 +645,7 @@ static void cl_env_store_fini(void) static inline struct cl_env *cl_env_detach(struct cl_env *cle) { - if (cle == NULL) + if (!cle) cle = cl_env_fetch(); if (cle && cle->ce_owner) @@ -661,8 +659,8 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) struct lu_env *env; struct cl_env *cle; - cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO); - if (cle != NULL) { + cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS); + if (cle) { int rc; INIT_LIST_HEAD(&cle->ce_linkage); @@ -716,7 +714,7 @@ static struct lu_env *cl_env_peek(int *refcheck) env = NULL; cle = cl_env_fetch(); - if (cle != NULL) { + if (cle) { CL_ENV_INC(hit); env = &cle->ce_lu; *refcheck = ++cle->ce_ref; @@ -741,7 +739,7 @@ struct lu_env *cl_env_get(int *refcheck) struct lu_env *env; env = cl_env_peek(refcheck); - if (env == NULL) { + if (!env) { env = cl_env_new(lu_context_tags_default, lu_session_tags_default, __builtin_return_address(0)); @@ -768,7 +766,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags) { struct lu_env *env; - LASSERT(cl_env_peek(refcheck) == NULL); + LASSERT(!cl_env_peek(refcheck)); env = cl_env_new(tags, tags, __builtin_return_address(0)); if (!IS_ERR(env)) { struct cl_env *cle; @@ -783,7 +781,7 @@ EXPORT_SYMBOL(cl_env_alloc); static void cl_env_exit(struct cl_env *cle) { - LASSERT(cle->ce_owner == NULL); + LASSERT(!cle->ce_owner); lu_context_exit(&cle->ce_lu.le_ctx); lu_context_exit(&cle->ce_ses); } @@ -802,7 +800,7 @@ void cl_env_put(struct lu_env *env, int *refcheck) cle = cl_env_container(env); LASSERT(cle->ce_ref > 0); - LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck)); + LASSERT(ergo(refcheck, cle->ce_ref == *refcheck)); CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); if (--cle->ce_ref == 0) { @@ -877,7 +875,7 @@ struct lu_env *cl_env_nested_get(struct cl_env_nest *nest) nest->cen_cookie = NULL; env = cl_env_peek(&nest->cen_refcheck); - if (env != NULL) { + if (env) { if (!cl_io_is_going(env)) return env; cl_env_put(env, &nest->cen_refcheck); @@ -929,14 +927,12 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, const char *typename; struct lu_device *d; - LASSERT(ldt != NULL); - typename = ldt->ldt_name; d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL); if (!IS_ERR(d)) { int rc; - if (site != NULL) + if (site) d->ld_site = site; rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next); if (rc == 0) { diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c index 61f28ebfc..394580016 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_page.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c @@ -69,7 +69,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, */ static struct cl_page *cl_page_top_trusted(struct cl_page *page) { - while (page->cp_parent != NULL) + while (page->cp_parent) page = page->cp_parent; return page; } @@ -110,7 +110,7 @@ cl_page_at_trusted(const struct cl_page *page, return slice; } page = page->cp_child; - } while (page != NULL); + } while (page); return NULL; } @@ -127,7 +127,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index) assert_spin_locked(&hdr->coh_page_guard); page = radix_tree_lookup(&hdr->coh_tree, index); - if (page != NULL) + if (page) cl_page_get_trust(page); return page; } @@ -188,7 +188,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, * Pages for lsm-less file has no 
underneath sub-page * for osc, in case of ... */ - PASSERT(env, page, slice != NULL); + PASSERT(env, page, slice); page = slice->cpl_page; /* @@ -245,9 +245,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_object *obj = page->cp_obj; PASSERT(env, page, list_empty(&page->cp_batch)); - PASSERT(env, page, page->cp_owner == NULL); - PASSERT(env, page, page->cp_req == NULL); - PASSERT(env, page, page->cp_parent == NULL); + PASSERT(env, page, !page->cp_owner); + PASSERT(env, page, !page->cp_req); + PASSERT(env, page, !page->cp_parent); PASSERT(env, page, page->cp_state == CPS_FREEING); might_sleep(); @@ -255,7 +255,7 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_page_slice *slice; slice = list_entry(page->cp_layers.next, - struct cl_page_slice, cpl_linkage); + struct cl_page_slice, cpl_linkage); list_del_init(page->cp_layers.next); slice->cpl_ops->cpo_fini(env, slice); } @@ -277,14 +277,15 @@ static inline void cl_page_state_set_trust(struct cl_page *page, } static struct cl_page *cl_page_alloc(const struct lu_env *env, - struct cl_object *o, pgoff_t ind, struct page *vmpage, - enum cl_page_type type) + struct cl_object *o, pgoff_t ind, + struct page *vmpage, + enum cl_page_type type) { struct cl_page *page; struct lu_object_header *head; page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS); - if (page != NULL) { + if (page) { int result = 0; atomic_set(&page->cp_ref, 1); @@ -303,9 +304,8 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env, mutex_init(&page->cp_mutex); lu_ref_init(&page->cp_reference); head = o->co_lu.lo_header; - list_for_each_entry(o, &head->loh_layers, - co_lu.lo_linkage) { - if (o->co_ops->coo_page_init != NULL) { + list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { + if (o->co_ops->coo_page_init) { result = o->co_ops->coo_page_init(env, o, page, vmpage); if (result != 0) { @@ -369,13 +369,13 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, */ page = cl_vmpage_page(vmpage, o); PINVRNT(env, page, - ergo(page != NULL, + ergo(page, cl_page_vmpage(env, page) == vmpage && (void *)radix_tree_lookup(&hdr->coh_tree, idx) == page)); } - if (page != NULL) + if (page) return page; /* allocate and initialize cl_page */ @@ -385,7 +385,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, if (type == CPT_TRANSIENT) { if (parent) { - LASSERT(page->cp_parent == NULL); + LASSERT(!page->cp_parent); page->cp_parent = parent; parent->cp_child = page; } @@ -418,7 +418,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, "fail to insert into radix tree: %d\n", err); } else { if (parent) { - LASSERT(page->cp_parent == NULL); + LASSERT(!page->cp_parent); page->cp_parent = parent; parent->cp_child = page; } @@ -426,7 +426,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, } spin_unlock(&hdr->coh_page_guard); - if (unlikely(ghost != NULL)) { + if (unlikely(ghost)) { cl_page_delete0(env, ghost, 0); cl_page_free(env, ghost); } @@ -467,14 +467,13 @@ static inline int cl_page_invariant(const struct cl_page *pg) owner = pg->cp_owner; return cl_page_in_use(pg) && - ergo(parent != NULL, parent->cp_child == pg) && - ergo(child != NULL, child->cp_parent == pg) && - ergo(child != NULL, pg->cp_obj != child->cp_obj) && - ergo(parent != NULL, pg->cp_obj != parent->cp_obj) && - ergo(owner != NULL && parent != NULL, + ergo(parent, parent->cp_child == pg) && + ergo(child, child->cp_parent == pg) && + ergo(child, pg->cp_obj != child->cp_obj) 
&& + ergo(parent, pg->cp_obj != parent->cp_obj) && + ergo(owner && parent, parent->cp_owner == pg->cp_owner->ci_parent) && - ergo(owner != NULL && child != NULL, - child->cp_owner->ci_parent == owner) && + ergo(owner && child, child->cp_owner->ci_parent == owner) && /* * Either page is early in initialization (has neither child * nor parent yet), or it is in the object radix tree. @@ -482,7 +481,7 @@ static inline int cl_page_invariant(const struct cl_page *pg) ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE, (void *)radix_tree_lookup(&header->coh_tree, pg->cp_index) == pg || - (child == NULL && parent == NULL)); + (!child && !parent)); } static void cl_page_state_set0(const struct lu_env *env, @@ -535,10 +534,10 @@ static void cl_page_state_set0(const struct lu_env *env, old = page->cp_state; PASSERT(env, page, allowed_transitions[old][state]); CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state); - for (; page != NULL; page = page->cp_child) { + for (; page; page = page->cp_child) { PASSERT(env, page, page->cp_state == old); PASSERT(env, page, - equi(state == CPS_OWNED, page->cp_owner != NULL)); + equi(state == CPS_OWNED, page->cp_owner)); cl_page_state_set_trust(page, state); } @@ -584,7 +583,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page) LASSERT(page->cp_state == CPS_FREEING); LASSERT(atomic_read(&page->cp_ref) == 0); - PASSERT(env, page, page->cp_owner == NULL); + PASSERT(env, page, !page->cp_owner); PASSERT(env, page, list_empty(&page->cp_batch)); /* * Page is no longer reachable by other threads. Tear @@ -609,11 +608,11 @@ struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) page = cl_page_top(page); do { list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { - if (slice->cpl_ops->cpo_vmpage != NULL) + if (slice->cpl_ops->cpo_vmpage) return slice->cpl_ops->cpo_vmpage(env, slice); } page = page->cp_child; - } while (page != NULL); + } while (page); LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */ } EXPORT_SYMBOL(cl_page_vmpage); @@ -639,10 +638,10 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) * can be rectified easily. */ top = (struct cl_page *)vmpage->private; - if (top == NULL) + if (!top) return NULL; - for (page = top; page != NULL; page = page->cp_child) { + for (page = top; page; page = page->cp_child) { if (cl_object_same(page->cp_obj, obj)) { cl_page_get_trust(page); break; @@ -689,7 +688,7 @@ EXPORT_SYMBOL(cl_page_at); cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) { \ + if (__method) { \ __result = (*__method)(__env, __scan, \ ## __VA_ARGS__); \ if (__result != 0) \ @@ -697,7 +696,7 @@ EXPORT_SYMBOL(cl_page_at); } \ } \ __page = __page->cp_child; \ - } while (__page != NULL && __result == 0); \ + } while (__page && __result == 0); \ if (__result > 0) \ __result = 0; \ __result; \ @@ -717,12 +716,12 @@ do { \ cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) \ + if (__method) \ (*__method)(__env, __scan, \ ## __VA_ARGS__); \ } \ __page = __page->cp_child; \ - } while (__page != NULL); \ + } while (__page); \ } while (0) #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \ @@ -734,19 +733,19 @@ do { \ void (*__method)_proto; \ \ /* get to the bottom page. 
*/ \ - while (__page->cp_child != NULL) \ + while (__page->cp_child) \ __page = __page->cp_child; \ do { \ list_for_each_entry_reverse(__scan, &__page->cp_layers, \ cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) \ + if (__method) \ (*__method)(__env, __scan, \ ## __VA_ARGS__); \ } \ __page = __page->cp_parent; \ - } while (__page != NULL); \ + } while (__page); \ } while (0) static int cl_page_invoke(const struct lu_env *env, @@ -772,8 +771,8 @@ static void cl_page_invoid(const struct lu_env *env, static void cl_page_owner_clear(struct cl_page *page) { - for (page = cl_page_top(page); page != NULL; page = page->cp_child) { - if (page->cp_owner != NULL) { + for (page = cl_page_top(page); page; page = page->cp_child) { + if (page->cp_owner) { LASSERT(page->cp_owner->ci_owned_nr > 0); page->cp_owner->ci_owned_nr--; page->cp_owner = NULL; @@ -784,10 +783,8 @@ static void cl_page_owner_clear(struct cl_page *page) static void cl_page_owner_set(struct cl_page *page) { - for (page = cl_page_top(page); page != NULL; page = page->cp_child) { - LASSERT(page->cp_owner != NULL); + for (page = cl_page_top(page); page; page = page->cp_child) page->cp_owner->ci_owned_nr++; - } } void cl_page_disown0(const struct lu_env *env, @@ -862,8 +859,8 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io, struct cl_io *, int), io, nonblock); if (result == 0) { - PASSERT(env, pg, pg->cp_owner == NULL); - PASSERT(env, pg, pg->cp_req == NULL); + PASSERT(env, pg, !pg->cp_owner); + PASSERT(env, pg, !pg->cp_req); pg->cp_owner = io; pg->cp_task = current; cl_page_owner_set(pg); @@ -921,7 +918,7 @@ void cl_page_assume(const struct lu_env *env, io = cl_io_top(io); cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume)); - PASSERT(env, pg, pg->cp_owner == NULL); + PASSERT(env, pg, !pg->cp_owner); pg->cp_owner = io; pg->cp_task = current; cl_page_owner_set(pg); @@ -1037,7 +1034,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, * skip removing it. 
*/ tmp = pg->cp_child; - for (; tmp != NULL; tmp = tmp->cp_child) { + for (; tmp; tmp = tmp->cp_child) { void *value; struct cl_object_header *hdr; @@ -1135,7 +1132,7 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg) pg = cl_page_top_trusted((struct cl_page *)pg); slice = container_of(pg->cp_layers.next, const struct cl_page_slice, cpl_linkage); - PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL); + PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked); /* * Call ->cpo_is_vmlocked() directly instead of going through * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by @@ -1216,7 +1213,7 @@ void cl_page_completion(const struct lu_env *env, PASSERT(env, pg, crt < CRT_NR); /* cl_page::cp_req already cleared by the caller (osc_completion()) */ - PASSERT(env, pg, pg->cp_req == NULL); + PASSERT(env, pg, !pg->cp_req); PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt)); CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret); @@ -1304,7 +1301,7 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, return -EINVAL; list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) { - if (scan->cpl_ops->io[crt].cpo_cache_add == NULL) + if (!scan->cpl_ops->io[crt].cpo_cache_add) continue; result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io); @@ -1450,8 +1447,8 @@ void cl_page_print(const struct lu_env *env, void *cookie, { struct cl_page *scan; - for (scan = cl_page_top((struct cl_page *)pg); - scan != NULL; scan = scan->cp_child) + for (scan = cl_page_top((struct cl_page *)pg); scan; + scan = scan->cp_child) cl_page_header_print(env, cookie, printer, scan); CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print), (const struct lu_env *env, @@ -1480,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) /* * XXX for now. */ - return (loff_t)idx << PAGE_CACHE_SHIFT; + return (loff_t)idx << PAGE_SHIFT; } EXPORT_SYMBOL(cl_offset); @@ -1492,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset) /* * XXX for now. */ - return offset >> PAGE_CACHE_SHIFT; + return offset >> PAGE_SHIFT; } EXPORT_SYMBOL(cl_index); int cl_page_size(const struct cl_object *obj) { - return 1 << PAGE_CACHE_SHIFT; + return 1 << PAGE_SHIFT; } EXPORT_SYMBOL(cl_page_size); diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c index 0975e4430..c2cf01596 100644 --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c @@ -42,7 +42,6 @@ #include "../../include/linux/lnet/lnetctl.h" #include "../include/lustre_debug.h" #include "../include/lprocfs_status.h" -#include "../include/lustre/lustre_build_version.h" #include <linux/list.h> #include "../include/cl_object.h" #include "llog_internal.h" @@ -52,7 +51,7 @@ EXPORT_SYMBOL(obd_devs); struct list_head obd_types; DEFINE_RWLOCK(obd_dev_lock); -/* The following are visible and mutable through /proc/sys/lustre/. */ +/* The following are visible and mutable through /sys/fs/lustre. 
*/ unsigned int obd_debug_peer_on_timeout; EXPORT_SYMBOL(obd_debug_peer_on_timeout); unsigned int obd_dump_on_timeout; @@ -67,7 +66,7 @@ unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */ EXPORT_SYMBOL(obd_timeout); unsigned int obd_timeout_set; EXPORT_SYMBOL(obd_timeout_set); -/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */ +/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */ unsigned int at_min; EXPORT_SYMBOL(at_min); unsigned int at_max = 600; @@ -180,7 +179,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) } CDEBUG(D_IOCTL, "cmd = %x\n", cmd); - if (obd_ioctl_getdata(&buf, &len, (void *)arg)) { + if (obd_ioctl_getdata(&buf, &len, (void __user *)arg)) { CERROR("OBD ioctl: data error\n"); return -EINVAL; } @@ -200,8 +199,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) err = -ENOMEM; goto out; } - err = copy_from_user(lcfg, data->ioc_pbuf1, - data->ioc_plen1); + err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1); if (!err) err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1); if (!err) @@ -218,16 +216,16 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) { + if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) { CERROR("ioctl buffer too small to hold version\n"); err = -EINVAL; goto out; } - memcpy(data->ioc_bulk, BUILD_VERSION, - strlen(BUILD_VERSION) + 1); + memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING, + strlen(LUSTRE_VERSION_STRING) + 1); - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); if (err) err = -EFAULT; goto out; @@ -246,7 +244,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - err = obd_ioctl_popdata((void *)arg, data, sizeof(*data)); + err = obd_ioctl_popdata((void __user *)arg, data, + sizeof(*data)); if (err) err = -EFAULT; goto out; @@ -283,7 +282,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1, dev); - err = obd_ioctl_popdata((void *)arg, data, sizeof(*data)); + err = obd_ioctl_popdata((void __user *)arg, data, + sizeof(*data)); if (err) err = -EFAULT; goto out; @@ -330,7 +330,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) (int)index, status, obd->obd_type->typ_name, obd->obd_name, obd->obd_uuid.uuid, atomic_read(&obd->obd_refcount)); - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); err = 0; goto out; @@ -339,7 +339,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) } if (data->ioc_dev == OBD_DEV_BY_DEVNAME) { - if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) { + if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) { err = -EINVAL; goto out; } @@ -356,7 +356,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - if (obd == NULL) { + if (!obd) { CERROR("OBD ioctl : No Device %d\n", data->ioc_dev); err = -EINVAL; goto out; @@ -388,7 +388,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) if (err) goto out; - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); if (err) err = -EFAULT; goto out; @@ -461,9 +461,9 @@ static int obd_init_checks(void) CWARN("LPD64 wrong length! 
strlen(%s)=%d != 2\n", buf, len); ret = -EINVAL; } - if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { + if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) { CWARN("mask failed: u64val %llu >= %llu\n", u64val, - (__u64)PAGE_CACHE_SIZE); + (__u64)PAGE_SIZE); ret = -EINVAL; } @@ -473,13 +473,13 @@ extern int class_procfs_init(void); extern int class_procfs_clean(void); -static int __init init_obdclass(void) +static int __init obdclass_init(void) { int i, err; int lustre_register_fs(void); - LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n"); + LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n"); spin_lock_init(&obd_types_lock); obd_zombie_impexp_init(); @@ -507,8 +507,9 @@ static int __init init_obdclass(void) /* Default the dirty page cache cap to 1/2 of system memory. * For clients with less memory, a larger fraction is needed - * for other purposes (mostly for BGL). */ - if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) + * for other purposes (mostly for BGL). + */ + if (totalram_pages <= 512 << (20 - PAGE_SHIFT)) obd_max_dirty_pages = totalram_pages / 4; else obd_max_dirty_pages = totalram_pages / 2; @@ -542,9 +543,7 @@ static int __init init_obdclass(void) return err; } -/* liblustre doesn't call cleanup_obdclass, apparently. we carry on in this - * ifdef to the end of the file to cover module and versioning goo.*/ -static void cleanup_obdclass(void) +static void obdclass_exit(void) { int i; @@ -577,9 +576,9 @@ static void cleanup_obdclass(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Lustre Class Driver"); MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); -module_init(init_obdclass); -module_exit(cleanup_obdclass); +module_init(obdclass_init); +module_exit(obdclass_exit); diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c index 228c44c37..cf97b8f06 100644 --- a/drivers/staging/lustre/lustre/obdclass/genops.c +++ b/drivers/staging/lustre/lustre/obdclass/genops.c @@ -42,6 +42,7 @@ #define DEBUG_SUBSYSTEM S_CLASS #include "../include/obd_class.h" #include "../include/lprocfs_status.h" +#include "../include/lustre_kernelcomm.h" spinlock_t obd_types_lock; @@ -68,18 +69,17 @@ static struct obd_device *obd_device_alloc(void) { struct obd_device *obd; - obd = kmem_cache_alloc(obd_device_cachep, GFP_NOFS | __GFP_ZERO); - if (obd != NULL) + obd = kmem_cache_zalloc(obd_device_cachep, GFP_NOFS); + if (obd) obd->obd_magic = OBD_DEVICE_MAGIC; return obd; } static void obd_device_free(struct obd_device *obd) { - LASSERT(obd != NULL); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n", obd, obd->obd_magic, OBD_DEVICE_MAGIC); - if (obd->obd_namespace != NULL) { + if (obd->obd_namespace) { CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n", obd, obd->obd_namespace, obd->obd_force); LBUG(); @@ -112,15 +112,6 @@ static struct obd_type *class_get_type(const char *name) if (!type) { const char *modname = name; - if (strcmp(modname, "obdfilter") == 0) - modname = "ofd"; - - if (strcmp(modname, LUSTRE_LWP_NAME) == 0) - modname = LUSTRE_OSP_NAME; - - if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME))) - modname = LUSTRE_MDT_NAME; - if (!request_module("%s", modname)) { CDEBUG(D_INFO, "Loaded module '%s'\n", modname); type = class_search_type(name); @@ -202,7 +193,7 @@ int class_register_type(struct obd_ops 
*dt_ops, struct md_ops *md_ops, goto failed; } - if (ldt != NULL) { + if (ldt) { type->typ_lu = ldt; rc = lu_device_type_init(ldt); if (rc != 0) @@ -364,7 +355,7 @@ void class_release_dev(struct obd_device *obd) obd, obd->obd_magic, OBD_DEVICE_MAGIC); LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, obd_devs[obd->obd_minor]); - LASSERT(obd_type != NULL); + LASSERT(obd_type); CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n", obd->obd_name, obd->obd_minor, obd->obd_type->typ_name); @@ -390,7 +381,8 @@ int class_name2dev(const char *name) if (obd && strcmp(name, obd->obd_name) == 0) { /* Make sure we finished attaching before we give - out any references */ + * out any references + */ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); if (obd->obd_attached) { read_unlock(&obd_dev_lock); @@ -465,11 +457,12 @@ struct obd_device *class_num2obd(int num) EXPORT_SYMBOL(class_num2obd); /* Search for a client OBD connected to tgt_uuid. If grp_uuid is - specified, then only the client with that uuid is returned, - otherwise any client connected to the tgt is returned. */ + * specified, then only the client with that uuid is returned, + * otherwise any client connected to the tgt is returned. + */ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid) + const char *typ_name, + struct obd_uuid *grp_uuid) { int i; @@ -497,9 +490,10 @@ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, EXPORT_SYMBOL(class_find_client_obd); /* Iterate the obd_device list looking devices have grp_uuid. Start - searching at *next, and if a device is found, the next index to look - at is saved in *next. If next is NULL, then the first matching device - will always be returned. */ + * searching at *next, and if a device is found, the next index to look + * at is saved in *next. If next is NULL, then the first matching device + * will always be returned. + */ struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next) { int i; @@ -588,21 +582,21 @@ int obd_init_caches(void) { LASSERT(!obd_device_cachep); obd_device_cachep = kmem_cache_create("ll_obd_dev_cache", - sizeof(struct obd_device), - 0, 0, NULL); + sizeof(struct obd_device), + 0, 0, NULL); if (!obd_device_cachep) goto out; LASSERT(!obdo_cachep); obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo), - 0, 0, NULL); + 0, 0, NULL); if (!obdo_cachep) goto out; LASSERT(!import_cachep); import_cachep = kmem_cache_create("ll_import_cache", - sizeof(struct obd_import), - 0, 0, NULL); + sizeof(struct obd_import), + 0, 0, NULL); if (!import_cachep) goto out; @@ -658,7 +652,7 @@ static void class_export_destroy(struct obd_export *exp) struct obd_device *obd = exp->exp_obd; LASSERT_ATOMIC_ZERO(&exp->exp_refcount); - LASSERT(obd != NULL); + LASSERT(obd); CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp, exp->exp_client_uuid.uuid, obd->obd_name); @@ -698,7 +692,6 @@ EXPORT_SYMBOL(class_export_get); void class_export_put(struct obd_export *exp) { - LASSERT(exp != NULL); LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp, atomic_read(&exp->exp_refcount) - 1); @@ -718,7 +711,8 @@ EXPORT_SYMBOL(class_export_put); /* Creates a new export, adds it to the hash table, and returns a * pointer to it. The refcount is 2: one for the hash reference, and - * one for the pointer returned by this function. */ + * one for the pointer returned by this function. 
+ */ struct obd_export *class_new_export(struct obd_device *obd, struct obd_uuid *cluuid) { @@ -834,7 +828,7 @@ EXPORT_SYMBOL(class_unlink_export); static void class_import_destroy(struct obd_import *imp) { CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp, - imp->imp_obd->obd_name); + imp->imp_obd->obd_name); LASSERT_ATOMIC_ZERO(&imp->imp_refcount); @@ -844,7 +838,7 @@ static void class_import_destroy(struct obd_import *imp) struct obd_import_conn *imp_conn; imp_conn = list_entry(imp->imp_conn_list.next, - struct obd_import_conn, oic_item); + struct obd_import_conn, oic_item); list_del_init(&imp_conn->oic_item); ptlrpc_put_connection_superhack(imp_conn->oic_conn); kfree(imp_conn); @@ -901,8 +895,9 @@ static void init_imp_at(struct imp_at *at) at_init(&at->iat_net_latency, 0, 0); for (i = 0; i < IMP_AT_MAX_PORTALS; i++) { /* max service estimates are tracked on the server side, so - don't use the AT history here, just use the last reported - val. (But keep hist for proc histogram, worst_ever) */ + * don't use the AT history here, just use the last reported + * val. (But keep hist for proc histogram, worst_ever) + */ at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT, AT_FLG_NOHIST); } @@ -941,7 +936,8 @@ struct obd_import *class_new_import(struct obd_device *obd) init_imp_at(&imp->imp_at); /* the default magic is V2, will be used in connect RPC, and - * then adjusted according to the flags in request/reply. */ + * then adjusted according to the flags in request/reply. + */ imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2; return imp; @@ -950,7 +946,7 @@ EXPORT_SYMBOL(class_new_import); void class_destroy_import(struct obd_import *import) { - LASSERT(import != NULL); + LASSERT(import); LASSERT(import != LP_POISON); class_handle_unhash(&import->imp_handle); @@ -970,8 +966,7 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) LASSERT(lock->l_exp_refs_nr >= 0); - if (lock->l_exp_refs_target != NULL && - lock->l_exp_refs_target != exp) { + if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) { LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n", exp, lock, lock->l_exp_refs_target); } @@ -1005,17 +1000,18 @@ EXPORT_SYMBOL(__class_export_del_lock_ref); #endif /* A connection defines an export context in which preallocation can - be managed. This releases the export pointer reference, and returns - the export handle, so the export refcount is 1 when this function - returns. */ + * be managed. This releases the export pointer reference, and returns + * the export handle, so the export refcount is 1 when this function + * returns. + */ int class_connect(struct lustre_handle *conn, struct obd_device *obd, struct obd_uuid *cluuid) { struct obd_export *export; - LASSERT(conn != NULL); - LASSERT(obd != NULL); - LASSERT(cluuid != NULL); + LASSERT(conn); + LASSERT(obd); + LASSERT(cluuid); export = class_new_export(obd, cluuid); if (IS_ERR(export)) @@ -1035,7 +1031,8 @@ EXPORT_SYMBOL(class_connect); * and if disconnect really need * 2 - removing from hash * 3 - in client_unlink_export - * The export pointer passed to this function can destroyed */ + * The export pointer passed to this function can destroyed + */ int class_disconnect(struct obd_export *export) { int already_disconnected; @@ -1052,7 +1049,8 @@ int class_disconnect(struct obd_export *export) /* class_cleanup(), abort_recovery(), and class_fail_export() * all end up in here, and if any of them race we shouldn't - * call extra class_export_puts(). 
*/ + * call extra class_export_puts(). + */ if (already_disconnected) goto no_disconn; @@ -1092,7 +1090,8 @@ void class_fail_export(struct obd_export *exp) /* Most callers into obd_disconnect are removing their own reference * (request, for example) in addition to the one from the hash table. - * We don't have such a reference here, so make one. */ + * We don't have such a reference here, so make one. + */ class_export_get(exp); rc = obd_disconnect(exp); if (rc) @@ -1126,29 +1125,29 @@ static void obd_zombie_impexp_cull(void) import = NULL; if (!list_empty(&obd_zombie_imports)) { import = list_entry(obd_zombie_imports.next, - struct obd_import, - imp_zombie_chain); + struct obd_import, + imp_zombie_chain); list_del_init(&import->imp_zombie_chain); } export = NULL; if (!list_empty(&obd_zombie_exports)) { export = list_entry(obd_zombie_exports.next, - struct obd_export, - exp_obd_chain); + struct obd_export, + exp_obd_chain); list_del_init(&export->exp_obd_chain); } spin_unlock(&obd_zombie_impexp_lock); - if (import != NULL) { + if (import) { class_import_destroy(import); spin_lock(&obd_zombie_impexp_lock); zombies_count--; spin_unlock(&obd_zombie_impexp_lock); } - if (export != NULL) { + if (export) { class_export_destroy(export); spin_lock(&obd_zombie_impexp_lock); zombies_count--; @@ -1156,7 +1155,7 @@ static void obd_zombie_impexp_cull(void) } cond_resched(); - } while (import != NULL || export != NULL); + } while (import || export); } static struct completion obd_zombie_start; diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c new file mode 100644 index 000000000..8405eccda --- /dev/null +++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c @@ -0,0 +1,246 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * Author: Nathan Rutman + * + * Kernel <-> userspace communication routines. + * Using pipes for all arches. + */ + +#define DEBUG_SUBSYSTEM S_CLASS +#define D_KUC D_OTHER + +#include "../include/obd_support.h" +#include "../include/lustre_kernelcomm.h" + +/** + * libcfs_kkuc_msg_put - send an message from kernel to userspace + * @param fp to send the message to + * @param payload Payload data. 
First field of payload is always + * struct kuc_hdr + */ +int libcfs_kkuc_msg_put(struct file *filp, void *payload) +{ + struct kuc_hdr *kuch = (struct kuc_hdr *)payload; + ssize_t count = kuch->kuc_msglen; + loff_t offset = 0; + mm_segment_t fs; + int rc = -ENXIO; + + if (IS_ERR_OR_NULL(filp)) + return -EBADF; + + if (kuch->kuc_magic != KUC_MAGIC) { + CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic); + return rc; + } + + fs = get_fs(); + set_fs(KERNEL_DS); + while (count > 0) { + rc = vfs_write(filp, (void __force __user *)payload, + count, &offset); + if (rc < 0) + break; + count -= rc; + payload += rc; + rc = 0; + } + set_fs(fs); + + if (rc < 0) + CWARN("message send failed (%d)\n", rc); + else + CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp); + + return rc; +} +EXPORT_SYMBOL(libcfs_kkuc_msg_put); + +/* + * Broadcast groups are global across all mounted filesystems; + * i.e. registering for a group on 1 fs will get messages for that + * group from any fs + */ +/** A single group registration has a uid and a file pointer */ +struct kkuc_reg { + struct list_head kr_chain; + int kr_uid; + struct file *kr_fp; + char kr_data[0]; +}; + +static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {}; +/* Protect message sending against remove and adds */ +static DECLARE_RWSEM(kg_sem); + +/** Add a receiver to a broadcast group + * @param filp pipe to write into + * @param uid identifier for this receiver + * @param group group number + * @param data user data + */ +int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, + void *data, size_t data_len) +{ + struct kkuc_reg *reg; + + if (group > KUC_GRP_MAX) { + CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); + return -EINVAL; + } + + /* fput in group_rem */ + if (!filp) + return -EBADF; + + /* freed in group_rem */ + reg = kmalloc(sizeof(*reg) + data_len, 0); + if (!reg) + return -ENOMEM; + + reg->kr_fp = filp; + reg->kr_uid = uid; + memcpy(reg->kr_data, data, data_len); + + down_write(&kg_sem); + if (!kkuc_groups[group].next) + INIT_LIST_HEAD(&kkuc_groups[group]); + list_add(®->kr_chain, &kkuc_groups[group]); + up_write(&kg_sem); + + CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group); + + return 0; +} +EXPORT_SYMBOL(libcfs_kkuc_group_add); + +int libcfs_kkuc_group_rem(int uid, unsigned int group) +{ + struct kkuc_reg *reg, *next; + + if (!kkuc_groups[group].next) + return 0; + + if (!uid) { + /* Broadcast a shutdown message */ + struct kuc_hdr lh; + + lh.kuc_magic = KUC_MAGIC; + lh.kuc_transport = KUC_TRANSPORT_GENERIC; + lh.kuc_msgtype = KUC_MSG_SHUTDOWN; + lh.kuc_msglen = sizeof(lh); + libcfs_kkuc_group_put(group, &lh); + } + + down_write(&kg_sem); + list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { + if (!uid || (uid == reg->kr_uid)) { + list_del(®->kr_chain); + CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", + reg->kr_uid, reg->kr_fp, group); + if (reg->kr_fp) + fput(reg->kr_fp); + kfree(reg); + } + } + up_write(&kg_sem); + + return 0; +} +EXPORT_SYMBOL(libcfs_kkuc_group_rem); + +int libcfs_kkuc_group_put(unsigned int group, void *payload) +{ + struct kkuc_reg *reg; + int rc = 0; + int one_success = 0; + + down_write(&kg_sem); + list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { + if (reg->kr_fp) { + rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); + if (!rc) { + one_success = 1; + } else if (rc == -EPIPE) { + fput(reg->kr_fp); + reg->kr_fp = NULL; + } + } + } + up_write(&kg_sem); + + /* + * don't return an error if the message has been delivered + * at least 
to one agent + */ + if (one_success) + rc = 0; + + return rc; +} +EXPORT_SYMBOL(libcfs_kkuc_group_put); + +/** + * Calls a callback function for each link of the given kuc group. + * @param group the group to call the function on. + * @param cb_func the function to be called. + * @param cb_arg extra argument to be passed to the callback function. + */ +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, + void *cb_arg) +{ + struct kkuc_reg *reg; + int rc = 0; + + if (group > KUC_GRP_MAX) { + CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); + return -EINVAL; + } + + /* no link for this group */ + if (!kkuc_groups[group].next) + return 0; + + down_read(&kg_sem); + list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { + if (reg->kr_fp) + rc = cb_func(reg->kr_data, cb_arg); + } + up_read(&kg_sem); + + return rc; +} +EXPORT_SYMBOL(libcfs_kkuc_group_foreach); diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c index a055cbb4f..8eddf206f 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include #include @@ -71,17 +70,16 @@ #include "../../include/obd_class.h" #include "../../include/lprocfs_status.h" #include "../../include/lustre_ver.h" -#include "../../include/lustre/lustre_build_version.h" /* buffer MUST be at least the size of obd_ioctl_hdr */ -int obd_ioctl_getdata(char **buf, int *len, void *arg) +int obd_ioctl_getdata(char **buf, int *len, void __user *arg) { struct obd_ioctl_hdr hdr; struct obd_ioctl_data *data; int err; int offset = 0; - if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) + if (copy_from_user(&hdr, arg, sizeof(hdr))) return -EFAULT; if (hdr.ioc_version != OBD_IOCTL_VERSION) { @@ -104,9 +102,10 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) /* When there are lots of processes calling vmalloc on multi-core * system, the high lock contention will hurt performance badly, * obdfilter-survey is an example, which relies on ioctl. So we'd - * better avoid vmalloc on ioctl path. LU-66 */ + * better avoid vmalloc on ioctl path. 
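
obd_ioctl_getdata(), continued below, is an instance of the usual two-phase ioctl copy: fetch a fixed-size header to learn the payload length, validate it, then copy the whole buffer once. A minimal sketch under that assumption; my_hdr, my_getdata and MY_IOC_MAX are hypothetical, and plain kzalloc() stands in for libcfs_kvzalloc():

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_IOC_MAX	(1 << 20)	/* arbitrary sanity cap */

struct my_hdr {
	u32 version;
	u32 len;	/* total length, header included */
};

static int my_getdata(char **buf, int *len, void __user *arg)
{
	struct my_hdr hdr;

	if (copy_from_user(&hdr, arg, sizeof(hdr)))
		return -EFAULT;
	if (hdr.len < sizeof(hdr) || hdr.len > MY_IOC_MAX)
		return -EINVAL;		/* reject before allocating */

	*buf = kzalloc(hdr.len, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;
	if (copy_from_user(*buf, arg, hdr.len)) {	/* header again + payload */
		kfree(*buf);
		return -EFAULT;
	}
	*len = hdr.len;
	return 0;
}
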
LU-66 + */ *buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS); - if (*buf == NULL) { + if (!*buf) { CERROR("Cannot allocate control buffer of len %d\n", hdr.ioc_len); return -EINVAL; @@ -114,7 +113,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) *len = hdr.ioc_len; data = (struct obd_ioctl_data *)*buf; - if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) { + if (copy_from_user(*buf, arg, hdr.ioc_len)) { err = -EFAULT; goto free_buf; } @@ -144,9 +143,8 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) offset += cfs_size_round(data->ioc_inllen3); } - if (data->ioc_inllen4) { + if (data->ioc_inllen4) data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset; - } return 0; @@ -156,7 +154,7 @@ free_buf: } EXPORT_SYMBOL(obd_ioctl_getdata); -int obd_ioctl_popdata(void *arg, void *data, int len) +int obd_ioctl_popdata(void __user *arg, void *data, int len) { int err; @@ -240,7 +238,7 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr, struct obd_device *obd; obd = class_num2obd(i); - if (obd == NULL || !obd->obd_attached || !obd->obd_set_up) + if (!obd || !obd->obd_attached || !obd->obd_set_up) continue; LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); @@ -250,9 +248,8 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr, class_incref(obd, __func__, current); read_unlock(&obd_dev_lock); - if (obd_health_check(NULL, obd)) { + if (obd_health_check(NULL, obd)) healthy = false; - } class_decref(obd, __func__, current); read_lock(&obd_dev_lock); } @@ -360,7 +357,7 @@ static int obd_device_list_seq_show(struct seq_file *p, void *v) struct obd_device *obd = class_num2obd((int)index); char *status; - if (obd == NULL) + if (!obd) return 0; LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); @@ -424,7 +421,7 @@ int class_procfs_init(void) struct dentry *file; lustre_kobj = kobject_create_and_add("lustre", fs_kobj); - if (lustre_kobj == NULL) + if (!lustre_kobj) goto out; /* Create the files associated with this kobject */ @@ -456,8 +453,7 @@ out: int class_procfs_clean(void) { - if (debugfs_lustre_root != NULL) - debugfs_remove_recursive(debugfs_lustre_root); + debugfs_remove_recursive(debugfs_lustre_root); debugfs_lustre_root = NULL; diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c index 9496c09b2..b41b65e2f 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c @@ -47,7 +47,6 @@ #include "../../include/lustre/lustre_idl.h" #include -#include /* for PAGE_CACHE_SIZE */ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) { @@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) dst->i_blkbits = ffs(src->o_blksize) - 1; - if (dst->i_blkbits < PAGE_CACHE_SHIFT) - dst->i_blkbits = PAGE_CACHE_SHIFT; + if (dst->i_blkbits < PAGE_SHIFT) + dst->i_blkbits = PAGE_SHIFT; /* allocation of space */ if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c index 42fc26f4a..e6bf414a4 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c @@ -62,8 +62,8 @@ struct static_lustre_uintvalue_attr { }; static ssize_t static_uintvalue_show(struct kobject *kobj, - struct attribute *attr, - 
char *buf) + struct attribute *attr, + char *buf) { struct static_lustre_uintvalue_attr *lattr = (void *)attr; @@ -71,8 +71,8 @@ static ssize_t static_uintvalue_show(struct kobject *kobj, } static ssize_t static_uintvalue_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, size_t count) + struct attribute *attr, + const char *buffer, size_t count) { struct static_lustre_uintvalue_attr *lattr = (void *)attr; int rc; @@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%ul\n", - obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT))); + obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT))); } static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, @@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, if (rc) return rc; - val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */ + val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */ if (val > ((totalram_pages / 10) * 9)) { /* Somebody wants to assign too much memory to dirty pages */ return -EINVAL; } - if (val < 4 << (20 - PAGE_CACHE_SHIFT)) { + if (val < 4 << (20 - PAGE_SHIFT)) { /* Less than 4 Mb for dirty cache is also bad */ return -EINVAL; } diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c index f956d7ed6..992573eae 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog.c +++ b/drivers/staging/lustre/lustre/obdclass/llog.c @@ -76,8 +76,6 @@ static struct llog_handle *llog_alloc_handle(void) */ static void llog_free_handle(struct llog_handle *loghandle) { - LASSERT(loghandle != NULL); - /* failed llog_init_handle */ if (!loghandle->lgh_hdr) goto out; @@ -115,7 +113,7 @@ static int llog_read_header(const struct lu_env *env, if (rc) return rc; - if (lop->lop_read_header == NULL) + if (!lop->lop_read_header) return -EOPNOTSUPP; rc = lop->lop_read_header(env, handle); @@ -144,7 +142,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, struct llog_log_hdr *llh; int rc; - LASSERT(handle->lgh_hdr == NULL); + LASSERT(!handle->lgh_hdr); llh = kzalloc(sizeof(*llh), GFP_NOFS); if (!llh) @@ -228,11 +226,11 @@ static int llog_process_thread(void *arg) return 0; } - if (cd != NULL) { + if (cd) { last_called_index = cd->lpcd_first_idx; index = cd->lpcd_first_idx + 1; } - if (cd != NULL && cd->lpcd_last_idx) + if (cd && cd->lpcd_last_idx) last_index = cd->lpcd_last_idx; else last_index = LLOG_BITMAP_BYTES * 8 - 1; @@ -262,7 +260,8 @@ repeat: /* NB: when rec->lrh_len is accessed it is already swabbed * since it is used at the "end" of the loop and the rec - * swabbing is done at the beginning of the loop. */ + * swabbing is done at the beginning of the loop. + */ for (rec = (struct llog_rec_hdr *)buf; (char *)rec < buf + LLOG_CHUNK_SIZE; rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) { @@ -328,7 +327,7 @@ repeat: } out: - if (cd != NULL) + if (cd) cd->lpcd_last_idx = last_called_index; kfree(buf); @@ -366,27 +365,28 @@ int llog_process_or_fork(const struct lu_env *env, int rc; lpi = kzalloc(sizeof(*lpi), GFP_NOFS); - if (!lpi) { - CERROR("cannot alloc pointer\n"); + if (!lpi) return -ENOMEM; - } lpi->lpi_loghandle = loghandle; lpi->lpi_cb = cb; lpi->lpi_cbdata = data; lpi->lpi_catdata = catdata; if (fork) { + struct task_struct *task; + /* The new thread can't use parent env, - * init the new one in llog_process_thread_daemonize. */ + * init the new one in llog_process_thread_daemonize. 
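
The llog_process_or_fork() hunk just below replaces a PTR_ERR()-on-int dance with the standard kthread_run() error check. The idiom in isolation, with a hypothetical worker:

#include <linux/kthread.h>
#include <linux/err.h>

static int my_worker(void *data)	/* hypothetical thread body */
{
	return 0;
}

static int my_start_worker(void *data)
{
	struct task_struct *task;

	task = kthread_run(my_worker, data, "my_worker");
	if (IS_ERR(task))		/* test the pointer, not a casted int */
		return PTR_ERR(task);	/* e.g. -ENOMEM from kthread_create() */
	return 0;
}
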
+ */ lpi->lpi_env = NULL; init_completion(&lpi->lpi_completion); - rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi, - "llog_process_thread")); - if (IS_ERR_VALUE(rc)) { + task = kthread_run(llog_process_thread_daemonize, lpi, + "llog_process_thread"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("%s: cannot start thread: rc = %d\n", loghandle->lgh_ctxt->loc_obd->obd_name, rc); - kfree(lpi); - return rc; + goto out_lpi; } wait_for_completion(&lpi->lpi_completion); } else { @@ -394,6 +394,7 @@ int llog_process_or_fork(const struct lu_env *env, llog_process_thread(lpi); } rc = lpi->lpi_rc; +out_lpi: kfree(lpi); return rc; } @@ -416,13 +417,13 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, LASSERT(ctxt); LASSERT(ctxt->loc_logops); - if (ctxt->loc_logops->lop_open == NULL) { + if (!ctxt->loc_logops->lop_open) { *lgh = NULL; return -EOPNOTSUPP; } *lgh = llog_alloc_handle(); - if (*lgh == NULL) + if (!*lgh) return -ENOMEM; (*lgh)->lgh_ctxt = ctxt; (*lgh)->lgh_logops = ctxt->loc_logops; @@ -449,7 +450,7 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle) rc = llog_handle2ops(loghandle, &lop); if (rc) goto out; - if (lop->lop_close == NULL) { + if (!lop->lop_close) { rc = -EOPNOTSUPP; goto out; } diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c index 0f05e9c4a..c27d4ec1d 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c @@ -69,12 +69,12 @@ static int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *loghandle; int rc = 0; - if (cathandle == NULL) + if (!cathandle) return -EBADF; down_write(&cathandle->lgh_lock); list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + u.phd.phd_entry) { struct llog_logid *cgl = &loghandle->lgh_id; if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) && @@ -130,7 +130,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle) int rc; list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + u.phd.phd_entry) { /* unlink open-not-created llogs */ list_del_init(&loghandle->u.phd.phd_entry); llog_close(env, loghandle); diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c index 9bc51998c..826623f52 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c @@ -88,7 +88,8 @@ int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt) spin_unlock(&obd->obd_dev_lock); /* obd->obd_starting is needed for the case of cleanup - * in error case while obd is starting up. */ + * in error case while obd is starting up. 
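
llog_open() and llog_close() above both follow the optional-method convention of the lop table: a NULL slot means the backend does not implement the operation. A stripped-down sketch of that dispatch; my_log and my_log_ops are illustrative, not the real llog types:

#include <linux/errno.h>

struct my_log;

struct my_log_ops {
	int (*lop_open)(struct my_log *h);	/* NULL means unsupported */
};

struct my_log {
	const struct my_log_ops *ops;
};

static int my_log_open(struct my_log *h)
{
	if (!h->ops->lop_open)
		return -EOPNOTSUPP;
	return h->ops->lop_open(h);
}
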
+ */ LASSERTF(obd->obd_starting == 1 || obd->obd_stopping == 1 || obd->obd_set_up == 0, "wrong obd state: %d/%d/%d\n", !!obd->obd_starting, @@ -110,11 +111,8 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt) struct obd_llog_group *olg; int rc, idx; - LASSERT(ctxt != NULL); - LASSERT(ctxt != LP_POISON); - olg = ctxt->loc_olg; - LASSERT(olg != NULL); + LASSERT(olg); LASSERT(olg != LP_POISON); idx = ctxt->loc_idx; @@ -151,7 +149,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd, if (index < 0 || index >= LLOG_MAX_CTXTS) return -EINVAL; - LASSERT(olg != NULL); + LASSERT(olg); ctxt = llog_new_ctxt(obd); if (!ctxt) diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c index 3aa7393b2..967ba2e1b 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c @@ -346,7 +346,6 @@ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg) __swab32s(&lcfg->lcfg_buflens[i]); print_lustre_cfg(lcfg); - return; } EXPORT_SYMBOL(lustre_swab_lustre_cfg); @@ -387,7 +386,8 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) * * Overwrite fields from the end first, so they are not * clobbered, and use memmove() instead of memcpy() because - * the source and target buffers overlap. bug 16771 */ + * the source and target buffers overlap. bug 16771 + */ createtime = cm32->cm_createtime; canceltime = cm32->cm_canceltime; memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32); @@ -406,7 +406,5 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) __swab64s(&marker->cm_createtime); __swab64s(&marker->cm_canceltime); } - - return; } EXPORT_SYMBOL(lustre_swab_cfg_marker); diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c index 6acc4a10f..13aca5b93 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c @@ -48,14 +48,15 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount) int smp_id; unsigned long flags = 0; - if (stats == NULL) + if (!stats) return; LASSERTF(0 <= idx && idx < stats->ls_num, "idx %d, ls_num %hu\n", idx, stats->ls_num); /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. */ + * single CPU area, so the smp_id should be 0 always. + */ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); if (smp_id < 0) return; @@ -96,14 +97,15 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount) int smp_id; unsigned long flags = 0; - if (stats == NULL) + if (!stats) return; LASSERTF(0 <= idx && idx < stats->ls_num, "idx %d, ls_num %hu\n", idx, stats->ls_num); /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. */ + * single CPU area, so the smp_id should be 0 always. 
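
lprocfs keeps one counter slot per CPU and resolves the slot through lprocfs_stats_lock(), as the hunks around this point show. The stock percpu API expresses the same update pattern more directly; a sketch with hypothetical names, assuming my_stats was set up once with alloc_percpu():

#include <linux/percpu.h>
#include <linux/types.h>

struct my_counter {
	u64 count;
	s64 sum;
};

static struct my_counter __percpu *my_stats;	/* alloc_percpu(struct my_counter) at init */

static void my_counter_add(long amount)
{
	struct my_counter *c = get_cpu_ptr(my_stats);	/* disables preemption */

	c->count++;
	c->sum += amount;
	put_cpu_ptr(my_stats);
}
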
+ */ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); if (smp_id < 0) return; diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c index 51fe15f5d..d93f42fee 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c @@ -109,7 +109,7 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep) __u64 mask = 1; int i, ret = 0; - for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) { + for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { if (flags & mask) ret += snprintf(page + ret, count - ret, "%s%s", ret ? sep : "", obd_connect_names[i]); @@ -149,10 +149,10 @@ int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, } /* * Need to think these cases : - * 1. #echo x.00 > /proc/xxx output result : x - * 2. #echo x.0x > /proc/xxx output result : x.0x - * 3. #echo x.x0 > /proc/xxx output result : x.x - * 4. #echo x.xx > /proc/xxx output result : x.xx + * 1. #echo x.00 > /sys/xxx output result : x + * 2. #echo x.0x > /sys/xxx output result : x.0x + * 3. #echo x.x0 > /sys/xxx output result : x.x + * 4. #echo x.xx > /sys/xxx output result : x.xx * Only reserved 2 bits fraction. */ for (i = 0; i < (5 - prtn); i++) @@ -199,7 +199,7 @@ int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count, if (pbuf == end) return -EINVAL; - if (end != NULL && *end == '.') { + if (end && *end == '.') { int temp_val, pow = 1; int i; @@ -247,7 +247,7 @@ struct dentry *ldebugfs_add_simple(struct dentry *root, struct dentry *entry; umode_t mode = 0; - if (root == NULL || name == NULL || fops == NULL) + if (!root || !name || !fops) return ERR_PTR(-EINVAL); if (fops->read) @@ -256,12 +256,12 @@ struct dentry *ldebugfs_add_simple(struct dentry *root, mode |= 0200; entry = debugfs_create_file(name, mode, root, data, fops); if (IS_ERR_OR_NULL(entry)) { - CERROR("LprocFS: No memory to create entry %s", name); + CERROR("LprocFS: No memory to create entry %s\n", name); return entry ?: ERR_PTR(-ENOMEM); } return entry; } -EXPORT_SYMBOL(ldebugfs_add_simple); +EXPORT_SYMBOL_GPL(ldebugfs_add_simple); static struct file_operations lprocfs_generic_fops = { }; @@ -272,7 +272,7 @@ int ldebugfs_add_vars(struct dentry *parent, if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list)) return -EINVAL; - while (list->name != NULL) { + while (list->name) { struct dentry *entry; umode_t mode = 0; @@ -294,14 +294,14 @@ int ldebugfs_add_vars(struct dentry *parent, } return 0; } -EXPORT_SYMBOL(ldebugfs_add_vars); +EXPORT_SYMBOL_GPL(ldebugfs_add_vars); void ldebugfs_remove(struct dentry **entryp) { debugfs_remove_recursive(*entryp); *entryp = NULL; } -EXPORT_SYMBOL(ldebugfs_remove); +EXPORT_SYMBOL_GPL(ldebugfs_remove); struct dentry *ldebugfs_register(const char *name, struct dentry *parent, @@ -327,7 +327,7 @@ struct dentry *ldebugfs_register(const char *name, out: return entry; } -EXPORT_SYMBOL(ldebugfs_register); +EXPORT_SYMBOL_GPL(ldebugfs_register); /* Generic callbacks */ int lprocfs_rd_uint(struct seq_file *m, void *data) @@ -491,7 +491,7 @@ int lprocfs_rd_server_uuid(struct seq_file *m, void *data) char *imp_state_name = NULL; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -514,7 +514,7 @@ int lprocfs_rd_conn_uuid(struct seq_file *m, void *data) struct ptlrpc_connection *conn; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) @@ -543,7 
+543,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, memset(cnt, 0, sizeof(*cnt)); - if (stats == NULL) { + if (!stats) { /* set count to 1 to avoid divide-by-zero errs in callers */ cnt->lc_count = 1; return; @@ -554,7 +554,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_entry; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; percpu_cntr = lprocfs_stats_counter_get(stats, i, idx); @@ -577,7 +577,7 @@ EXPORT_SYMBOL(lprocfs_stats_collect); #define flag2str(flag, first) \ do { \ if (imp->imp_##flag) \ - seq_printf(m, "%s" #flag, first ? "" : ", "); \ + seq_printf(m, "%s" #flag, first ? "" : ", "); \ } while (0) static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m) { @@ -604,16 +604,16 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep int i; bool first = true; - for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) { + for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { if (flags & mask) { seq_printf(m, "%s%s", - first ? sep : "", obd_connect_names[i]); + first ? sep : "", obd_connect_names[i]); first = false; } } if (flags & ~(mask - 1)) seq_printf(m, "%sunknown flags %#llx", - first ? sep : "", flags & ~(mask - 1)); + first ? sep : "", flags & ~(mask - 1)); } int lprocfs_rd_import(struct seq_file *m, void *data) @@ -629,7 +629,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data) int rw = 0; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -637,26 +637,27 @@ int lprocfs_rd_import(struct seq_file *m, void *data) imp = obd->u.cli.cl_import; seq_printf(m, - "import:\n" - " name: %s\n" - " target: %s\n" - " state: %s\n" - " instance: %u\n" - " connect_flags: [", - obd->obd_name, - obd2cli_tgt(obd), - ptlrpc_import_state_name(imp->imp_state), - imp->imp_connect_data.ocd_instance); - obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", "); + "import:\n" + " name: %s\n" + " target: %s\n" + " state: %s\n" + " instance: %u\n" + " connect_flags: [ ", + obd->obd_name, + obd2cli_tgt(obd), + ptlrpc_import_state_name(imp->imp_state), + imp->imp_connect_data.ocd_instance); + obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, + ", "); seq_printf(m, - "]\n" - " import_flags: ["); + " ]\n" + " import_flags: [ "); obd_import_flags2str(imp, m); seq_printf(m, - "]\n" - " connection:\n" - " failover_nids: ["); + " ]\n" + " connection:\n" + " failover_nids: [ "); spin_lock(&imp->imp_lock); j = 0; list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { @@ -665,24 +666,24 @@ int lprocfs_rd_import(struct seq_file *m, void *data) seq_printf(m, "%s%s", j ? 
", " : "", nidstr); j++; } - if (imp->imp_connection != NULL) + if (imp->imp_connection) libcfs_nid2str_r(imp->imp_connection->c_peer.nid, nidstr, sizeof(nidstr)); else strncpy(nidstr, "", sizeof(nidstr)); seq_printf(m, - "]\n" - " current_connection: %s\n" - " connection_attempts: %u\n" - " generation: %u\n" - " in-progress_invalidations: %u\n", - nidstr, - imp->imp_conn_cnt, - imp->imp_generation, - atomic_read(&imp->imp_inval_count)); + " ]\n" + " current_connection: %s\n" + " connection_attempts: %u\n" + " generation: %u\n" + " in-progress_invalidations: %u\n", + nidstr, + imp->imp_conn_cnt, + imp->imp_generation, + atomic_read(&imp->imp_inval_count)); spin_unlock(&imp->imp_lock); - if (obd->obd_svc_stats == NULL) + if (!obd->obd_svc_stats) goto out_climp; header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR]; @@ -696,15 +697,15 @@ int lprocfs_rd_import(struct seq_file *m, void *data) } else ret.lc_sum = 0; seq_printf(m, - " rpcs:\n" - " inflight: %u\n" - " unregistering: %u\n" - " timeouts: %u\n" - " avg_waittime: %llu %s\n", - atomic_read(&imp->imp_inflight), - atomic_read(&imp->imp_unregistering), - atomic_read(&imp->imp_timeouts), - ret.lc_sum, header->lc_units); + " rpcs:\n" + " inflight: %u\n" + " unregistering: %u\n" + " timeouts: %u\n" + " avg_waittime: %llu %s\n", + atomic_read(&imp->imp_inflight), + atomic_read(&imp->imp_unregistering), + atomic_read(&imp->imp_timeouts), + ret.lc_sum, header->lc_units); k = 0; for (j = 0; j < IMP_AT_MAX_PORTALS; j++) { @@ -714,20 +715,20 @@ int lprocfs_rd_import(struct seq_file *m, void *data) at_get(&imp->imp_at.iat_service_estimate[j])); } seq_printf(m, - " service_estimates:\n" - " services: %u sec\n" - " network: %u sec\n", - k, - at_get(&imp->imp_at.iat_net_latency)); + " service_estimates:\n" + " services: %u sec\n" + " network: %u sec\n", + k, + at_get(&imp->imp_at.iat_net_latency)); seq_printf(m, - " transactions:\n" - " last_replay: %llu\n" - " peer_committed: %llu\n" - " last_checked: %llu\n", - imp->imp_last_replay_transno, - imp->imp_peer_committed_transno, - imp->imp_last_transno_checked); + " transactions:\n" + " last_replay: %llu\n" + " peer_committed: %llu\n" + " last_checked: %llu\n", + imp->imp_last_replay_transno, + imp->imp_peer_committed_transno, + imp->imp_last_transno_checked); /* avg data rates */ for (rw = 0; rw <= 1; rw++) { @@ -741,10 +742,10 @@ int lprocfs_rd_import(struct seq_file *m, void *data) do_div(sum, ret.lc_count); ret.lc_sum = sum; seq_printf(m, - " %s_data_averages:\n" - " bytes_per_rpc: %llu\n", - rw ? "write" : "read", - ret.lc_sum); + " %s_data_averages:\n" + " bytes_per_rpc: %llu\n", + rw ? 
"write" : "read", + ret.lc_sum); } k = (int)ret.lc_sum; j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES; @@ -757,13 +758,13 @@ int lprocfs_rd_import(struct seq_file *m, void *data) do_div(sum, ret.lc_count); ret.lc_sum = sum; seq_printf(m, - " %s_per_rpc: %llu\n", - header->lc_units, ret.lc_sum); + " %s_per_rpc: %llu\n", + header->lc_units, ret.lc_sum); j = (int)ret.lc_sum; if (j > 0) seq_printf(m, - " MB_per_sec: %u.%.02u\n", - k / j, (100 * k / j) % 100); + " MB_per_sec: %u.%.02u\n", + k / j, (100 * k / j) % 100); } } @@ -779,7 +780,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) struct obd_import *imp; int j, k, rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -787,7 +788,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) imp = obd->u.cli.cl_import; seq_printf(m, "current_state: %s\n", - ptlrpc_import_state_name(imp->imp_state)); + ptlrpc_import_state_name(imp->imp_state)); seq_printf(m, "state_history:\n"); k = imp->imp_state_hist_idx; for (j = 0; j < IMP_STATE_HIST_LEN; j++) { @@ -795,7 +796,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN]; if (ish->ish_state == 0) continue; - seq_printf(m, " - [%lld, %s]\n", (s64)ish->ish_time, + seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time, ptlrpc_import_state_name(ish->ish_state)); } @@ -825,7 +826,7 @@ int lprocfs_rd_timeouts(struct seq_file *m, void *data) struct dhms ts; int i, rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -942,7 +943,7 @@ int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, return rc; } -EXPORT_SYMBOL(lprocfs_obd_setup); +EXPORT_SYMBOL_GPL(lprocfs_obd_setup); int lprocfs_obd_cleanup(struct obd_device *obd) { @@ -957,7 +958,7 @@ int lprocfs_obd_cleanup(struct obd_device *obd) return 0; } -EXPORT_SYMBOL(lprocfs_obd_cleanup); +EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup); int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) { @@ -967,12 +968,12 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) unsigned long flags = 0; int i; - LASSERT(stats->ls_percpu[cpuid] == NULL); + LASSERT(!stats->ls_percpu[cpuid]); LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0); percpusize = lprocfs_stats_counter_size(stats); LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize); - if (stats->ls_percpu[cpuid] != NULL) { + if (stats->ls_percpu[cpuid]) { rc = 0; if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) { if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) @@ -1017,7 +1018,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, /* alloc percpu pointers for all possible cpu slots */ LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry])); - if (stats == NULL) + if (!stats) return NULL; stats->ls_num = num; @@ -1027,14 +1028,14 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, /* alloc num of counter headers */ LIBCFS_ALLOC(stats->ls_cnt_header, stats->ls_num * sizeof(struct lprocfs_counter_header)); - if (stats->ls_cnt_header == NULL) + if (!stats->ls_cnt_header) goto fail; if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) { /* contains only one set counters */ percpusize = lprocfs_stats_counter_size(stats); LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize); - if (stats->ls_percpu[0] == NULL) + if (!stats->ls_percpu[0]) goto fail; stats->ls_biggest_alloc_num = 1; } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) { @@ -1059,7 +1060,7 @@ void 
lprocfs_free_stats(struct lprocfs_stats **statsh) unsigned int percpusize; unsigned int i; - if (stats == NULL || stats->ls_num == 0) + if (!stats || stats->ls_num == 0) return; *statsh = NULL; @@ -1070,9 +1071,9 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh) percpusize = lprocfs_stats_counter_size(stats); for (i = 0; i < num_entry; i++) - if (stats->ls_percpu[i] != NULL) + if (stats->ls_percpu[i]) LIBCFS_FREE(stats->ls_percpu[i], percpusize); - if (stats->ls_cnt_header != NULL) + if (stats->ls_cnt_header) LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num * sizeof(struct lprocfs_counter_header)); LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry])); @@ -1090,7 +1091,7 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats) num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_entry; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; for (j = 0; j < stats->ls_num; j++) { percpu_cntr = lprocfs_stats_counter_get(stats, i, j); @@ -1196,7 +1197,7 @@ static int lprocfs_stats_seq_open(struct inode *inode, struct file *file) return 0; } -struct file_operations lprocfs_stats_seq_fops = { +static const struct file_operations lprocfs_stats_seq_fops = { .owner = THIS_MODULE, .open = lprocfs_stats_seq_open, .read = seq_read, @@ -1206,7 +1207,7 @@ struct file_operations lprocfs_stats_seq_fops = { }; int ldebugfs_register_stats(struct dentry *parent, const char *name, - struct lprocfs_stats *stats) + struct lprocfs_stats *stats) { struct dentry *entry; @@ -1219,7 +1220,7 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name, return 0; } -EXPORT_SYMBOL(ldebugfs_register_stats); +EXPORT_SYMBOL_GPL(ldebugfs_register_stats); void lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned conf, const char *name, const char *units) @@ -1230,10 +1231,8 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned int i; unsigned int num_cpu; - LASSERT(stats != NULL); - header = &stats->ls_cnt_header[index]; - LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n", + LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n", index, name, units); header->lc_config = conf; @@ -1242,7 +1241,7 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index, num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; ++i) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; percpu_cntr = lprocfs_stats_counter_get(stats, i, index); percpu_cntr->lc_count = 0; @@ -1270,7 +1269,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc, { __s64 ret = 0; - if (lc == NULL || header == NULL) + if (!lc || !header) return 0; switch (field) { @@ -1319,8 +1318,8 @@ int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, } EXPORT_SYMBOL(lprocfs_write_u64_helper); -int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, - __u64 *val, int mult) +int lprocfs_write_frac_u64_helper(const char __user *buffer, + unsigned long count, __u64 *val, int mult) { char kernbuf[22], *end, *pbuf; __u64 whole, frac = 0, units; @@ -1360,17 +1359,19 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, } units = 1; - switch (tolower(*end)) { - case 'p': - units <<= 10; - case 't': - units <<= 10; - case 'g': - units <<= 10; - case 'm': - units <<= 10; - case 'k': - units <<= 10; + if (end) { + switch (tolower(*end)) { + case 'p': + units <<= 10; + case 't': + units <<= 10; + case 
'g': + units <<= 10; + case 'm': + units <<= 10; + case 'k': + units <<= 10; + } } /* Specified units override the multiplier */ if (units > 1) @@ -1412,7 +1413,7 @@ char *lprocfs_find_named_value(const char *buffer, const char *name, /* there is no strnstr() in rhel5 and ubuntu kernels */ val = lprocfs_strnstr(buffer, name, buflen); - if (val == NULL) + if (!val) return (char *)buffer; val += strlen(name); /* skip prefix */ @@ -1429,11 +1430,9 @@ char *lprocfs_find_named_value(const char *buffer, const char *name, } EXPORT_SYMBOL(lprocfs_find_named_value); -int ldebugfs_seq_create(struct dentry *parent, - const char *name, - umode_t mode, - const struct file_operations *seq_fops, - void *data) +int ldebugfs_seq_create(struct dentry *parent, const char *name, + umode_t mode, const struct file_operations *seq_fops, + void *data) { struct dentry *entry; @@ -1446,7 +1445,7 @@ int ldebugfs_seq_create(struct dentry *parent, return 0; } -EXPORT_SYMBOL(ldebugfs_seq_create); +EXPORT_SYMBOL_GPL(ldebugfs_seq_create); int ldebugfs_obd_seq_create(struct obd_device *dev, const char *name, @@ -1457,7 +1456,7 @@ int ldebugfs_obd_seq_create(struct obd_device *dev, return ldebugfs_seq_create(dev->obd_debugfs_entry, name, mode, seq_fops, data); } -EXPORT_SYMBOL(ldebugfs_obd_seq_create); +EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create); void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value) { diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index ce248f407..978568ada 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c @@ -86,13 +86,12 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) */ fid = lu_object_fid(o); if (fid_is_zero(fid)) { - LASSERT(top->loh_hash.next == NULL - && top->loh_hash.pprev == NULL); + LASSERT(!top->loh_hash.next && !top->loh_hash.pprev); LASSERT(list_empty(&top->loh_lru)); if (!atomic_dec_and_test(&top->loh_ref)) return; list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) + if (o->lo_ops->loo_object_release) o->lo_ops->loo_object_release(env, o); } lu_object_free(env, orig); @@ -119,7 +118,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) * layers, and notify them that object is no longer busy. */ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) + if (o->lo_ops->loo_object_release) o->lo_ops->loo_object_release(env, o); } @@ -135,7 +134,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) } /* - * If object is dying (will not be cached), removed it + * If object is dying (will not be cached), then removed it * from hash table and LRU. * * This is done with hash table and LRU lists locked. As the only @@ -210,7 +209,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, * lu_object_header. 
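
The units switch in lprocfs_write_frac_u64_helper() above relies on deliberate case fallthrough, so a 'g' suffix shifts three times and a 'k' suffix once. A standalone, runnable illustration of that cumulative shift:

#include <stdio.h>
#include <ctype.h>

static unsigned long long suffix_units(char c)
{
	unsigned long long units = 1;

	switch (tolower(c)) {
	case 'p':
		units <<= 10;	/* fall through */
	case 't':
		units <<= 10;	/* fall through */
	case 'g':
		units <<= 10;	/* fall through */
	case 'm':
		units <<= 10;	/* fall through */
	case 'k':
		units <<= 10;
	}
	return units;
}

int main(void)
{
	printf("k=%llu m=%llu g=%llu\n",
	       suffix_units('k'), suffix_units('m'), suffix_units('g'));
	/* prints k=1024 m=1048576 g=1073741824 */
	return 0;
}
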
*/ top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); - if (top == NULL) + if (!top) return ERR_PTR(-ENOMEM); if (IS_ERR(top)) return top; @@ -245,7 +244,7 @@ next: } while (!clean); list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_start != NULL) { + if (scan->lo_ops->loo_object_start) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { lu_object_free(env, top); @@ -276,7 +275,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * First call ->loo_object_delete() method to release all resources. */ list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_delete != NULL) + if (scan->lo_ops->loo_object_delete) scan->lo_ops->loo_object_delete(env, scan); } @@ -296,7 +295,6 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) */ o = container_of0(splice.prev, struct lu_object, lo_linkage); list_del_init(&o->lo_linkage); - LASSERT(o->lo_ops->loo_object_free != NULL); o->lo_ops->loo_object_free(env, o); } @@ -451,7 +449,6 @@ int lu_cdebug_printer(const struct lu_env *env, va_start(args, format); key = lu_context_key_get(&env->le_ctx, &lu_global_key); - LASSERT(key != NULL); used = strlen(key->lck_area); complete = format[strlen(format) - 1] == '\n'; @@ -462,7 +459,7 @@ int lu_cdebug_printer(const struct lu_env *env, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) - libcfs_debug_msg(msgdata, "%s", key->lck_area); + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -508,7 +505,7 @@ void lu_object_print(const struct lu_env *env, void *cookie, (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, o->lo_dev->ld_type->ldt_name, o); - if (o->lo_ops->loo_object_print != NULL) + if (o->lo_ops->loo_object_print) (*o->lo_ops->loo_object_print)(env, cookie, printer, o); (*printer)(env, cookie, "\n"); @@ -535,9 +532,10 @@ static struct lu_object *htable_lookup(struct lu_site *s, *version = ver; bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); /* cfs_hash_bd_peek_locked is a somehow "internal" function - * of cfs_hash, it doesn't add refcount on object. */ + * of cfs_hash, it doesn't add refcount on object. + */ hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); - if (hnode == NULL) { + if (!hnode) { lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); return ERR_PTR(-ENOENT); } @@ -636,7 +634,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * If dying object is found during index search, add @waiter to the * site wait-queue and return ERR_PTR(-EAGAIN). */ - if (conf != NULL && conf->loc_flags & LOC_F_NEW) + if (conf && conf->loc_flags & LOC_F_NEW) return lu_object_new(env, dev, f, conf); s = dev->ld_site; @@ -715,7 +713,7 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env, top = lu_object_find(env, dev, f, conf); if (!IS_ERR(top)) { obj = lu_object_locate(top->lo_header, dev->ld_type); - if (obj == NULL) + if (!obj) lu_object_put(env, top); } else obj = top; @@ -842,8 +840,8 @@ static int lu_htable_order(void) #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) - cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_SHIFT)) + cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. 
*/ @@ -855,7 +853,7 @@ static int lu_htable_order(void) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (PAGE_CACHE_SIZE / 1024); + (PAGE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -966,11 +964,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) CFS_HASH_NO_ITEMREF | CFS_HASH_DEPTH | CFS_HASH_ASSERT_EMPTY); - if (s->ls_obj_hash != NULL) + if (s->ls_obj_hash) break; } - if (s->ls_obj_hash == NULL) { + if (!s->ls_obj_hash) { CERROR("failed to create lu_site hash with bits: %d\n", bits); return -ENOMEM; } @@ -982,7 +980,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) } s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); - if (s->ls_stats == NULL) { + if (!s->ls_stats) { cfs_hash_putref(s->ls_obj_hash); s->ls_obj_hash = NULL; return -ENOMEM; @@ -1031,19 +1029,19 @@ void lu_site_fini(struct lu_site *s) list_del_init(&s->ls_linkage); mutex_unlock(&lu_sites_guard); - if (s->ls_obj_hash != NULL) { + if (s->ls_obj_hash) { cfs_hash_putref(s->ls_obj_hash); s->ls_obj_hash = NULL; } - if (s->ls_top_dev != NULL) { + if (s->ls_top_dev) { s->ls_top_dev->ld_site = NULL; lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); lu_device_put(s->ls_top_dev); s->ls_top_dev = NULL; } - if (s->ls_stats != NULL) + if (s->ls_stats) lprocfs_free_stats(&s->ls_stats); } EXPORT_SYMBOL(lu_site_fini); @@ -1088,7 +1086,7 @@ EXPORT_SYMBOL(lu_device_put); */ int lu_device_init(struct lu_device *d, struct lu_device_type *t) { - if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL) + if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start) t->ldt_ops->ldto_start(t); memset(d, 0, sizeof(*d)); atomic_set(&d->ld_ref, 0); @@ -1107,7 +1105,7 @@ void lu_device_fini(struct lu_device *d) struct lu_device_type *t; t = d->ld_type; - if (d->ld_obd != NULL) { + if (d->ld_obd) { d->ld_obd->obd_lu_dev = NULL; d->ld_obd = NULL; } @@ -1116,7 +1114,7 @@ void lu_device_fini(struct lu_device *d) LASSERTF(atomic_read(&d->ld_ref) == 0, "Refcount is %u\n", atomic_read(&d->ld_ref)); LASSERT(t->ldt_device_nr > 0); - if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL) + if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop) t->ldt_ops->ldto_stop(t); } EXPORT_SYMBOL(lu_device_fini); @@ -1148,7 +1146,7 @@ void lu_object_fini(struct lu_object *o) LASSERT(list_empty(&o->lo_linkage)); - if (dev != NULL) { + if (dev) { lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, "lu_object", o); lu_device_put(dev); @@ -1239,7 +1237,7 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) struct lu_device *next; lu_site_purge(env, site, ~0); - for (scan = top; scan != NULL; scan = next) { + for (scan = top; scan; scan = next) { next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan); lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init); lu_device_put(scan); @@ -1248,13 +1246,13 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) /* purge again. 
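
The lu_htable_order() arithmetic above sizes the object hash from the machine's page count. A worked standalone example; the 4 GiB machine and the 20 percent default (LU_CACHE_PERCENT_DEFAULT in this tree) are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long totalram_pages = 1UL << 20; /* assume 4 GiB of 4 KiB pages */
	unsigned long cache_size = totalram_pages;
	unsigned int lu_cache_percent = 20;	  /* assumed LU_CACHE_PERCENT_DEFAULT */
	unsigned int page_size = 4096;
	unsigned int bits;

	/* same integer arithmetic as lu_htable_order() above */
	cache_size = cache_size / 100 * lu_cache_percent * (page_size / 1024);

	for (bits = 1; (1UL << bits) < cache_size; ++bits)
		;
	printf("cache_size=%lu -> hash order %u\n", cache_size, bits);
	/* prints cache_size=838800 -> hash order 20 */
	return 0;
}
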
*/ lu_site_purge(env, site, ~0); - for (scan = top; scan != NULL; scan = next) { + for (scan = top; scan; scan = next) { const struct lu_device_type *ldt = scan->ld_type; struct obd_type *type; next = ldt->ldt_ops->ldto_device_free(env, scan); type = ldt->ldt_obd_type; - if (type != NULL) { + if (type) { type->typ_refcnt--; class_put_type(type); } @@ -1289,14 +1287,14 @@ int lu_context_key_register(struct lu_context_key *key) int result; int i; - LASSERT(key->lct_init != NULL); - LASSERT(key->lct_fini != NULL); + LASSERT(key->lct_init); + LASSERT(key->lct_fini); LASSERT(key->lct_tags != 0); result = -ENFILE; spin_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (lu_keys[i] == NULL) { + if (!lu_keys[i]) { key->lct_index = i; atomic_set(&key->lct_used, 1); lu_keys[i] = key; @@ -1313,12 +1311,10 @@ EXPORT_SYMBOL(lu_context_key_register); static void key_fini(struct lu_context *ctx, int index) { - if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) { + if (ctx->lc_value && ctx->lc_value[index]) { struct lu_context_key *key; key = lu_keys[index]; - LASSERT(key != NULL); - LASSERT(key->lct_fini != NULL); LASSERT(atomic_read(&key->lct_used) > 1); key->lct_fini(ctx, key, ctx->lc_value[index]); @@ -1376,7 +1372,7 @@ int lu_context_key_register_many(struct lu_context_key *k, ...) if (result) break; key = va_arg(args, struct lu_context_key *); - } while (key != NULL); + } while (key); va_end(args); if (result != 0) { @@ -1404,7 +1400,7 @@ void lu_context_key_degister_many(struct lu_context_key *k, ...) do { lu_context_key_degister(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_degister_many); @@ -1420,7 +1416,7 @@ void lu_context_key_revive_many(struct lu_context_key *k, ...) do { lu_context_key_revive(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_revive_many); @@ -1436,7 +1432,7 @@ void lu_context_key_quiesce_many(struct lu_context_key *k, ...) do { lu_context_key_quiesce(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_quiesce_many); @@ -1477,8 +1473,7 @@ void lu_context_key_quiesce(struct lu_context_key *key) * XXX memory barrier has to go here. */ spin_lock(&lu_keys_guard); - list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) + list_for_each_entry(ctx, &lu_context_remembered, lc_remember) key_fini(ctx, key->lct_index); spin_unlock(&lu_keys_guard); ++key_set_version; @@ -1497,7 +1492,7 @@ static void keys_fini(struct lu_context *ctx) { int i; - if (ctx->lc_value == NULL) + if (!ctx->lc_value) return; for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) @@ -1511,12 +1506,12 @@ static int keys_fill(struct lu_context *ctx) { int i; - LINVRNT(ctx->lc_value != NULL); + LINVRNT(ctx->lc_value); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; key = lu_keys[i]; - if (ctx->lc_value[i] == NULL && key != NULL && + if (!ctx->lc_value[i] && key && (key->lct_tags & ctx->lc_tags) && /* * Don't create values for a LCT_QUIESCENT key, as this @@ -1525,7 +1520,7 @@ static int keys_fill(struct lu_context *ctx) !(key->lct_tags & LCT_QUIESCENT)) { void *value; - LINVRNT(key->lct_init != NULL); + LINVRNT(key->lct_init); LINVRNT(key->lct_index == i); value = key->lct_init(ctx, key); @@ -1542,7 +1537,7 @@ static int keys_fill(struct lu_context *ctx) * value. 
*/ ctx->lc_value[i] = value; - if (key->lct_exit != NULL) + if (key->lct_exit) ctx->lc_tags |= LCT_HAS_EXIT; } ctx->lc_version = key_set_version; @@ -1554,7 +1549,7 @@ static int keys_init(struct lu_context *ctx) { ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]), GFP_NOFS); - if (likely(ctx->lc_value != NULL)) + if (likely(ctx->lc_value)) return keys_fill(ctx); return -ENOMEM; @@ -1626,14 +1621,13 @@ void lu_context_exit(struct lu_context *ctx) LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; - if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) { + if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) { for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (ctx->lc_value[i] != NULL) { + if (ctx->lc_value[i]) { struct lu_context_key *key; key = lu_keys[i]; - LASSERT(key != NULL); - if (key->lct_exit != NULL) + if (key->lct_exit) key->lct_exit(ctx, key, ctx->lc_value[i]); } @@ -1688,7 +1682,7 @@ int lu_env_refill(struct lu_env *env) int result; result = lu_context_refill(&env->le_ctx); - if (result == 0 && env->le_ses != NULL) + if (result == 0 && env->le_ses) result = lu_context_refill(env->le_ses); return result; } @@ -1922,11 +1916,11 @@ int lu_kmem_init(struct lu_kmem_descr *caches) int result; struct lu_kmem_descr *iter = caches; - for (result = 0; iter->ckd_cache != NULL; ++iter) { + for (result = 0; iter->ckd_cache; ++iter) { *iter->ckd_cache = kmem_cache_create(iter->ckd_name, iter->ckd_size, 0, 0, NULL); - if (*iter->ckd_cache == NULL) { + if (!*iter->ckd_cache) { result = -ENOMEM; /* free all previously allocated caches */ lu_kmem_fini(caches); @@ -1943,7 +1937,7 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - for (; caches->ckd_cache != NULL; ++caches) { + for (; caches->ckd_cache; ++caches) { kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c index fb9147cc6..403ceea06 100644 --- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c +++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c @@ -65,7 +65,7 @@ void class_handle_hash(struct portals_handle *h, { struct handle_bucket *bucket; - LASSERT(h != NULL); + LASSERT(h); LASSERT(list_empty(&h->h_link)); /* @@ -140,10 +140,11 @@ void *class_handle2object(__u64 cookie) struct portals_handle *h; void *retval = NULL; - LASSERT(handle_hash != NULL); + LASSERT(handle_hash); /* Be careful when you want to change this code. See the - * rcu_read_lock() definition on top this file. - jxiong */ + * rcu_read_lock() definition on top this file. 
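
class_handle2object(), continued below, resolves a cookie by walking a hash bucket under rcu_read_lock() and taking a reference before the lock is dropped. A sketch of that lookup discipline; the list layout, the h_addref() hook and its zero-on-success convention are assumptions, not the portals_handle API:

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/types.h>

struct my_handle {
	struct list_head h_link;
	u64 h_cookie;
	int (*h_addref)(struct my_handle *);	/* assumed: 0 on success */
};

static struct my_handle *my_handle_lookup(struct list_head *bucket, u64 cookie)
{
	struct my_handle *h, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(h, bucket, h_link) {
		if (h->h_cookie != cookie)
			continue;
		if (h->h_addref(h) == 0)	/* pin it before leaving RCU */
			found = h;
		break;
	}
	rcu_read_unlock();
	return found;
}
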
- jxiong + */ bucket = handle_hash + (cookie & HANDLE_HASH_MASK); rcu_read_lock(); @@ -170,7 +171,7 @@ void class_handle_free_cb(struct rcu_head *rcu) struct portals_handle *h = RCU2HANDLE(rcu); void *ptr = (void *)(unsigned long)h->h_cookie; - if (h->h_ops->hop_free != NULL) + if (h->h_ops->hop_free) h->h_ops->hop_free(ptr, h->h_size); else kfree(ptr); @@ -183,11 +184,11 @@ int class_handle_init(void) struct timespec64 ts; int seed[2]; - LASSERT(handle_hash == NULL); + LASSERT(!handle_hash); handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE, GFP_NOFS); - if (handle_hash == NULL) + if (!handle_hash) return -ENOMEM; spin_lock_init(&handle_base_lock); @@ -234,7 +235,7 @@ void class_handle_cleanup(void) { int count; - LASSERT(handle_hash != NULL); + LASSERT(handle_hash); count = cleanup_all_handles(); diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c index d6184f821..5f812460b 100644 --- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c +++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c @@ -93,7 +93,8 @@ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index) EXPORT_SYMBOL(lustre_uuid_to_peer); /* Add a nid to a niduuid. Multiple nids can be added to a single uuid; - LNET will choose the best one. */ + * LNET will choose the best one. + */ int class_add_uuid(const char *uuid, __u64 nid) { struct uuid_nid_data *data, *entry; @@ -149,9 +150,10 @@ int class_del_uuid(const char *uuid) { LIST_HEAD(deathrow); struct uuid_nid_data *data; + struct uuid_nid_data *temp; spin_lock(&g_uuid_lock); - if (uuid != NULL) { + if (uuid) { struct obd_uuid tmp; obd_str2uuid(&tmp, uuid); @@ -165,14 +167,12 @@ int class_del_uuid(const char *uuid) list_splice_init(&g_uuid_list, &deathrow); spin_unlock(&g_uuid_lock); - if (uuid != NULL && list_empty(&deathrow)) { + if (uuid && list_empty(&deathrow)) { CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid); return -EINVAL; } - while (!list_empty(&deathrow)) { - data = list_entry(deathrow.next, struct uuid_nid_data, - un_list); + list_for_each_entry_safe(data, temp, &deathrow, un_list) { list_del(&data->un_list); CDEBUG(D_INFO, "del uuid %s %s/%d\n", diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c index 49cdc6479..5395e994d 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_config.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c @@ -71,8 +71,9 @@ int class_find_param(char *buf, char *key, char **valp) EXPORT_SYMBOL(class_find_param); /* returns 0 if this is the first key in the buffer, else 1. - valp points to first char after key. */ -static int class_match_param(char *buf, char *key, char **valp) + * valp points to first char after key. 
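
class_match_param(), whose contract is restated in the comment above (0 when buf begins with key, valp left at the first char past it), reduces to a strncmp prefix test. A runnable userspace equivalent:

#include <string.h>
#include <stdio.h>

static int my_match_param(const char *buf, const char *key, const char **valp)
{
	if (!buf || strncmp(buf, key, strlen(key)) != 0)
		return 1;
	if (valp)
		*valp = buf + strlen(key);
	return 0;
}

int main(void)
{
	const char *val;

	if (my_match_param("osc.max_dirty_mb=36", "osc.", &val) == 0)
		printf("value part: %s\n", val);	/* max_dirty_mb=36 */
	return 0;
}
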
+ */ +static int class_match_param(char *buf, const char *key, char **valp) { if (!buf) return 1; @@ -114,9 +115,10 @@ enum { }; /* 0 is good nid, - 1 not found - < 0 error - endh is set to next separator */ + * 1 not found + * < 0 error + * endh is set to next separator + */ static int class_parse_value(char *buf, int opc, void *value, char **endh, int quiet) { @@ -210,7 +212,7 @@ static int class_attach(struct lustre_cfg *lcfg) name, typename, rc); goto out; } - LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n", + LASSERTF(obd, "Cannot get obd device %s of type %s\n", name, typename); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08X != %08X\n", @@ -230,7 +232,8 @@ static int class_attach(struct lustre_cfg *lcfg) mutex_init(&obd->obd_dev_mutex); spin_lock_init(&obd->obd_osfs_lock); /* obd->obd_osfs_age must be set to a value in the distant - * past to guarantee a fresh statfs is fetched on mount. */ + * past to guarantee a fresh statfs is fetched on mount. + */ obd->obd_osfs_age = cfs_time_shift_64(-1000); /* XXX belongs in setup not attach */ @@ -272,9 +275,9 @@ static int class_attach(struct lustre_cfg *lcfg) obd->obd_minor, typename, atomic_read(&obd->obd_refcount)); return 0; out: - if (obd != NULL) { + if (obd) class_release_dev(obd); - } + return rc; } @@ -286,7 +289,7 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) int err = 0; struct obd_export *exp; - LASSERT(obd != NULL); + LASSERT(obd); LASSERTF(obd == class_num2obd(obd->obd_minor), "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, class_num2obd(obd->obd_minor)); @@ -315,7 +318,8 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) return -EEXIST; } /* just leave this on forever. I can't use obd_set_up here because - other fns check that status, and we're not actually set up yet. */ + * other fns check that status, and we're not actually set up yet. + */ obd->obd_starting = 1; obd->obd_uuid_hash = NULL; spin_unlock(&obd->obd_dev_lock); @@ -503,7 +507,8 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source) if ((refs == 1) && obd->obd_stopping) { /* All exports have been destroyed; there should - be no more in-progress ops by this point.*/ + * be no more in-progress ops by this point. + */ spin_lock(&obd->obd_self_export->exp_lock); obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd); @@ -723,7 +728,8 @@ static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg) } /* We can't call ll_process_config or lquota_process_config directly because - * it lives in a module that must be loaded after this one. */ + * it lives in a module that must be loaded after this one. + */ static int (*client_process_config)(struct lustre_cfg *lcfg); static int (*quota_process_config)(struct lustre_cfg *lcfg); @@ -812,7 +818,8 @@ int class_process_config(struct lustre_cfg *lcfg) lustre_cfg_string(lcfg, 2), lustre_cfg_string(lcfg, 3)); /* set these mount options somewhere, so ll_fill_super - * can find them. */ + * can find them. + */ err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1), lustre_cfg_string(lcfg, 1), LUSTRE_CFG_BUFLEN(lcfg, 2), @@ -988,8 +995,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, fakefile.private_data = &fake_seqfile; fake_seqfile.private = data; /* e.g. 
tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt - or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar - or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */ + * or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar + * or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 + */ for (i = 1; i < lcfg->lcfg_bufcount; i++) { key = lustre_cfg_buf(lcfg, i); /* Strip off prefix */ @@ -1008,7 +1016,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, /* Search proc entries */ while (lvars[j].name) { var = &lvars[j]; - if (class_match_param(key, (char *)var->name, NULL) == 0 + if (!class_match_param(key, var->name, NULL) && keylen == strlen(var->name)) { matched++; rc = -EROFS; @@ -1027,9 +1035,10 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, } if (!matched) { /* If the prefix doesn't match, return error so we - can pass it down the stack */ + * can pass it down the stack + */ if (strnchr(key, keylen, '.')) - return -ENOSYS; + return -ENOSYS; CERROR("%s: unknown param %s\n", (char *)lustre_cfg_string(lcfg, 0), key); /* rc = -EINVAL; continue parsing other params */ @@ -1040,9 +1049,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, rc = 0; } else { CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n", - lustre_cfg_string(lcfg, 0), - (int)strlen(prefix) - 1, prefix, - (int)(sval - key - 1), key, sval); + lustre_cfg_string(lcfg, 0), + (int)strlen(prefix) - 1, prefix, + (int)(sval - key - 1), key, sval); } } @@ -1116,7 +1125,8 @@ int class_config_llog_handler(const struct lu_env *env, } } /* A config command without a start marker before it is - illegal (post 146) */ + * illegal (post 146) + */ if (!(clli->cfg_flags & CFG_F_COMPAT146) && !(clli->cfg_flags & CFG_F_MARKER) && (lcfg->lcfg_command != LCFG_MARKER)) { @@ -1182,8 +1192,9 @@ int class_config_llog_handler(const struct lu_env *env, } /* we override the llog's uuid for clients, to insure they - are unique */ - if (clli && clli->cfg_instance != NULL && + * are unique + */ + if (clli && clli->cfg_instance && lcfg->lcfg_command == LCFG_ATTACH) { lustre_cfg_bufs_set_string(&bufs, 2, clli->cfg_uuid.uuid); @@ -1211,7 +1222,8 @@ int class_config_llog_handler(const struct lu_env *env, lcfg_new->lcfg_flags = lcfg->lcfg_flags; /* XXX Hack to try to remain binary compatible with - * pre-newconfig logs */ + * pre-newconfig logs + */ if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */ (lcfg->lcfg_nid >> 32) == 0) { __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff); @@ -1270,7 +1282,7 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, if (cfg) { cd.lpcd_first_idx = cfg->cfg_last_idx; callback = cfg->cfg_callback; - LASSERT(callback != NULL); + LASSERT(callback); } else { callback = class_config_llog_handler; } diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c index b5aa8168d..d3e28a389 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c @@ -72,7 +72,7 @@ static void (*kill_super_cb)(struct super_block *sb); * this log, and is added to the mgc's list of logs to follow. 
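The matching loop above leans on the contract class_match_param() documents: return 0 when buf starts with key, leaving *valp just past the key, else return 1, which is why !class_match_param(...) reads as "matched". A hypothetical userspace re-creation of that contract (not the kernel code itself):

#include <stdio.h>
#include <string.h>

static int match_param(char *buf, const char *key, char **valp)
{
        if (!buf || strncmp(buf, key, strlen(key)) != 0)
                return 1;               /* no match */
        if (valp)
                *valp = buf + strlen(key);
        return 0;                       /* matched; *valp is the value */
}

int main(void)
{
        char opt[] = "mdt.group_upcall=foo";
        char *val;

        if (!match_param(opt, "mdt.group_upcall=", &val))
                printf("value: %s\n", val);     /* prints "foo" */
        return 0;
}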
*/ int lustre_process_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) + struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs *bufs; @@ -114,7 +114,7 @@ EXPORT_SYMBOL(lustre_process_log); /* Stop watching this config log for updates */ int lustre_end_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) + struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs bufs; @@ -283,9 +283,10 @@ int lustre_start_mgc(struct super_block *sb) recov_bk = 0; /* Try all connections, but only once (again). - We don't want to block another target from starting - (using its local copy of the log), but we do want to connect - if at all possible. */ + * We don't want to block another target from starting + * (using its local copy of the log), but we do want to connect + * if at all possible. + */ recov_bk++; CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname, recov_bk); @@ -339,7 +340,7 @@ int lustre_start_mgc(struct super_block *sb) /* Add any failover MGS nids */ i = 1; while (ptr && ((*ptr == ':' || - class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { + class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { /* New failover node */ sprintf(niduuid, "%s_%x", mgcname, i); j = 0; @@ -375,7 +376,8 @@ int lustre_start_mgc(struct super_block *sb) goto out_free; /* Keep a refcount of servers/clients who started with "mount", - so we know when we can get rid of the mgc. */ + * so we know when we can get rid of the mgc. + */ atomic_set(&obd->u.cli.cl_mgc_refcount, 1); /* We connect to the MGS at setup, and don't disconnect until cleanup */ @@ -403,7 +405,8 @@ int lustre_start_mgc(struct super_block *sb) out: /* Keep the mgc info in the sb. Note that many lsi's can point - to the same mgc.*/ + * to the same mgc. + */ lsi->lsi_mgc = obd; out_free: mutex_unlock(&mgc_start_lock); @@ -432,7 +435,8 @@ static int lustre_stop_mgc(struct super_block *sb) LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0); if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) { /* This is not fatal, every client that stops - will call in here. */ + * will call in here. + */ CDEBUG(D_MOUNT, "mgc still has %d references.\n", atomic_read(&obd->u.cli.cl_mgc_refcount)); rc = -EBUSY; @@ -440,19 +444,20 @@ static int lustre_stop_mgc(struct super_block *sb) } /* The MGC has no recoverable data in any case. - * force shutdown set in umount_begin */ + * force shutdown set in umount_begin + */ obd->obd_no_recov = 1; if (obd->u.cli.cl_mgc_mgsexp) { /* An error is not fatal, if we are unable to send the - disconnect mgs ping evictor cleans up the export */ + * disconnect mgs ping evictor cleans up the export + */ rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp); if (rc) CDEBUG(D_MOUNT, "disconnect failed %d\n", rc); } - /* Save the obdname for cleaning the nid uuids, which are - obdname_XX */ + /* Save the obdname for cleaning the nid uuids, which are obdname_XX */ len = strlen(obd->obd_name) + 6; niduuid = kzalloc(len, GFP_NOFS); if (niduuid) { @@ -518,13 +523,12 @@ static int lustre_free_lsi(struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); - LASSERT(lsi != NULL); CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi); /* someone didn't call server_put_mount. 
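The cl_mgc_refcount logic above, like lustre_put_lsi() below it, is the usual last-put-frees pattern: only the caller whose atomic_dec_and_test() observes zero may tear the object down; every other caller just logs and backs off. Sketch with invented names:

#include <linux/atomic.h>
#include <linux/slab.h>

struct thing {                          /* hypothetical refcounted object */
        atomic_t refs;
};

static void thing_put(struct thing *t)
{
        /* atomic_dec_and_test() is true only for the decrement
         * that reaches zero, so exactly one caller frees */
        if (atomic_dec_and_test(&t->refs))
                kfree(t);
}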
*/ LASSERT(atomic_read(&lsi->lsi_mounts) == 0); - if (lsi->lsi_lmd != NULL) { + if (lsi->lsi_lmd) { kfree(lsi->lsi_lmd->lmd_dev); kfree(lsi->lsi_lmd->lmd_profile); kfree(lsi->lsi_lmd->lmd_mgssec); @@ -538,7 +542,7 @@ static int lustre_free_lsi(struct super_block *sb) kfree(lsi->lsi_lmd); } - LASSERT(lsi->lsi_llsbi == NULL); + LASSERT(!lsi->lsi_llsbi); kfree(lsi); s2lsi_nocast(sb) = NULL; @@ -546,13 +550,12 @@ static int lustre_free_lsi(struct super_block *sb) } /* The lsi has one reference for every server that is using the disk - - e.g. MDT, MGS, and potentially MGC */ + * e.g. MDT, MGS, and potentially MGC + */ static int lustre_put_lsi(struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); - LASSERT(lsi != NULL); - CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts)); if (atomic_dec_and_test(&lsi->lsi_mounts)) { lustre_free_lsi(sb); @@ -588,21 +591,22 @@ static int server_name2fsname(const char *svname, char *fsname, if (dash == svname) return -EINVAL; - if (fsname != NULL) { + if (fsname) { strncpy(fsname, svname, dash - svname); fsname[dash - svname] = '\0'; } - if (endptr != NULL) + if (endptr) *endptr = dash; return 0; } /* Get the index from the obd name. - rc = server type, or - rc < 0 on error - if endptr isn't NULL it is set to end of name */ + * rc = server type, or + * rc < 0 on error + * if endptr isn't NULL it is set to end of name + */ static int server_name2index(const char *svname, __u32 *idx, const char **endptr) { @@ -627,18 +631,18 @@ static int server_name2index(const char *svname, __u32 *idx, dash += 3; if (strncmp(dash, "all", 3) == 0) { - if (endptr != NULL) + if (endptr) *endptr = dash + 3; return rc | LDD_F_SV_ALL; } index = simple_strtoul(dash, (char **)endptr, 16); - if (idx != NULL) + if (idx) *idx = index; /* Account for -mdc after index that is possible when specifying mdt */ - if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1, - sizeof(LUSTRE_MDC_NAME)-1) == 0) + if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1, + sizeof(LUSTRE_MDC_NAME) - 1) == 0) *endptr += sizeof(LUSTRE_MDC_NAME); return rc; @@ -661,7 +665,8 @@ int lustre_common_put_super(struct super_block *sb) return rc; } /* BUSY just means that there's some other obd that - needs the mgc. Let him clean it up. */ + * needs the mgc. Let him clean it up. + */ CDEBUG(D_MOUNT, "MGC still in use\n"); } /* Drop a ref to the mounted disk */ @@ -731,8 +736,9 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) int rc = 0, devmax; /* The shortest an ost name can be is 8 chars: -OST0000. - We don't actually know the fsname at this time, so in fact - a user could specify any fsname. */ + * We don't actually know the fsname at this time, so in fact + * a user could specify any fsname. 
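server_name2index() above extracts a hexadecimal index from target names of the form fsname-OST0004 or fsname-MDT0000. A rough userspace equivalent (the target name is invented for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *svname = "lustre-OST0004";  /* hypothetical target */
        const char *dash = strstr(svname, "-OST");
        unsigned long idx;

        if (dash) {
                idx = strtoul(dash + 4, NULL, 16);      /* index is hex */
                printf("fsname: %.*s  index: %lu\n",
                       (int)(dash - svname), svname, idx);
        }
        return 0;
}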
+ */ devmax = strlen(ptr) / 8 + 1; /* temp storage until we figure out how many we have */ @@ -756,7 +762,8 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) (uint)(s2-s1), s1, rc); s1 = s2; /* now we are pointing at ':' (next exclude) - or ',' (end of excludes) */ + * or ',' (end of excludes) + */ if (lmd->lmd_exclude_count >= devmax) break; } @@ -788,7 +795,7 @@ static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr) lmd->lmd_mgssec = NULL; tail = strchr(ptr, ','); - if (tail == NULL) + if (!tail) length = strlen(ptr); else length = tail - ptr; @@ -807,14 +814,14 @@ static int lmd_parse_string(char **handle, char *ptr) char *tail; int length; - if ((handle == NULL) || (ptr == NULL)) + if (!handle || !ptr) return -EINVAL; kfree(*handle); *handle = NULL; tail = strchr(ptr, ','); - if (tail == NULL) + if (!tail) length = strlen(ptr); else length = tail - ptr; @@ -847,14 +854,14 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr) return -EINVAL; } - if (lmd->lmd_mgs != NULL) + if (lmd->lmd_mgs) oldlen = strlen(lmd->lmd_mgs) + 1; mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS); if (!mgsnid) return -ENOMEM; - if (lmd->lmd_mgs != NULL) { + if (lmd->lmd_mgs) { /* Multiple mgsnid= are taken to mean failover locations */ memcpy(mgsnid, lmd->lmd_mgs, oldlen); mgsnid[oldlen - 1] = ':'; @@ -909,10 +916,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) s1++; /* Client options are parsed in ll_options: eg. flock, - user_xattr, acl */ + * user_xattr, acl + */ /* Parse non-ldiskfs options here. Rather than modifying - ldiskfs, we just zero these out here */ + * ldiskfs, we just zero these out here + */ if (strncmp(s1, "abort_recov", 11) == 0) { lmd->lmd_flags |= LMD_FLG_ABORT_RECOV; clear++; @@ -940,7 +949,8 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) sizeof(PARAM_MGSNODE) - 1) == 0) { s2 = s1 + sizeof(PARAM_MGSNODE) - 1; /* Assume the next mount opt is the first - invalid nid we get to. */ + * invalid nid we get to. + */ rc = lmd_parse_mgs(lmd, &s2); if (rc) goto invalid; @@ -981,7 +991,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) size_t length, params_length; char *tail = strchr(s1 + 6, ','); - if (tail == NULL) + if (!tail) length = strlen(s1); else length = tail - s1; @@ -1000,18 +1010,20 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) clear++; } /* Linux 2.4 doesn't pass the device, so we stuck it at the - end of the options. */ + * end of the options. + */ else if (strncmp(s1, "device=", 7) == 0) { devname = s1 + 7; /* terminate options right before device. device - must be the last one. */ + * must be the last one. + */ *s1 = '\0'; break; } /* Find next opt */ s2 = strchr(s1, ','); - if (s2 == NULL) { + if (!s2) { if (clear) *s1 = '\0'; break; @@ -1113,9 +1125,9 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent) if (lmd_is_client(lmd)) { CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile); - if (client_fill_super == NULL) + if (!client_fill_super) request_module("lustre"); - if (client_fill_super == NULL) { + if (!client_fill_super) { LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n"); lustre_put_lsi(sb); rc = -ENODEV; @@ -1136,7 +1148,8 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent) } /* If error happens in fill_super() call, @lsi will be killed there. - * This is why we do not put it here. 
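lmd_parse() above walks a comma-separated option string, consuming the options it knows and stopping once it reaches device=, which must come last. A simplified userspace model of that walk (option values made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char opts[] = "abort_recov,mgsnode=10.0.0.1@tcp,device=/dev/sda";
        char *s1 = opts, *s2;

        while (s1) {
                s2 = strchr(s1, ',');           /* find next option */
                if (s2)
                        *s2 = '\0';
                if (strncmp(s1, "device=", 7) == 0) {
                        printf("device: %s\n", s1 + 7);
                        break;                  /* device= ends the scan */
                }
                printf("option: %s\n", s1);
                s1 = s2 ? s2 + 1 : NULL;
        }
        return 0;
}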
*/ + * This is why we do not put it here. + */ goto out; out: if (rc) { @@ -1151,7 +1164,8 @@ out: } /* We can't call ll_fill_super by name because it lives in a module that - must be loaded after this one. */ + * must be loaded after this one. + */ void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb, struct vfsmount *mnt)) { @@ -1166,8 +1180,8 @@ void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb)) EXPORT_SYMBOL(lustre_register_kill_super_cb); /***************** FS registration ******************/ -struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, - const char *devname, void *data) +static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, + const char *devname, void *data) { struct lustre_mount_data2 lmd2 = { .lmd2_data = data, diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c index 75e1deadd..e6436cb4a 100644 --- a/drivers/staging/lustre/lustre/obdclass/obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/obdo.c @@ -55,7 +55,8 @@ void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent) EXPORT_SYMBOL(obdo_set_parent_fid); /* WARNING: the file systems must take care not to tinker with - attributes they don't manage (such as blocks). */ + * attributes they don't manage (such as blocks). + */ void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid) { u32 newvalid = 0; @@ -122,7 +123,8 @@ void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj) ostid_set_seq_mdt0(&ioobj->ioo_oid); /* Since 2.4 this does not contain o_mode in the low 16 bits. - * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */ + * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs + */ ioobj->ioo_max_brw = 0; } EXPORT_SYMBOL(obdo_to_ioobj); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 7b53f7dd1..1e83669c2 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c @@ -60,7 +60,6 @@ struct echo_device { struct cl_site ed_site_myself; struct cl_site *ed_site; struct lu_device *ed_next; - int ed_next_islov; }; struct echo_object { @@ -147,7 +146,7 @@ static inline struct echo_thread_info *echo_env_info(const struct lu_env *env) struct echo_thread_info *info; info = lu_context_key_get(&env->le_ctx, &echo_thread_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -162,9 +161,6 @@ struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c) static struct echo_object *cl_echo_object_find(struct echo_device *d, struct lov_stripe_md **lsm); static int cl_echo_object_put(struct echo_object *eco); -static int cl_echo_enqueue(struct echo_object *eco, u64 start, - u64 end, int mode, __u64 *cookie); -static int cl_echo_cancel(struct echo_device *d, __u64 cookie); static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, struct page **pages, int npages, int async); @@ -224,7 +220,7 @@ static struct lu_kmem_descr echo_caches[] = { * @{ */ static struct page *echo_page_vmpage(const struct lu_env *env, - const struct cl_page_slice *slice) + const struct cl_page_slice *slice) { return cl2echo_page(slice)->ep_vmpage; } @@ -271,7 +267,7 @@ static void echo_page_completion(const struct lu_env *env, const struct cl_page_slice *slice, int ioret) { - LASSERT(slice->cpl_page->cp_sync_io != NULL); + LASSERT(slice->cpl_page->cp_sync_io); } static void echo_page_fini(const 
struct lu_env *env, @@ -282,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env, struct page *vmpage = ep->ep_vmpage; atomic_dec(&eco->eo_npages); - page_cache_release(vmpage); + put_page(vmpage); } static int echo_page_prep(const struct lu_env *env, @@ -371,13 +367,13 @@ static struct cl_lock_operations echo_lock_ops = { * @{ */ static int echo_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct echo_page *ep = cl_object_page_slice(obj, page); struct echo_object *eco = cl2echo_obj(obj); ep->ep_vmpage = vmpage; - page_cache_get(vmpage); + get_page(vmpage); mutex_init(&ep->ep_lock); cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops); atomic_inc(&eco->eo_npages); @@ -396,14 +392,14 @@ static int echo_lock_init(const struct lu_env *env, { struct echo_lock *el; - el = kmem_cache_alloc(echo_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (el != NULL) { + el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS); + if (el) { cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops); el->el_object = cl2echo_obj(obj); INIT_LIST_HEAD(&el->el_chain); atomic_set(&el->el_refcount, 0); } - return el == NULL ? -ENOMEM : 0; + return !el ? -ENOMEM : 0; } static int echo_conf_set(const struct lu_env *env, struct cl_object *obj, @@ -443,7 +439,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj, under = ed->ed_next; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below == NULL) + if (!below) return -ENOMEM; lu_object_add(obj, below); } @@ -474,12 +470,12 @@ static int echo_alloc_memmd(struct echo_device *ed, int lsm_size; /* If export is lov/osc then use their obd method */ - if (ed->ed_next != NULL) + if (ed->ed_next) return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp); /* OFD has no unpackmd method, do everything here */ lsm_size = lov_stripe_md_size(1); - LASSERT(*lsmp == NULL); + LASSERT(!*lsmp); *lsmp = kzalloc(lsm_size, GFP_NOFS); if (!*lsmp) return -ENOMEM; @@ -502,12 +498,11 @@ static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp) int lsm_size; /* If export is lov/osc then use their obd method */ - if (ed->ed_next != NULL) + if (ed->ed_next) return obd_free_memmd(ed->ed_ec->ec_exp, lsmp); /* OFD has no unpackmd method, do everything here */ lsm_size = lov_stripe_md_size(1); - LASSERT(*lsmp != NULL); kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; @@ -534,7 +529,7 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj) } static int echo_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) + lu_printer_t p, const struct lu_object *o) { struct echo_object *obj = cl2echo_obj(lu2cl(o)); @@ -566,9 +561,9 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, struct lu_object *obj = NULL; /* we're the top dev. 
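The allocation and page-reference changes through this file are mechanical substitutions: kmem_cache_zalloc(c, f) is defined as kmem_cache_alloc(c, f | __GFP_ZERO), and get_page()/put_page() are the surviving names for page_cache_get()/page_cache_release(). The zalloc form, sketched with an invented cache:

#include <linux/slab.h>

struct foo {                            /* hypothetical cached object */
        int x;
};

static struct kmem_cache *foo_cache;    /* assume kmem_cache_create() ran */

static struct foo *foo_alloc(void)
{
        /* same as kmem_cache_alloc(foo_cache, GFP_NOFS | __GFP_ZERO) */
        return kmem_cache_zalloc(foo_cache, GFP_NOFS);
}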
*/ - LASSERT(hdr == NULL); - eco = kmem_cache_alloc(echo_object_kmem, GFP_NOFS | __GFP_ZERO); - if (eco != NULL) { + LASSERT(!hdr); + eco = kmem_cache_zalloc(echo_object_kmem, GFP_NOFS); + if (eco) { struct cl_object_header *hdr = &eco->eo_hdr; obj = &echo_obj2cl(eco)->co_lu; @@ -582,13 +577,13 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, return obj; } -static struct lu_device_operations echo_device_lu_ops = { +static const struct lu_device_operations echo_device_lu_ops = { .ldo_object_alloc = echo_object_alloc, }; /** @} echo_lu_dev_ops */ -static struct cl_device_operations echo_device_cl_ops = { +static const struct cl_device_operations echo_device_cl_ops = { }; /** \defgroup echo_init Setup and teardown @@ -626,18 +621,18 @@ static void echo_site_fini(const struct lu_env *env, struct echo_device *ed) } static void *echo_thread_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { struct echo_thread_info *info; - info = kmem_cache_alloc(echo_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(echo_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } static void echo_thread_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { struct echo_thread_info *info = data; @@ -645,7 +640,7 @@ static void echo_thread_key_fini(const struct lu_context *ctx, } static void echo_thread_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { } @@ -657,18 +652,18 @@ static struct lu_context_key echo_thread_key = { }; static void *echo_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { struct echo_session_info *session; - session = kmem_cache_alloc(echo_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(echo_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } static void echo_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { struct echo_session_info *session = data; @@ -676,7 +671,7 @@ static void echo_session_key_fini(const struct lu_context *ctx, } static void echo_session_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { } @@ -719,13 +714,13 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, cleanup = 2; obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); - LASSERT(env != NULL); + LASSERT(obd); + LASSERT(env); tgt = class_name2obd(lustre_cfg_string(cfg, 1)); - if (tgt == NULL) { + if (!tgt) { CERROR("Can not find tgt device %s\n", - lustre_cfg_string(cfg, 1)); + lustre_cfg_string(cfg, 1)); rc = -ENODEV; goto out; } @@ -751,14 +746,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, cleanup = 4; /* if echo client is to be stacked upon ost device, the next is - * NULL since ost is not a clio device so far */ - if (next != NULL && !lu_device_is_cl(next)) + * NULL since ost is not a clio device so far + */ + if (next && !lu_device_is_cl(next)) next = NULL; tgt_type_name = tgt->obd_type->typ_name; - if (next != NULL) { - LASSERT(next != NULL); - if (next->ld_site != NULL) { + if (next) { + if (next->ld_site) { rc = -EBUSY; goto out; } @@ -770,14 +765,6 @@ static struct lu_device 
*echo_device_alloc(const struct lu_env *env, if (rc) goto out; - /* Tricky case, I have to determine the obd type since - * CLIO uses the different parameters to initialize - * objects for lov & osc. */ - if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0) - ed->ed_next_islov = 1; - else - LASSERT(strcmp(tgt_type_name, - LUSTRE_OSC_NAME) == 0); } else { LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0); } @@ -809,7 +796,7 @@ out: } static int echo_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) + const char *name, struct lu_device *next) { LBUG(); return 0; @@ -963,20 +950,11 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d, info = echo_env_info(env); conf = &info->eti_conf; if (d->ed_next) { - if (!d->ed_next_islov) { - struct lov_oinfo *oinfo = lsm->lsm_oinfo[0]; - - LASSERT(oinfo != NULL); - oinfo->loi_oi = lsm->lsm_oi; - conf->eoc_cl.u.coc_oinfo = oinfo; - } else { - struct lustre_md *md; + struct lov_oinfo *oinfo = lsm->lsm_oinfo[0]; - md = &info->eti_md; - memset(md, 0, sizeof(*md)); - md->lsm = lsm; - conf->eoc_cl.u.coc_md = md; - } + LASSERT(oinfo); + oinfo->loi_oi = lsm->lsm_oi; + conf->eoc_cl.u.coc_oinfo = oinfo; } conf->eoc_md = lsmp; @@ -988,7 +966,8 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d, } /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() */ + * lu_obj_hop_keycmp() + */ /* coverity[overrun-buffer-val] */ obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl); if (IS_ERR(obj)) { @@ -1076,36 +1055,6 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco, return rc; } -static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end, - int mode, __u64 *cookie) -{ - struct echo_thread_info *info; - struct lu_env *env; - struct cl_io *io; - int refcheck; - int result; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - info = echo_env_info(env); - io = &info->eti_io; - - io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco)); - if (result < 0) - goto out; - LASSERT(result == 0); - - result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0); - cl_io_fini(env, io); - -out: - cl_env_put(env, &refcheck); - return result; -} - static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, __u64 cookie) { @@ -1114,7 +1063,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, struct list_head *el; int found = 0, still_used = 0; - LASSERT(ec != NULL); spin_lock(&ec->ec_lock); list_for_each(el, &ec->ec_locks) { ecl = list_entry(el, struct echo_lock, el_chain); @@ -1137,22 +1085,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, return 0; } -static int cl_echo_cancel(struct echo_device *ed, __u64 cookie) -{ - struct lu_env *env; - int refcheck; - int rc; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - rc = cl_echo_cancel0(env, ed, cookie); - - cl_env_put(env, &refcheck); - return rc; -} - static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io, enum cl_req_type unused, struct cl_2queue *queue) { @@ -1188,7 +1120,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, int i; LASSERT((offset & ~CFS_PAGE_MASK) == 0); - LASSERT(ed->ed_next != NULL); + LASSERT(ed->ed_next); env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); @@ -1206,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, LASSERT(rc 
== 0); rc = cl_echo_enqueue0(env, eco, offset, - offset + npages * PAGE_CACHE_SIZE - 1, + offset + npages * PAGE_SIZE - 1, rw == READ ? LCK_PR : LCK_PW, &lh.cookie, CEF_NEVER); if (rc < 0) @@ -1234,7 +1166,8 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, cl_page_list_add(&queue->c2_qin, clp); /* drop the reference count for cl_page_find, so that the page - * will be freed in cl_2queue_fini. */ + * will be freed in cl_2queue_fini. + */ cl_page_put(env, clp); cl_page_clip(env, clp, 0, page_size); @@ -1268,61 +1201,8 @@ out: static u64 last_object_id; -static int -echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) -{ - struct lov_stripe_md *ulsm = _ulsm; - struct lov_oinfo **p; - int nob, i; - - nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]); - if (nob > ulsm_nob) - return -EINVAL; - - if (copy_to_user(ulsm, lsm, sizeof(*ulsm))) - return -EFAULT; - - for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) { - struct lov_oinfo __user *up; - if (get_user(up, ulsm->lsm_oinfo + i) || - copy_to_user(up, *p, sizeof(struct lov_oinfo))) - return -EFAULT; - } - return 0; -} - -static int -echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm, - struct lov_stripe_md __user *ulsm, int ulsm_nob) -{ - struct echo_client_obd *ec = ed->ed_ec; - struct lov_oinfo **p; - int i; - - if (ulsm_nob < sizeof(*lsm)) - return -EINVAL; - - if (copy_from_user(lsm, ulsm, sizeof(*lsm))) - return -EFAULT; - - if (lsm->lsm_stripe_count > ec->ec_nstripes || - lsm->lsm_magic != LOV_MAGIC || - (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 || - ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL)) - return -EINVAL; - - for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) { - struct lov_oinfo __user *up; - if (get_user(up, ulsm->lsm_oinfo + i) || - copy_from_user(*p, up, sizeof(struct lov_oinfo))) - return -EFAULT; - } - return 0; -} - static int echo_create_object(const struct lu_env *env, struct echo_device *ed, - int on_target, struct obdo *oa, void *ulsm, - int ulsm_nob, struct obd_trans_info *oti) + struct obdo *oa, struct obd_trans_info *oti) { struct echo_object *eco; struct echo_client_obd *ec = ed->ed_ec; @@ -1330,10 +1210,10 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, int rc; int created = 0; - if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */ - (on_target || /* set_stripe */ - ec->ec_nstripes != 0)) { /* LOV */ - CERROR("No valid oid\n"); + if (!(oa->o_valid & OBD_MD_FLID) || + !(oa->o_valid & OBD_MD_FLGROUP) || + !fid_seq_is_echo(ostid_seq(&oa->o_oi))) { + CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi)); return -EINVAL; } @@ -1343,52 +1223,18 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, goto failed; } - if (ulsm != NULL) { - int i, idx; - - rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob); - if (rc != 0) - goto failed; - - if (lsm->lsm_stripe_count == 0) - lsm->lsm_stripe_count = ec->ec_nstripes; - - if (lsm->lsm_stripe_size == 0) - lsm->lsm_stripe_size = PAGE_CACHE_SIZE; - - idx = cfs_rand(); - - /* setup stripes: indices + default ids if required */ - for (i = 0; i < lsm->lsm_stripe_count; i++) { - if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0) - lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi; - - lsm->lsm_oinfo[i]->loi_ost_idx = - (idx + i) % ec->ec_nstripes; - } - } - - /* setup object ID here for !on_target and LOV hint */ - if (oa->o_valid & OBD_MD_FLID) { - LASSERT(oa->o_valid & OBD_MD_FLGROUP); - lsm->lsm_oi 
= oa->o_oi; - } + /* setup object ID here */ + lsm->lsm_oi = oa->o_oi; if (ostid_id(&lsm->lsm_oi) == 0) ostid_set_id(&lsm->lsm_oi, ++last_object_id); - rc = 0; - if (on_target) { - /* Only echo objects are allowed to be created */ - LASSERT((oa->o_valid & OBD_MD_FLGROUP) && - (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO)); - rc = obd_create(env, ec->ec_exp, oa, &lsm, oti); - if (rc != 0) { - CERROR("Cannot create objects: rc = %d\n", rc); - goto failed; - } - created = 1; + rc = obd_create(env, ec->ec_exp, oa, &lsm, oti); + if (rc != 0) { + CERROR("Cannot create objects: rc = %d\n", rc); + goto failed; } + created = 1; /* See what object ID we were given */ oa->o_oi = lsm->lsm_oi; @@ -1447,42 +1293,16 @@ static int echo_get_object(struct echo_object **ecop, struct echo_device *ed, static void echo_put_object(struct echo_object *eco) { - if (cl_echo_object_put(eco)) - CERROR("echo client: drop an object failed"); -} - -static void -echo_get_stripe_off_id(struct lov_stripe_md *lsm, u64 *offp, u64 *idp) -{ - unsigned long stripe_count; - unsigned long stripe_size; - unsigned long width; - unsigned long woffset; - int stripe_index; - u64 offset; - - if (lsm->lsm_stripe_count <= 1) - return; - - offset = *offp; - stripe_size = lsm->lsm_stripe_size; - stripe_count = lsm->lsm_stripe_count; - - /* width = # bytes in all stripes */ - width = stripe_size * stripe_count; - - /* woffset = offset within a width; offset = whole number of widths */ - woffset = do_div(offset, width); - - stripe_index = woffset / stripe_size; + int rc; - *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi); - *offp = offset * stripe_size + woffset % stripe_size; + rc = cl_echo_object_put(eco); + if (rc) + CERROR("%s: echo client drop an object failed: rc = %d\n", + eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc); } static void -echo_client_page_debug_setup(struct lov_stripe_md *lsm, - struct page *page, int rw, u64 id, +echo_client_page_debug_setup(struct page *page, int rw, u64 id, u64 offset, u64 count) { char *addr; @@ -1491,15 +1311,14 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, int delta; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { if (rw == OBD_BRW_WRITE) { stripe_off = offset + delta; stripe_id = id; - echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id); } else { stripe_off = 0xdeadbeef00c0ffeeULL; stripe_id = 0xdeadbeef00c0ffeeULL; @@ -1511,8 +1330,7 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, kunmap(page); } -static int echo_client_page_debug_check(struct lov_stripe_md *lsm, - struct page *page, u64 id, +static int echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count) { u64 stripe_off; @@ -1523,14 +1341,13 @@ static int echo_client_page_debug_check(struct lov_stripe_md *lsm, int rc2; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { stripe_off = offset + delta; stripe_id = id; - echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id); rc2 = block_debug_check("test_brw", addr + delta, OBD_ECHO_BLOCK_SIZE, @@ -1550,7 +1367,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, u64 count, int 
async, struct obd_trans_info *oti) { - struct lov_stripe_md *lsm = eco->eo_lsm; u32 npages; struct brw_page *pga; struct brw_page *pgp; @@ -1569,53 +1385,51 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER; LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ); - LASSERT(lsm != NULL); - LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi)); if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) return -EINVAL; /* XXX think again with misaligned I/O */ - npages = count >> PAGE_CACHE_SHIFT; + npages = count >> PAGE_SHIFT; if (rw == OBD_BRW_WRITE) brw_flags = OBD_BRW_ASYNC; pga = kcalloc(npages, sizeof(*pga), GFP_NOFS); - if (pga == NULL) + if (!pga) return -ENOMEM; pages = kcalloc(npages, sizeof(*pages), GFP_NOFS); - if (pages == NULL) { + if (!pages) { kfree(pga); return -ENOMEM; } for (i = 0, pgp = pga, off = offset; i < npages; - i++, pgp++, off += PAGE_CACHE_SIZE) { + i++, pgp++, off += PAGE_SIZE) { - LASSERT(pgp->pg == NULL); /* for cleanup */ + LASSERT(!pgp->pg); /* for cleanup */ rc = -ENOMEM; pgp->pg = alloc_page(gfp_mask); - if (pgp->pg == NULL) + if (!pgp->pg) goto out; pages[i] = pgp->pg; - pgp->count = PAGE_CACHE_SIZE; + pgp->count = PAGE_SIZE; pgp->off = off; pgp->flag = brw_flags; if (verify) - echo_client_page_debug_setup(lsm, pgp->pg, rw, + echo_client_page_debug_setup(pgp->pg, rw, ostid_id(&oa->o_oi), off, pgp->count); } /* brw mode can only be used at client */ - LASSERT(ed->ed_next != NULL); + LASSERT(ed->ed_next); rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); out: @@ -1623,13 +1437,13 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, verify = 0; for (i = 0, pgp = pga; i < npages; i++, pgp++) { - if (pgp->pg == NULL) + if (!pgp->pg) continue; if (verify) { int vrc; - vrc = echo_client_page_debug_check(lsm, pgp->pg, + vrc = echo_client_page_debug_check(pgp->pg, ostid_id(&oa->o_oi), pgp->off, pgp->count); if (vrc != 0 && rc == 0) @@ -1649,7 +1463,6 @@ static int echo_client_prep_commit(const struct lu_env *env, u64 batch, struct obd_trans_info *oti, int async) { - struct lov_stripe_md *lsm = eco->eo_lsm; struct obd_ioobj ioo; struct niobuf_local *lnb; struct niobuf_remote *rnb; @@ -1657,17 +1470,16 @@ static int echo_client_prep_commit(const struct lu_env *env, u64 npages, tot_pages; int i, ret = 0, brw_flags = 0; - if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 || - (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi))) + if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) return -EINVAL; - npages = batch >> PAGE_CACHE_SHIFT; - tot_pages = count >> PAGE_CACHE_SHIFT; + npages = batch >> PAGE_SHIFT; + tot_pages = count >> PAGE_SHIFT; lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); - if (lnb == NULL || rnb == NULL) { + if (!lnb || !rnb) { ret = -ENOMEM; goto out; } @@ -1685,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env, if (tot_pages < npages) npages = tot_pages; - for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { + for (i = 0; i < npages; i++, off += PAGE_SIZE) { rnb[i].offset = off; - rnb[i].len = PAGE_CACHE_SIZE; + rnb[i].len = PAGE_SIZE; rnb[i].flags = brw_flags; } @@ -1705,7 +1517,7 @@ static int echo_client_prep_commit(const struct lu_env *env, struct page *page = lnb[i].page; /* read past eof? 
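Both brw paths above require page-aligned transfer sizes: (count & (~CFS_PAGE_MASK)) != 0 rejects anything with a partial page, and count >> PAGE_SHIFT then gives the page count. A userspace model of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long count = 8 * PAGE_SIZE;

        if (count == 0 || (count & ~PAGE_MASK) != 0)
                printf("rejected: not page aligned\n");
        else
                printf("npages = %lu\n", count >> PAGE_SHIFT);  /* 8 */
        return 0;
}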
*/ - if (page == NULL && lnb[i].rc == 0) + if (!page && lnb[i].rc == 0) continue; if (async) @@ -1717,12 +1529,12 @@ static int echo_client_prep_commit(const struct lu_env *env, continue; if (rw == OBD_BRW_WRITE) - echo_client_page_debug_setup(lsm, page, rw, + echo_client_page_debug_setup(page, rw, ostid_id(&oa->o_oi), rnb[i].offset, rnb[i].len); else - echo_client_page_debug_check(lsm, page, + echo_client_page_debug_check(page, ostid_id(&oa->o_oi), rnb[i].offset, rnb[i].len); @@ -1774,7 +1586,7 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, if (test_mode == 1) async = 0; - if (ed->ed_next == NULL && test_mode != 3) { + if (!ed->ed_next && test_mode != 3) { test_mode = 3; data->ioc_plen1 = data->ioc_count; } @@ -1804,56 +1616,9 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, return rc; } -static int -echo_client_enqueue(struct obd_export *exp, struct obdo *oa, - int mode, u64 offset, u64 nob) -{ - struct echo_device *ed = obd2echo_dev(exp->exp_obd); - struct lustre_handle *ulh = &oa->o_handle; - struct echo_object *eco; - u64 end; - int rc; - - if (ed->ed_next == NULL) - return -EOPNOTSUPP; - - if (!(mode == LCK_PR || mode == LCK_PW)) - return -EINVAL; - - if ((offset & (~CFS_PAGE_MASK)) != 0 || - (nob & (~CFS_PAGE_MASK)) != 0) - return -EINVAL; - - rc = echo_get_object(&eco, ed, oa); - if (rc != 0) - return rc; - - end = (nob == 0) ? ((u64) -1) : (offset + nob - 1); - rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie); - if (rc == 0) { - oa->o_valid |= OBD_MD_FLHANDLE; - CDEBUG(D_INFO, "Cookie is %#llx\n", ulh->cookie); - } - echo_put_object(eco); - return rc; -} - -static int -echo_client_cancel(struct obd_export *exp, struct obdo *oa) -{ - struct echo_device *ed = obd2echo_dev(exp->exp_obd); - __u64 cookie = oa->o_handle.cookie; - - if ((oa->o_valid & OBD_MD_FLHANDLE) == 0) - return -EINVAL; - - CDEBUG(D_INFO, "Cookie is %#llx\n", cookie); - return cl_echo_cancel(ed, cookie); -} - static int echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct echo_device *ed = obd2echo_dev(obd); @@ -1899,8 +1664,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, goto out; } - rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1, - data->ioc_plen1, &dummy_oti); + rc = echo_create_object(env, ed, oa, &dummy_oti); goto out; case OBD_IOC_DESTROY: @@ -1911,7 +1675,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm, + rc = obd_destroy(env, ec->ec_exp, oa, NULL, &dummy_oti, NULL); if (rc == 0) eco->eo_deleted = 1; @@ -1922,10 +1686,10 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, case OBD_IOC_GETATTR: rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { }; + struct obd_info oinfo = { + .oi_oa = oa, + }; - oinfo.oi_md = eco->eo_lsm; - oinfo.oi_oa = oa; rc = obd_getattr(env, ec->ec_exp, &oinfo); echo_put_object(eco); } @@ -1939,10 +1703,9 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { }; - - oinfo.oi_oa = oa; - oinfo.oi_md = eco->eo_lsm; + struct obd_info oinfo = { + .oi_oa = oa, + }; rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL); echo_put_object(eco); @@ -1961,50 +1724,6 @@ echo_client_iocontrol(unsigned int cmd, 
struct obd_export *exp, int len, rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti); goto out; - case ECHO_IOC_GET_STRIPE: - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1, - data->ioc_plen1); - echo_put_object(eco); - } - goto out; - - case ECHO_IOC_SET_STRIPE: - if (!capable(CFS_CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - if (data->ioc_pbuf1 == NULL) { /* unset */ - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - eco->eo_deleted = 1; - echo_put_object(eco); - } - } else { - rc = echo_create_object(env, ed, 0, oa, - data->ioc_pbuf1, - data->ioc_plen1, &dummy_oti); - } - goto out; - - case ECHO_IOC_ENQUEUE: - if (!capable(CFS_CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rc = echo_client_enqueue(exp, oa, - data->ioc_conn1, /* lock mode */ - data->ioc_offset, - data->ioc_count);/*extent*/ - goto out; - - case ECHO_IOC_CANCEL: - rc = echo_client_cancel(exp, oa); - goto out; - default: CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd); rc = -ENOTTY; @@ -2051,14 +1770,10 @@ static int echo_client_setup(const struct lu_env *env, INIT_LIST_HEAD(&ec->ec_objects); INIT_LIST_HEAD(&ec->ec_locks); ec->ec_unique = 0; - ec->ec_nstripes = 0; ocd = kzalloc(sizeof(*ocd), GFP_NOFS); - if (!ocd) { - CERROR("Can't alloc ocd connecting to %s\n", - lustre_cfg_string(lcfg, 1)); + if (!ocd) return -ENOMEM; - } ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE | @@ -2120,7 +1835,7 @@ static int echo_client_disconnect(struct obd_export *exp) { int rc; - if (exp == NULL) { + if (!exp) { rc = -EINVAL; goto out; } @@ -2163,7 +1878,7 @@ static int __init obdecho_init(void) { LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); - LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); + LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); return echo_client_init(); } @@ -2175,9 +1890,9 @@ static void /*__exit*/ obdecho_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Testing Echo OBD driver"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Lustre Echo Client test driver"); MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); module_init(obdecho_init); module_exit(obdecho_exit); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h index 69063fa65..f5034a253 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h +++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code).
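The PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT substitutions here and throughout the osc hunks below are mechanical: the PAGE_CACHE_* aliases were removed from the kernel in 4.6 and had always been equal to their PAGE_* counterparts. The megabyte-to-pages conversions they appear in (e.g. max_dirty_mb_store() in the next file) reduce to a shift, sketched here for 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */

int main(void)
{
        unsigned long mb = 32;
        unsigned long pages = mb << (20 - PAGE_SHIFT);  /* MB -> pages */

        printf("%lu MB = %lu pages\n", mb, pages);      /* 8192 pages */
        return 0;
}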
* - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c index 1091536fc..a3358c39b 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, if (rc) return rc; - pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ + pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number <= 0 || - pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || + pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || pages_number > totalram_pages / 4) /* 1/4 of RAM */ return -ERANGE; client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT); + cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT); osc_wake_cache_waiters(cli); client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v) { struct obd_device *dev = m->private; struct client_obd *cli = &dev->u.cli; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; seq_printf(m, "used_mb: %d\n" @@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file, return -EFAULT; kernbuf[count] = 0; - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) - kernbuf; rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); @@ -381,7 +381,7 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) DECLARE_CKSUM_NAME; - if (obd == NULL) + if (!obd) return 0; for (i = 0; i < ARRAY_SIZE(cksum_name); i++) { @@ -397,8 +397,8 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) } static ssize_t osc_checksum_type_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; int i; @@ -406,7 +406,7 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, DECLARE_CKSUM_NAME; char kernbuf[10]; - if (obd == NULL) + if (!obd) return 0; if (count > sizeof(kernbuf) - 1) @@ -422,8 +422,8 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0) continue; if (!strcmp(kernbuf, cksum_name[i])) { - obd->u.cli.cl_cksum_type = 1 << i; - return count; + obd->u.cli.cl_cksum_type = 1 << i; + return count; } } return -EINVAL; @@ -480,9 +480,19 @@ static ssize_t contention_seconds_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + int val; + + rc = kstrtoint(buffer, 10, &val); + if (rc) + return rc; + + if (val < 0) + return -EINVAL; + + od->od_contention_time = val; - return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?: - count; + return count; } LUSTRE_RW_ATTR(contention_seconds); @@ -505,9 +515,16 @@ static ssize_t lockless_truncate_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + unsigned int val; - return 
lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?: - count; + rc = kstrtouint(buffer, 10, &val); + if (rc) + return rc; + + od->od_lockless_truncate = val; + + return count; } LUSTRE_RW_ATTR(lockless_truncate); @@ -552,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj, /* if the max_pages is specified in bytes, convert to pages */ if (val >= ONE_MB_BRW_SIZE) - val >>= PAGE_CACHE_SHIFT; + val >>= PAGE_SHIFT; - chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); + chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); /* max_pages_per_rpc must be chunk aligned */ val = (val + ~chunk_mask) & chunk_mask; - if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) { + if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) { return -ERANGE; } client_obd_list_lock(&cli->cl_loi_list_lock); @@ -635,10 +652,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - 1 << i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + 1 << i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } @@ -659,10 +676,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index 2229419b7..5f25bf83d 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -140,7 +140,7 @@ static const char *oes_strings[] = { static inline struct osc_extent *rb_extent(struct rb_node *n) { - if (n == NULL) + if (!n) return NULL; return container_of(n, struct osc_extent, oe_node); @@ -148,7 +148,7 @@ static inline struct osc_extent *rb_extent(struct rb_node *n) static inline struct osc_extent *next_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -157,7 +157,7 @@ static inline struct osc_extent *next_extent(struct osc_extent *ext) static inline struct osc_extent *prev_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -240,7 +240,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, goto out; } - if (ext->oe_osclock == NULL && ext->oe_grants > 0) { + if (!ext->oe_osclock && ext->oe_grants > 0) { rc = 90; goto out; } @@ -262,7 +262,8 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, } /* Do not verify page list if extent is in RPC. This is because an - * in-RPC extent is supposed to be exclusively accessible w/o lock. */ + * in-RPC extent is supposed to be exclusively accessible w/o lock. 
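The store-handler rewrites above drop lprocfs_write_helper() in favor of the kernel's checked string parsers: kstrtoint()/kstrtouint() return 0 or -errno, so the handler can validate before committing. The resulting shape, sketched with an invented attribute and backing field:

#include <linux/kernel.h>
#include <linux/kobject.h>

static int example_val;                 /* hypothetical backing field */

static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
                             const char *buffer, size_t count)
{
        int val;
        int rc;

        rc = kstrtoint(buffer, 10, &val);       /* 0 on success, -errno */
        if (rc)
                return rc;
        if (val < 0)
                return -EINVAL;         /* validate before committing */

        example_val = val;
        return count;                   /* whole write consumed */
}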
+ */ if (ext->oe_state > OES_CACHE) { rc = 0; goto out; @@ -319,7 +320,7 @@ static int osc_extent_is_overlapped(struct osc_object *obj, if (!extent_debug) return 0; - for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) { + for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) { if (tmp == ext) continue; if (tmp->oe_end >= ext->oe_start && @@ -346,8 +347,8 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj) { struct osc_extent *ext; - ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO); - if (ext == NULL) + ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS); + if (!ext) return NULL; RB_CLEAR_NODE(&ext->oe_node); @@ -415,7 +416,7 @@ static struct osc_extent *osc_extent_search(struct osc_object *obj, struct osc_extent *tmp, *p = NULL; LASSERT(osc_object_is_locked(obj)); - while (n != NULL) { + while (n) { tmp = rb_extent(n); if (index < tmp->oe_start) { n = n->rb_left; @@ -439,7 +440,7 @@ static struct osc_extent *osc_extent_lookup(struct osc_object *obj, struct osc_extent *ext; ext = osc_extent_search(obj, index); - if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end) + if (ext && ext->oe_start <= index && index <= ext->oe_end) return osc_extent_get(ext); return NULL; } @@ -454,7 +455,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) LASSERT(ext->oe_intree == 0); LASSERT(ext->oe_obj == obj); LASSERT(osc_object_is_locked(obj)); - while (*n != NULL) { + while (*n) { tmp = rb_extent(*n); parent = *n; @@ -463,7 +464,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) else if (ext->oe_start > tmp->oe_end) n = &(*n)->rb_right; else - EASSERTF(0, tmp, EXTSTR, EXTPARA(ext)); + EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext)); } rb_link_node(&ext->oe_node, parent, n); rb_insert_color(&ext->oe_node, &obj->oo_root); @@ -533,7 +534,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, LASSERT(cur->oe_state == OES_CACHE); LASSERT(osc_object_is_locked(obj)); - if (victim == NULL) + if (!victim) return -EINVAL; if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait) @@ -543,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, return -ERANGE; LASSERT(cur->oe_osclock == victim->oe_osclock); - ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; + ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT; chunk_start = cur->oe_start >> ppc_bits; chunk_end = cur->oe_end >> ppc_bits; if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && @@ -587,7 +588,8 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_trunc_pending) { /* a truncate process is waiting for this extent. * This may happen due to a race, check - * osc_cache_truncate_start(). */ + * osc_cache_truncate_start(). 
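The extent tree above does all of its bookkeeping in chunks: ppc_bits = cl_chunkbits - PAGE_SHIFT is log2 of the pages per chunk, and index >> ppc_bits maps a page index to its chunk number. Worked userspace example, assuming 64 KiB chunks and 4 KiB pages:

#include <stdio.h>

int main(void)
{
        int chunkbits = 16, page_shift = 12;    /* assumed sizes */
        int ppc_bits = chunkbits - page_shift;  /* 4: 16 pages/chunk */
        unsigned long index = 37;               /* a page index */

        printf("pages per chunk: %d, page %lu is in chunk %lu\n",
               1 << ppc_bits, index, index >> ppc_bits); /* 16, chunk 2 */
        return 0;
}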
+ */ osc_extent_state_set(ext, OES_TRUNC); ext->oe_trunc_pending = 0; } else { @@ -601,7 +603,7 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_urgent) list_move_tail(&ext->oe_link, - &obj->oo_urgent_exts); + &obj->oo_urgent_exts); } osc_object_unlock(obj); @@ -639,15 +641,14 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, int rc; cur = osc_extent_alloc(obj); - if (cur == NULL) + if (!cur) return ERR_PTR(-ENOMEM); lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); - LASSERT(lock != NULL); LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); - LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); - ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + LASSERT(cli->cl_chunkbits >= PAGE_SHIFT); + ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; chunk_mask = ~((1 << ppc_bits) - 1); chunksize = 1 << cli->cl_chunkbits; chunk = index >> ppc_bits; @@ -673,14 +674,15 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, /* grants has been allocated by caller */ LASSERTF(*grants >= chunksize + cli->cl_extent_tax, "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax); - LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur)); + LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n", + EXTPARA(cur)); restart: osc_object_lock(obj); ext = osc_extent_search(obj, cur->oe_start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); - while (ext != NULL) { + while (ext) { loff_t ext_chk_start = ext->oe_start >> ppc_bits; loff_t ext_chk_end = ext->oe_end >> ppc_bits; @@ -691,7 +693,7 @@ restart: /* if covering by different locks, no chance to match */ if (lock != ext->oe_osclock) { EASSERTF(!overlapped(ext, cur), ext, - EXTSTR, EXTPARA(cur)); + EXTSTR"\n", EXTPARA(cur)); ext = next_extent(ext); continue; @@ -705,18 +707,21 @@ restart: /* ok, from now on, ext and cur have these attrs: * 1. covered by the same lock - * 2. contiguous at chunk level or overlapping. */ + * 2. contiguous at chunk level or overlapping. + */ if (overlapped(ext, cur)) { /* cur is the minimum unit, so overlapping means - * full contain. */ + * full contain. + */ EASSERTF((ext->oe_start <= cur->oe_start && ext->oe_end >= cur->oe_end), - ext, EXTSTR, EXTPARA(cur)); + ext, EXTSTR"\n", EXTPARA(cur)); if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) { /* for simplicity, we wait for this extent to - * finish before going forward. */ + * finish before going forward. + */ conflict = osc_extent_get(ext); break; } @@ -729,17 +734,20 @@ restart: if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) { /* we can't do anything for a non OES_CACHE extent, or * if there is someone waiting for this extent to be - * flushed, try next one. */ + * flushed, try next one. + */ ext = next_extent(ext); continue; } /* check if they belong to the same rpc slot before trying to * merge. the extents are not overlapped and contiguous at - * chunk level to get here. */ + * chunk level to get here. + */ if (ext->oe_max_end != max_end) { /* if they don't belong to the same RPC slot or - * max_pages_per_rpc has ever changed, do not merge. */ + * max_pages_per_rpc has ever changed, do not merge. + */ ext = next_extent(ext); continue; } @@ -748,7 +756,8 @@ restart: * level so that we know the whole extent is covered by grant * (the pages in the extent are NOT required to be contiguous). * Otherwise, it will be too much difficult to know which - * chunks have grants allocated. */ + * chunks have grants allocated. 
+ */ /* try to do front merge - extend ext's start */ if (chunk + 1 == ext_chk_start) { @@ -768,28 +777,29 @@ restart: *grants -= chunksize; /* try to merge with the next one because we just fill - * in a gap */ + * in a gap + */ if (osc_extent_merge(env, ext, next_extent(ext)) == 0) /* we can save extent tax from next extent */ *grants += cli->cl_extent_tax; found = osc_extent_hold(ext); } - if (found != NULL) + if (found) break; ext = next_extent(ext); } osc_extent_tree_dump(D_CACHE, obj); - if (found != NULL) { - LASSERT(conflict == NULL); + if (found) { + LASSERT(!conflict); if (!IS_ERR(found)) { LASSERT(found->oe_osclock == cur->oe_osclock); OSC_EXTENT_DUMP(D_CACHE, found, "found caching ext for %lu.\n", index); } - } else if (conflict == NULL) { + } else if (!conflict) { /* create a new extent */ EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); cur->oe_grants = chunksize + cli->cl_extent_tax; @@ -804,11 +814,12 @@ restart: } osc_object_unlock(obj); - if (conflict != NULL) { - LASSERT(found == NULL); + if (conflict) { + LASSERT(!found); /* waiting for IO to finish. Please notice that it's impossible - * to be an OES_TRUNC extent. */ + * to be an OES_TRUNC extent. + */ rc = osc_extent_wait(env, conflict, OES_INV); osc_extent_put(env, conflict); conflict = NULL; @@ -845,8 +856,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, ext->oe_rc = rc ?: ext->oe_nr_pages; EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext); - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { list_del_init(&oap->oap_rpc_item); list_del_init(&oap->oap_pending_item); if (last_off <= oap->oap_obj_off) { @@ -861,11 +871,12 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (!sent) { lost_grant = ext->oe_grants; - } else if (blocksize < PAGE_CACHE_SIZE && - last_count != PAGE_CACHE_SIZE) { + } else if (blocksize < PAGE_SIZE && + last_count != PAGE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes - * wrong. Should match the code in filter_grant_check. */ + * wrong. Should match the code in filter_grant_check. + */ int offset = oap->oap_page_off & ~CFS_PAGE_MASK; int count = oap->oap_count + (offset & (blocksize - 1)); int end = (offset + oap->oap_count) & (blocksize - 1); @@ -873,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (end) count += blocksize - end; - lost_grant = PAGE_CACHE_SIZE - count; + lost_grant = PAGE_SIZE - count; } if (ext->oe_grants > 0) osc_free_grant(cli, nr_pages, lost_grant); @@ -909,7 +920,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, osc_object_lock(obj); LASSERT(sanity_check_nolock(ext) == 0); /* `Kick' this extent only if the caller is waiting for it to be - * written out. */ + * written out. + */ if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp && !ext->oe_trunc_pending) { if (ext->oe_state == OES_ACTIVE) { @@ -955,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, struct osc_async_page *oap; struct osc_async_page *tmp; int pages_in_chunk = 0; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; __u64 trunc_chunk = trunc_index >> ppc_bits; int grants = 0; int nr_pages = 0; @@ -967,7 +979,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, /* Request new lu_env. 
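The short-write branch of osc_extent_finish() above rounds the written byte range out to whole server-side blocks before deciding how much grant was lost from the page. A userspace rerun of that arithmetic with assumed sizes (the kernel first masks oap_page_off down to an in-page offset, which this sketch takes as given):

#include <stdio.h>

int main(void)
{
        int page_size = 4096, blocksize = 1024; /* assumed sizes */
        int offset = 512, len = 1536;           /* short, unaligned write */
        int count = len + (offset & (blocksize - 1));
        int end = (offset + len) & (blocksize - 1);

        if (end)                                /* round up to a block */
                count += blocksize - end;

        printf("block-covered bytes: %d, lost grant: %d\n",
               count, page_size - count);       /* 2048 and 2048 */
        return 0;
}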
* We can't use that env from osc_cache_truncate_start() because - * it's from lov_io_sub and not fully initialized. */ + * it's from lov_io_sub and not fully initialized. + */ env = cl_env_nested_get(&nest); io = &osc_env_info(env)->oti_io; io->ci_obj = cl_object_top(osc2cl(obj)); @@ -976,15 +989,15 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, goto out; /* discard all pages with index greater then trunc_index */ - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { struct cl_page *sub = oap2cl_page(oap); struct cl_page *page = cl_page_top(sub); LASSERT(list_empty(&oap->oap_rpc_item)); /* only discard the pages with their index greater than - * trunc_index, and ... */ + * trunc_index, and ... + */ if (sub->cp_index < trunc_index || (sub->cp_index == trunc_index && partial)) { /* accounting how many pages remaining in the chunk @@ -1028,11 +1041,13 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, pgoff_t last_index; /* if there is no pages in this chunk, we can also free grants - * for the last chunk */ + * for the last chunk + */ if (pages_in_chunk == 0) { /* if this is the 1st chunk and no pages in this chunk, * ext->oe_nr_pages must be zero, so we should be in - * the other if-clause. */ + * the other if-clause. + */ LASSERT(trunc_chunk > 0); --trunc_chunk; ++chunks; @@ -1074,13 +1089,13 @@ static int osc_extent_make_ready(const struct lu_env *env, LASSERT(sanity_check(ext) == 0); /* in locking state, any process should not touch this extent. */ EASSERT(ext->oe_state == OES_LOCKING, ext); - EASSERT(ext->oe_owner != NULL, ext); + EASSERT(ext->oe_owner, ext); OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n"); list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { ++page_count; - if (last == NULL || last->oap_obj_off < oap->oap_obj_off) + if (!last || last->oap_obj_off < oap->oap_obj_off) last = oap; /* checking ASYNC_READY is race safe */ @@ -1103,21 +1118,23 @@ static int osc_extent_make_ready(const struct lu_env *env, } LASSERT(page_count == ext->oe_nr_pages); - LASSERT(last != NULL); + LASSERT(last); /* the last page is the only one we need to refresh its count by - * the size of file. */ + * the size of file. 
+ */ if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last->oap_count > 0); - LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); + LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE); last->oap_async_flags |= ASYNC_COUNT_STABLE; } /* for the rest of pages, we don't need to call osf_refresh_count() - * because it's known they are not the last page */ + * because it's known they are not the last page + */ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { - oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; + oap->oap_count = PAGE_SIZE - oap->oap_page_off; oap->oap_async_flags |= ASYNC_COUNT_STABLE; } } @@ -1141,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) struct osc_object *obj = ext->oe_obj; struct client_obd *cli = osc_cli(obj); struct osc_extent *next; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; pgoff_t chunk = index >> ppc_bits; pgoff_t end_chunk; pgoff_t end_index; @@ -1167,9 +1184,10 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1); next = next_extent(ext); - if (next != NULL && next->oe_start <= end_index) { + if (next && next->oe_start <= end_index) { /* complex mode - overlapped with the next extent, - * this case will be handled by osc_extent_find() */ + * this case will be handled by osc_extent_find() + */ rc = -EAGAIN; goto out; } @@ -1197,7 +1215,7 @@ static void osc_extent_tree_dump0(int level, struct osc_object *obj, /* osc_object_lock(obj); */ cnt = 1; - for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext)) + for (ext = first_extent(obj); ext; ext = next_extent(ext)) OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++); cnt = 1; @@ -1262,7 +1280,6 @@ static int osc_refresh_count(const struct lu_env *env, /* readpage queues with _COUNT_STABLE, shouldn't get here. */ LASSERT(!(cmd & OBD_BRW_READ)); - LASSERT(opg != NULL); obj = opg->ops_cl.cpl_obj; cl_object_attr_lock(obj); @@ -1276,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env, return 0; else if (cl_offset(obj, page->cp_index + 1) > kms) /* catch sub-page write at end of file */ - return kms % PAGE_CACHE_SIZE; + return kms % PAGE_SIZE; else - return PAGE_CACHE_SIZE; + return PAGE_SIZE; } static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, @@ -1299,16 +1316,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, * page->cp_req can be NULL if io submission failed before * cl_req was allocated. */ - if (page->cp_req != NULL) + if (page->cp_req) cl_req_page_done(env, page); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; /* Clear opg->ops_transfer_pinned before VM lock is released. 
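The osc_refresh_count() hunk below again only renames the constant, but the kms logic it preserves deserves a worked example (values assumed): with kms = 10000 and PAGE_SIZE = 4096, page 1 ends at byte 8192 <= kms and gets a full 4096-byte count; page 2 spans [8192, 12288) and contains EOF, so it gets kms % PAGE_SIZE = 1808; page 3 starts at 12288 >= kms and gets 0. A standalone restatement of the same bounds check (names assumed):

        /* Sketch mirroring osc_refresh_count()'s EOF checks. */
        static unsigned int write_count_for_page(loff_t kms, unsigned long index)
        {
                loff_t start = (loff_t)index << PAGE_SHIFT;

                if (start >= kms)
                        return 0;                       /* wholly past EOF */
                if (start + PAGE_SIZE > kms)
                        return kms % PAGE_SIZE;         /* EOF inside page */
                return PAGE_SIZE;                       /* fully covered */
        }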
*/ opg->ops_transfer_pinned = 0; spin_lock(&obj->oo_seatbelt); - LASSERT(opg->ops_submitter != NULL); + LASSERT(opg->ops_submitter); LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -1359,15 +1376,16 @@ static void osc_consume_write_grant(struct client_obd *cli, assert_spin_locked(&cli->cl_loi_list_lock.lock); LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); atomic_inc(&obd_dirty_pages); - cli->cl_dirty += PAGE_CACHE_SIZE; + cli->cl_dirty += PAGE_SIZE; pga->flag |= OBD_BRW_FROM_GRANT; CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", - PAGE_CACHE_SIZE, pga, pga->pg); + PAGE_SIZE, pga, pga->pg); osc_update_next_shrink(cli); } /* the companion to osc_consume_write_grant, called when a brw has completed. - * must be called with the loi lock held. */ + * must be called with the loi lock held. + */ static void osc_release_write_grant(struct client_obd *cli, struct brw_page *pga) { @@ -1378,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli, pga->flag &= ~OBD_BRW_FROM_GRANT; atomic_dec(&obd_dirty_pages); - cli->cl_dirty -= PAGE_CACHE_SIZE; + cli->cl_dirty -= PAGE_SIZE; if (pga->flag & OBD_BRW_NOCACHE) { pga->flag &= ~OBD_BRW_NOCACHE; atomic_dec(&obd_dirty_transit_pages); - cli->cl_dirty_transit -= PAGE_CACHE_SIZE; + cli->cl_dirty_transit -= PAGE_SIZE; } } @@ -1410,7 +1428,8 @@ static void __osc_unreserve_grant(struct client_obd *cli, /* it's quite normal for us to get more grant than reserved. * Thinking about a case that two extents merged by adding a new * chunk, we can save one extent tax. If extent tax is greater than - * one chunk, we can save more grant by adding a new chunk */ + * one chunk, we can save more grant by adding a new chunk + */ cli->cl_reserved_grant -= reserved; if (unused > reserved) { cli->cl_avail_grant += reserved; @@ -1437,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli, * used, we should return these grants to OST. There're two cases where grants * can be lost: * 1. truncate; - * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was + * 2. blocksize at OST is less than PAGE_SIZE and a partial page was * written. In this case OST may use less chunks to serve this partial * write. OSTs don't actually know the page size on the client side. so * clients have to calculate lost grant by the blocksize on the OST. 
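To make the second lost-grant case concrete (assumed values): with PAGE_SIZE = 4096, an OST blocksize of 1024 and a 100-byte write at page offset 0, the server rounds the I/O up to one block, so in the osc_extent_finish() code shown earlier the client computes:

        /* end = (0 + 100) & (1024 - 1) = 100, so round up to the block */
        count += blocksize - end;               /* 100 + 924 = 1024      */
        lost_grant = PAGE_SIZE - count;         /* 4096 - 1024 = 3072    */

i.e. 3072 bytes of grant were reserved on the client but never charged by the OST, and must be reported back.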
@@ -1450,11 +1469,12 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, client_obd_list_lock(&cli->cl_loi_list_lock); atomic_sub(nr_pages, &obd_dirty_pages); - cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; + cli->cl_dirty -= nr_pages << PAGE_SHIFT; cli->cl_lost_grant += lost_grant; if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { /* borrow some grant from truncate to avoid the case that - * truncate uses up all avail grant */ + * truncate uses up all avail grant + */ cli->cl_lost_grant -= grant; cli->cl_avail_grant += grant; } @@ -1492,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli, if (rc < 0) return 0; - if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && + if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max && atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { - cli->cl_dirty_transit += PAGE_CACHE_SIZE; + cli->cl_dirty_transit += PAGE_SIZE; atomic_inc(&obd_dirty_transit_pages); oap->oap_brw_flags |= OBD_BRW_NOCACHE; } @@ -1539,9 +1559,10 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, client_obd_list_lock(&cli->cl_loi_list_lock); /* force the caller to try sync io. this can jump the list - * of queued writes and create a discontiguous rpc stream */ + * of queued writes and create a discontiguous rpc stream + */ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || - cli->cl_dirty_max < PAGE_CACHE_SIZE || + cli->cl_dirty_max < PAGE_SIZE || cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { rc = -EDQUOT; goto out; @@ -1558,7 +1579,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, * Adding a cache waiter will trigger urgent write-out no matter what * RPC size will be. * The exiting condition is no avail grants and no dirty pages caching, - * that really means there is no space on the OST. */ + * that really means there is no space on the OST. + */ init_waitqueue_head(&ocw.ocw_waitq); ocw.ocw_oap = oap; ocw.ocw_grant = bytes; @@ -1610,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli) ocw->ocw_rc = -EDQUOT; /* we can't dirty more */ - if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || + if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) || (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) { CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", @@ -1640,7 +1662,8 @@ static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc) /* This maintains the lists of pending pages to read/write for a given object * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint() - * to quickly find objects that are ready to send an RPC. */ + * to quickly find objects that are ready to send an RPC. + */ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, int cmd) { @@ -1649,8 +1672,9 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, /* if we have an invalid import we want to drain the queued pages * by forcing them through rpcs that immediately fail and complete * the pages. 
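The admission test in osc_enter_cache_try() above enforces two limits at once, and note the units differ: cl_dirty is counted in bytes while obd_dirty_pages is a global page count. A simplified restatement (illustrative helper, not the driver's API):

        /* Sketch: a page may enter the write cache only if neither the
         * per-client byte budget nor the global page budget overflows.
         */
        static bool may_dirty_one_page(unsigned long cl_dirty,      /* bytes */
                                       unsigned long cl_dirty_max,  /* bytes */
                                       long dirty_pages, long max_dirty_pages)
        {
                return cl_dirty + PAGE_SIZE <= cl_dirty_max &&
                       dirty_pages + 1 <= max_dirty_pages;
        }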
recovery relies on this to empty the queued pages - * before canceling the locks and evicting down the llite pages */ - if ((cli->cl_import == NULL || cli->cl_import->imp_invalid)) + * before canceling the locks and evicting down the llite pages + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) invalid_import = 1; if (cmd & OBD_BRW_WRITE) { @@ -1670,7 +1694,8 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, } /* trigger a write rpc stream as long as there are dirtiers * waiting for space. as they're waiting, they're not going to - * create more pages to coalesce with what's waiting.. */ + * create more pages to coalesce with what's waiting.. + */ if (!list_empty(&cli->cl_cache_waiters)) { CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); return 1; @@ -1723,7 +1748,8 @@ static void on_list(struct list_head *item, struct list_head *list, int should_b } /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc - * can find pages to build into rpcs quickly */ + * can find pages to build into rpcs quickly + */ static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc) { if (osc_makes_hprpc(osc)) { @@ -1761,7 +1787,8 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc) * application. As an async write fails we record the error code for later if * the app does an fsync. As long as errors persist we force future rpcs to be * sync so that the app can get a sync error and break the cycle of queueing - * pages for which writeback will fail. */ + * pages for which writeback will fail. + */ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, int rc) { @@ -1780,7 +1807,8 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, } /* this must be called holding the loi list lock to give coverage to exit_cache, - * async_flag maintenance, and oap_request */ + * async_flag maintenance, and oap_request + */ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct osc_async_page *oap, int sent, int rc) { @@ -1788,7 +1816,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct lov_oinfo *loi = osc->oo_oinfo; __u64 xid = 0; - if (oap->oap_request != NULL) { + if (oap->oap_request) { xid = ptlrpc_req_xid(oap->oap_request); ptlrpc_req_finished(oap->oap_request); oap->oap_request = NULL; @@ -1877,13 +1905,12 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; int page_count = 0; unsigned int max_pages = cli->cl_max_pages_per_rpc; LASSERT(osc_object_is_locked(obj)); - while (!list_empty(&obj->oo_hp_exts)) { - ext = list_entry(obj->oo_hp_exts.next, struct osc_extent, - oe_link); + list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) { LASSERT(ext->oe_state == OES_CACHE); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) @@ -1895,7 +1922,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while (!list_empty(&obj->oo_urgent_exts)) { ext = list_entry(obj->oo_urgent_exts.next, - struct osc_extent, oe_link); + struct osc_extent, oe_link); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) return page_count; @@ -1906,7 +1933,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while ((ext = next_extent(ext)) != NULL) { if ((ext->oe_state != OES_CACHE) || (!list_empty(&ext->oe_link) && - ext->oe_owner 
!= NULL)) + ext->oe_owner)) continue; if (!try_to_add_extent_for_io(cli, ext, rpclist, @@ -1918,10 +1945,10 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) return page_count; ext = first_extent(obj); - while (ext != NULL) { + while (ext) { if ((ext->oe_state != OES_CACHE) || /* this extent may be already in current rpclist */ - (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) { + (!list_empty(&ext->oe_link) && ext->oe_owner)) { ext = next_extent(ext); continue; } @@ -1938,6 +1965,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) static int osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_object *osc) + __must_hold(osc) { LIST_HEAD(rpclist); struct osc_extent *ext; @@ -1967,7 +1995,8 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, } /* we're going to grab page lock, so release object lock because - * lock order is page lock -> object lock. */ + * lock order is page lock -> object lock. + */ osc_object_unlock(osc); list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) { @@ -1979,7 +2008,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, continue; } } - if (first == NULL) { + if (!first) { first = ext; srvlock = ext->oe_srvlock; } else { @@ -2010,6 +2039,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, static int osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_object *osc) + __must_hold(osc) { struct osc_extent *ext; struct osc_extent *next; @@ -2019,8 +2049,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, int rc = 0; LASSERT(osc_object_is_locked(osc)); - list_for_each_entry_safe(ext, next, - &osc->oo_reading_exts, oe_link) { + list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) { EASSERT(ext->oe_state == OES_LOCK_DONE, ext); if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count, &max_pages)) @@ -2051,12 +2080,14 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, }) /* This is called by osc_check_rpcs() to find which objects have pages that - * we could be sending. These lists are maintained by osc_makes_rpc(). */ + * we could be sending. These lists are maintained by osc_makes_rpc(). + */ static struct osc_object *osc_next_obj(struct client_obd *cli) { /* First return objects that have blocked locks so that they * will be flushed quickly and other clients can get the lock, - * then objects which have pages ready to be stuffed into RPCs */ + * then objects which have pages ready to be stuffed into RPCs + */ if (!list_empty(&cli->cl_loi_hp_ready_list)) return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item); if (!list_empty(&cli->cl_loi_ready_list)) @@ -2065,14 +2096,16 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* then if we have cache waiters, return all objects with queued * writes. 
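The __must_hold() annotations added to osc_send_write_rpc() and osc_send_read_rpc() are for static analysis only: under sparse ("make C=1") the attribute declares that the function is entered and exited with the given lock context held, and sparse warns on call paths that violate it; in a normal build it expands to nothing. A hypothetical example of the pairing:

        /* Illustration only: sparse checks that every caller reaches
         * flush_locked() with cl_loi_list_lock held.
         */
        static void flush_locked(struct client_obd *cli)
                __must_hold(&cli->cl_loi_list_lock)
        {
                /* ... walk lists protected by cl_loi_list_lock ... */
        }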
This is especially important when many small files * have filled up the cache and not been fired into rpcs because - * they don't pass the nr_pending/object threshold */ + * they don't pass the nr_pending/object threshold + */ if (!list_empty(&cli->cl_cache_waiters) && !list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); /* then return all queued objects when we have an invalid import - * so that they get flushed */ - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) { + * so that they get flushed + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) { if (!list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); if (!list_empty(&cli->cl_loi_read_list)) @@ -2083,6 +2116,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* called with the loi list lock held */ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) + __must_hold(&cli->cl_loi_list_lock) { struct osc_object *osc; int rc = 0; @@ -2108,7 +2142,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * would be redundant if we were getting read/write work items * instead of objects. we don't want send_oap_rpc to drain a * partial read pending queue when we're given this object to - * do io on writes while there are cache waiters */ + * do io on writes while there are cache waiters + */ osc_object_lock(osc); if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) { rc = osc_send_write_rpc(env, cli, osc); @@ -2130,7 +2165,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * because it might be blocked at grabbing * the page lock as we mentioned. * - * Anyway, continue to drain pages. */ + * Anyway, continue to drain pages. + */ /* break; */ } } @@ -2155,12 +2191,13 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, { int rc = 0; - if (osc != NULL && osc_list_maint(cli, osc) == 0) + if (osc && osc_list_maint(cli, osc) == 0) return 0; if (!async) { /* disable osc_lru_shrink() temporarily to avoid - * potential stack overrun problem. LU-2859 */ + * potential stack overrun problem. LU-2859 + */ atomic_inc(&cli->cl_lru_shrinkers); client_obd_list_lock(&cli->cl_loi_list_lock); osc_check_rpcs(env, cli); @@ -2168,7 +2205,7 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, atomic_dec(&cli->cl_lru_shrinkers); } else { CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); - LASSERT(cli->cl_writeback_work != NULL); + LASSERT(cli->cl_writeback_work); rc = ptlrpcd_queue_work(cli->cl_writeback_work); } return rc; @@ -2233,7 +2270,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, if (oap->oap_magic != OAP_MAGIC) return -EINVAL; - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) + if (!cli->cl_import || cli->cl_import->imp_invalid) return -EIO; if (!list_empty(&oap->oap_pending_item) || @@ -2284,12 +2321,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, * 1. if there exists an active extent for this IO, mostly this page * can be added to the active extent and sometimes we need to * expand extent to accommodate this page; - * 2. otherwise, a new extent will be allocated. */ + * 2. otherwise, a new extent will be allocated. 
+ */ ext = oio->oi_active; - if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) { + if (ext && ext->oe_start <= index && ext->oe_max_end >= index) { /* one chunk plus extent overhead must be enough to write this - * page */ + * page + */ grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; if (ext->oe_end >= index) grants = 0; @@ -2316,7 +2355,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, } } rc = 0; - } else if (ext != NULL) { + } else if (ext) { /* index is located outside of active extent */ need_release = 1; } @@ -2326,13 +2365,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, ext = NULL; } - if (ext == NULL) { + if (!ext) { int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; /* try to find new extent to cover this page */ - LASSERT(oio->oi_active == NULL); + LASSERT(!oio->oi_active); /* we may have allocated grant for this page if we failed - * to expand the previous active extent. */ + * to expand the previous active extent. + */ LASSERT(ergo(grants > 0, grants >= tmp)); rc = 0; @@ -2359,8 +2399,8 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, osc_unreserve_grant(cli, grants, tmp); } - LASSERT(ergo(rc == 0, ext != NULL)); - if (ext != NULL) { + LASSERT(ergo(rc == 0, ext)); + if (ext) { EASSERTF(ext->oe_end >= index && ext->oe_start <= index, ext, "index = %lu.\n", index); LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0); @@ -2397,15 +2437,16 @@ int osc_teardown_async_page(const struct lu_env *env, ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index); /* only truncated pages are allowed to be taken out. * See osc_extent_truncate() and osc_cache_truncate_start() - * for details. */ - if (ext != NULL && ext->oe_state != OES_TRUNC) { + * for details. + */ + if (ext && ext->oe_state != OES_TRUNC) { OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n", oap2cl_page(oap)->cp_index); rc = -EBUSY; } } osc_object_unlock(obj); - if (ext != NULL) + if (ext) osc_extent_put(env, ext); return rc; } @@ -2430,7 +2471,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, osc_object_lock(obj); ext = osc_extent_lookup(obj, index); - if (ext == NULL) { + if (!ext) { osc_extent_tree_dump(D_ERROR, obj); LASSERTF(0, "page index %lu is NOT covered.\n", index); } @@ -2448,7 +2489,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * exists a deadlock problem because other process can wait for * page writeback bit holding page lock; and meanwhile in * vvp_page_make_ready(), we need to grab page lock before - * really sending the RPC. */ + * really sending the RPC. + */ case OES_TRUNC: /* race with truncate, page will be redirtied */ case OES_ACTIVE: @@ -2456,7 +2498,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * re-dirty the page. If we continued on here, and we were the * one making the extent active, we could deadlock waiting for * the page writeback to clear but it won't because the extent - * is active and won't be written out. */ + * is active and won't be written out. + */ rc = -EAGAIN; goto out; default: @@ -2527,12 +2570,13 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) if (ext->oe_start <= index && ext->oe_end >= index) { LASSERT(ext->oe_state == OES_LOCK_DONE); /* For OES_LOCK_DONE state extent, it has already held - * a refcount for RPC. */ + * a refcount for RPC. 
+ */ found = osc_extent_get(ext); break; } } - if (found != NULL) { + if (found) { list_del_init(&found->oe_link); osc_update_pending(obj, cmd, -found->oe_nr_pages); osc_object_unlock(obj); @@ -2543,8 +2587,9 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) } else { osc_object_unlock(obj); /* ok, it's been put in an rpc. only one oap gets a request - * reference */ - if (oap->oap_request != NULL) { + * reference + */ + if (oap->oap_request) { ptlrpc_mark_interrupted(oap->oap_request); ptlrpcd_wake(oap->oap_request); ptlrpc_req_finished(oap->oap_request); @@ -2579,7 +2624,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, } ext = osc_extent_alloc(obj); - if (ext == NULL) { + if (!ext) { list_for_each_entry_safe(oap, tmp, list, oap_pending_item) { list_del_init(&oap->oap_pending_item); osc_ap_completion(env, cli, oap, 0, -ENOMEM); @@ -2621,6 +2666,7 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; struct osc_extent *waiting = NULL; pgoff_t index; LIST_HEAD(list); @@ -2634,18 +2680,19 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { EASSERT(ext->oe_state != OES_TRUNC, ext); if (ext->oe_state > OES_CACHE || ext->oe_urgent) { /* if ext is in urgent state, it means there must exist * a page already having been flushed by write_page(). * We have to wait for this extent because we can't - * truncate that page. */ + * truncate that page. + */ LASSERT(!ext->oe_hp); OSC_EXTENT_DUMP(D_CACHE, ext, "waiting for busy extent\n"); @@ -2660,7 +2707,8 @@ again: /* though we grab inode mutex for write path, but we * release it before releasing extent(in osc_io_end()), * so there is a race window that an extent is still - * in OES_ACTIVE when truncate starts. */ + * in OES_ACTIVE when truncate starts. + */ LASSERT(!ext->oe_trunc_pending); ext->oe_trunc_pending = 1; } else { @@ -2678,14 +2726,14 @@ again: osc_list_maint(cli, obj); - while (!list_empty(&list)) { + list_for_each_entry_safe(ext, temp, &list, oe_link) { int rc; - ext = list_entry(list.next, struct osc_extent, oe_link); list_del_init(&ext->oe_link); /* extent may be in OES_ACTIVE state because inode mutex - * is released before osc_io_end() in file write case */ + * is released before osc_io_end() in file write case + */ if (ext->oe_state != OES_TRUNC) osc_extent_wait(env, ext, OES_TRUNC); @@ -2710,19 +2758,21 @@ again: /* we need to hold this extent in OES_TRUNC state so * that no writeback will happen. This is to avoid - * BUG 17397. */ - LASSERT(oio->oi_trunc == NULL); + * BUG 17397. + */ + LASSERT(!oio->oi_trunc); oio->oi_trunc = osc_extent_get(ext); OSC_EXTENT_DUMP(D_CACHE, ext, "trunc at %llu\n", size); } osc_extent_put(env, ext); } - if (waiting != NULL) { + if (waiting) { int rc; /* ignore the result of osc_extent_wait the write initiator - * should take care of it. */ + * should take care of it. 
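Both the osc_cache_truncate_start() hunk below and the earlier get_write_extents() one replace an open-coded "while (!list_empty()) { ext = list_entry(head->next, ...); }" walk with list_for_each_entry_safe(), which caches the next node so the current entry may be unlinked mid-iteration. The general shape, as a self-contained sketch (struct item and drain() are illustrative names):

        #include <linux/list.h>

        struct item {
                struct list_head link;
        };

        static void drain(struct list_head *todo)
        {
                struct item *it, *tmp;

                list_for_each_entry_safe(it, tmp, todo, link) {
                        list_del_init(&it->link);   /* safe: tmp is cached */
                        /* ... process or free 'it' ... */
                }
        }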
+ */ rc = osc_extent_wait(env, waiting, OES_INV); if (rc < 0) OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc); @@ -2743,7 +2793,7 @@ void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio, struct osc_extent *ext = oio->oi_trunc; oio->oi_trunc = NULL; - if (ext != NULL) { + if (ext) { bool unplug = false; EASSERT(ext->oe_nr_pages > 0, ext); @@ -2786,11 +2836,11 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { int rc; if (ext->oe_start > end) @@ -2841,11 +2891,11 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, osc_object_lock(obj); ext = osc_extent_search(obj, start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < start) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { if (ext->oe_start > end) break; @@ -2864,18 +2914,18 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, ext->oe_urgent = 1; list = &obj->oo_urgent_exts; } - if (list != NULL) + if (list) list_move_tail(&ext->oe_link, list); unplug = true; } else { /* the only discarder is lock cancelling, so - * [start, end] must contain this extent */ + * [start, end] must contain this extent + */ EASSERT(ext->oe_start >= start && ext->oe_max_end <= end, ext); osc_extent_state_set(ext, OES_LOCKING); ext->oe_owner = current; - list_move_tail(&ext->oe_link, - &discard_list); + list_move_tail(&ext->oe_link, &discard_list); osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages); } @@ -2884,14 +2934,16 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, /* It's pretty bad to wait for ACTIVE extents, because * we don't know how long we will wait for it to be * flushed since it may be blocked at awaiting more - * grants. We do this for the correctness of fsync. */ + * grants. We do this for the correctness of fsync. + */ LASSERT(hp == 0 && discard == 0); ext->oe_urgent = 1; break; case OES_TRUNC: /* this extent is being truncated, can't do anything * for it now. it will be set to urgent after truncate - * is finished in osc_cache_truncate_end(). */ + * is finished in osc_cache_truncate_end(). + */ default: break; } @@ -2910,7 +2962,8 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, EASSERT(ext->oe_state == OES_LOCKING, ext); /* Discard caching pages. We don't actually write this - * extent out but we complete it as if we did. */ + * extent out but we complete it as if we did. + */ rc = osc_extent_make_ready(env, ext); if (unlikely(rc < 0)) { OSC_EXTENT_DUMP(D_ERROR, ext, diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h index 415c27e4a..d55d04d04 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h @@ -69,10 +69,12 @@ struct osc_io { /** true if this io is lockless. */ int oi_lockless; /** active extents, we know how many bytes is going to be written, - * so having an active extent will prevent it from being fragmented */ + * so having an active extent will prevent it from being fragmented + */ struct osc_extent *oi_active; /** partially truncated extent, we need to hold this extent to prevent - * page writeback from happening. */ + * page writeback from happening. 
+ */ struct osc_extent *oi_trunc; struct obd_info oi_info; @@ -154,7 +156,8 @@ struct osc_object { atomic_t oo_nr_writes; /** Protect extent tree. Will be used to protect - * oo_{read|write}_pages soon. */ + * oo_{read|write}_pages soon. + */ spinlock_t oo_lock; }; @@ -472,7 +475,7 @@ static inline struct osc_thread_info *osc_env_info(const struct lu_env *env) struct osc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &osc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -481,7 +484,7 @@ static inline struct osc_session *osc_env_session(const struct lu_env *env) struct osc_session *ses; ses = lu_context_key_get(env->le_ses, &osc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -522,7 +525,7 @@ static inline struct cl_object *osc2cl(const struct osc_object *obj) return (struct cl_object *)&obj->oo_cl; } -static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) +static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode) { LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP); if (mode == CLM_READ) @@ -533,7 +536,7 @@ static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) return LCK_GROUP; } -static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode) +static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode) { LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP); if (mode == LCK_PR) @@ -627,22 +630,26 @@ struct osc_extent { oe_srvlock:1, oe_memalloc:1, /** an ACTIVE extent is going to be truncated, so when this extent - * is released, it will turn into TRUNC state instead of CACHE. */ + * is released, it will turn into TRUNC state instead of CACHE. + */ oe_trunc_pending:1, /** this extent should be written asap and someone may wait for the * write to finish. This bit is usually set along with urgent if * the extent was CACHE state. * fsync_wait extent can't be merged because new extent region may - * exceed fsync range. */ + * exceed fsync range. + */ oe_fsync_wait:1, /** covering lock is being canceled */ oe_hp:1, /** this extent should be written back asap. set if one of pages is - * called by page WB daemon, or sync write or reading requests. */ + * called by page WB daemon, or sync write or reading requests. + */ oe_urgent:1; /** how many grants allocated for this extent. * Grant allocated for this extent. There is no grant allocated - * for reading extents and sync write extents. */ + * for reading extents and sync write extents. + */ unsigned int oe_grants; /** # of dirty pages in this extent */ unsigned int oe_nr_pages; @@ -655,21 +662,25 @@ struct osc_extent { struct osc_page *oe_next_page; /** start and end index of this extent, include start and end * themselves. Page offset here is the page index of osc_pages. - * oe_start is used as keyword for red-black tree. */ + * oe_start is used as keyword for red-black tree. + */ pgoff_t oe_start; pgoff_t oe_end; /** maximum ending index of this extent, this is limited by - * max_pages_per_rpc, lock extent and chunk size. */ + * max_pages_per_rpc, lock extent and chunk size. + */ pgoff_t oe_max_end; /** waitqueue - for those who want to be notified if this extent's - * state has changed. */ + * state has changed. + */ wait_queue_head_t oe_waitq; /** lock covering this extent */ struct cl_lock *oe_osclock; /** terminator of this extent. Must be true if this extent is in IO. */ struct task_struct *oe_owner; /** return value of writeback. 
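The osc_cl_lock2ldlm()/osc_ldlm2cl_lock() signature changes in this header are part of the staging-wide removal of Lustre's private typedefs; kernel style prefers the enum tag spelled out. If I recall the old header correctly, the shape of the change is simply:

        /* before (sketch): typedef enum { LCK_EX, LCK_PW, LCK_PR, ... } ldlm_mode_t;
         * after: the same constants, reached through the tag name:
         */
        enum ldlm_mode mode = LCK_PR;   /* rather than: ldlm_mode_t mode = LCK_PR; */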
If somebody is waiting for this extent, - * this value can be known by outside world. */ + * this value can be known by outside world. + */ int oe_rc; /** max pages per rpc when this extent was created */ unsigned int oe_mppr; diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c index 7078cc57d..d4fe507f1 100644 --- a/drivers/staging/lustre/lustre/osc/osc_dev.c +++ b/drivers/staging/lustre/lustre/osc/osc_dev.c @@ -122,8 +122,8 @@ static void *osc_key_init(const struct lu_context *ctx, { struct osc_thread_info *info; - info = kmem_cache_alloc(osc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -147,8 +147,8 @@ static void *osc_session_init(const struct lu_context *ctx, { struct osc_session *info; - info = kmem_cache_alloc(osc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -228,7 +228,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env, /* Setup OSC OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = osc_setup(obd, cfg); if (rc) { osc_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h index a4c61463b..ea695c209 100644 --- a/drivers/staging/lustre/lustre/osc/osc_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_internal.h @@ -47,11 +47,13 @@ struct lu_env; enum async_flags { ASYNC_READY = 0x1, /* ap_make_ready will not be called before this - page is added to an rpc */ + * page is added to an rpc + */ ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */ ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called - to give the caller a chance to update - or cancel the size of the io */ + * to give the caller a chance to update + * or cancel the size of the io + */ ASYNC_HP = 0x10, }; diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c index abd0beb48..6bd0a45d8 100644 --- a/drivers/staging/lustre/lustre/osc/osc_io.c +++ b/drivers/staging/lustre/lustre/osc/osc_io.c @@ -73,7 +73,7 @@ static struct osc_page *osc_cl_page_osc(struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2osc_page(slice); } @@ -135,7 +135,7 @@ static int osc_io_submit(const struct lu_env *env, /* Top level IO. */ io = page->cp_owner; - LASSERT(io != NULL); + LASSERT(io); opg = osc_cl_page_osc(page); oap = &opg->ops_oap; @@ -266,13 +266,14 @@ static int osc_io_prepare_write(const struct lu_env *env, * This implements OBD_BRW_CHECK logic from old client. */ - if (imp == NULL || imp->imp_invalid) + if (!imp || imp->imp_invalid) result = -EIO; if (result == 0 && oio->oi_lockless) /* this page contains `invalid' data, but who cares? * nobody can access the invalid data. * in osc_io_commit_write(), we're going to write exact - * [from, to) bytes of this page to OST. -jay */ + * [from, to) bytes of this page to OST. 
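The kmem_cache_alloc(..., GFP_NOFS | __GFP_ZERO) -> kmem_cache_zalloc(..., GFP_NOFS) conversions in osc_dev.c are behavior-preserving: kmem_cache_zalloc() is simply kmem_cache_alloc() with __GFP_ZERO ORed in, so the object comes back zeroed either way. Usage exactly as in the hunk:

        struct osc_session *info;

        info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS);  /* zeroed */
        if (!info)
                info = ERR_PTR(-ENOMEM);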
-jay + */ cl_page_export(env, slice->cpl_page, 1); return result; @@ -349,14 +350,14 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, __u64 start = *(__u64 *)cbdata; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); ops = cl2osc_page(slice); oap = &ops->ops_oap; if (oap->oap_cmd & OBD_BRW_WRITE && !list_empty(&oap->oap_pending_item)) CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", - start, current->comm); + start, current->comm); { struct page *vmpage = cl_page_vmpage(env, page); @@ -500,7 +501,7 @@ static void osc_io_setattr_end(const struct lu_env *env, __u64 size = io->u.ci_setattr.sa_attr.lvb_size; osc_trunc_check(env, io, oio, size); - if (oio->oi_trunc != NULL) { + if (oio->oi_trunc) { osc_cache_truncate_end(env, oio, cl2osc(obj)); oio->oi_trunc = NULL; } @@ -596,7 +597,8 @@ static int osc_io_fsync_start(const struct lu_env *env, * send OST_SYNC RPC. This is bad because it causes extents * to be written osc by osc. However, we usually start * writeback before CL_FSYNC_ALL so this won't have any real - * problem. */ + * problem. + */ rc = osc_cache_wait_range(env, osc, start, end); if (result == 0) result = rc; @@ -754,13 +756,12 @@ static void osc_req_attr_set(const struct lu_env *env, opg = osc_cl_page_osc(apage); apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */ lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1); - if (lock == NULL) { + if (!lock) { struct cl_object_header *head; struct cl_lock *scan; head = cl_object_header(apage->cp_obj); - list_for_each_entry(scan, &head->coh_locks, - cll_linkage) + list_for_each_entry(scan, &head->coh_locks, cll_linkage) CL_LOCK_DEBUG(D_ERROR, env, scan, "no cover page!\n"); CL_PAGE_DEBUG(D_ERROR, env, apage, @@ -770,10 +771,9 @@ static void osc_req_attr_set(const struct lu_env *env, } olck = osc_lock_at(lock); - LASSERT(olck != NULL); - LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL)); + LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock)); /* check for lockless io. 
*/ - if (olck->ols_lock != NULL) { + if (olck->ols_lock) { oa->o_handle = olck->ols_lock->l_remote_handle; oa->o_valid |= OBD_MD_FLHANDLE; } @@ -803,8 +803,8 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev, struct osc_req *or; int result; - or = kmem_cache_alloc(osc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (or != NULL) { + or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS); + if (or) { cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); result = 0; } else diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c index 71f2810d1..013df9787 100644 --- a/drivers/staging/lustre/lustre/osc/osc_lock.c +++ b/drivers/staging/lustre/lustre/osc/osc_lock.c @@ -79,7 +79,7 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle) struct ldlm_lock *lock; lock = ldlm_handle2lock(handle); - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return lock; } @@ -94,42 +94,40 @@ static int osc_lock_invariant(struct osc_lock *ols) int handle_used = lustre_handle_is_used(&ols->ols_handle); if (ergo(osc_lock_is_lockless(ols), - ols->ols_locklessable && ols->ols_lock == NULL)) + ols->ols_locklessable && !ols->ols_lock)) return 1; /* * If all the following "ergo"s are true, return 1, otherwise 0 */ - if (!ergo(olock != NULL, handle_used)) + if (!ergo(olock, handle_used)) return 0; - if (!ergo(olock != NULL, - olock->l_handle.h_cookie == ols->ols_handle.cookie)) + if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie)) return 0; if (!ergo(handle_used, - ergo(lock != NULL && olock != NULL, lock == olock) && - ergo(lock == NULL, olock == NULL))) + ergo(lock && olock, lock == olock) && + ergo(!lock, !olock))) return 0; /* * Check that ->ols_handle and ->ols_lock are consistent, but * take into account that they are set at the different time. */ if (!ergo(ols->ols_state == OLS_CANCELLED, - olock == NULL && !handle_used)) + !olock && !handle_used)) return 0; /* * DLM lock is destroyed only after we have seen cancellation * ast. */ - if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, - ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) + if (!ergo(olock && ols->ols_state < OLS_CANCELLED, + ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) return 0; if (!ergo(ols->ols_state == OLS_GRANTED, - olock != NULL && - olock->l_req_mode == olock->l_granted_mode && - ols->ols_hold)) + olock && olock->l_req_mode == olock->l_granted_mode && + ols->ols_hold)) return 0; return 1; } @@ -149,14 +147,15 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) spin_lock(&osc_ast_guard); dlmlock = olck->ols_lock; - if (dlmlock == NULL) { + if (!dlmlock) { spin_unlock(&osc_ast_guard); return; } olck->ols_lock = NULL; /* wb(); --- for all who checks (ols->ols_lock != NULL) before - * call to osc_lock_detach() */ + * call to osc_lock_detach() + */ dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -171,7 +170,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) /* Must get the value under the lock to avoid possible races. */ old_kms = cl2osc(obj)->oo_oinfo->loi_kms; /* Update the kms. Need to loop all granted locks. 
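The osc_lock_invariant() rewrites above read more easily with the libcfs helpers in mind; as I remember them they are defined along these lines:

        /* libcfs logic helpers (paraphrased for reference):
         *   ergo(a, b)  "a implies b"   ->  (!(a) || (b))
         *   equi(a, b)  "a iff b"       ->  (!!(a) == !!(b))
         * e.g. ergo(olock, handle_used) asserts that a non-NULL dlmlock
         * pointer is only ever paired with a live lock handle.
         */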
- * Not a problem for the client */ + * Not a problem for the client + */ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); cl_object_attr_set(env, obj, attr, CAT_KMS); @@ -223,8 +223,7 @@ static int osc_lock_unuse(const struct lu_env *env, /* * Move lock into OLS_RELEASED state before calling * osc_cancel_base() so that possible synchronous cancellation - * (that always happens e.g., for liblustre) sees that lock is - * released. + * sees that lock is released. */ ols->ols_state = OLS_RELEASED; return osc_lock_unhold(ols); @@ -247,7 +246,7 @@ static void osc_lock_fini(const struct lu_env *env, * lock is destroyed immediately after upcall. */ osc_lock_unhold(ols); - LASSERT(ols->ols_lock == NULL); + LASSERT(!ols->ols_lock); LASSERT(atomic_read(&ols->ols_pageref) == 0 || atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC); @@ -292,7 +291,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) lock_res_and_lock(dlm_lock); spin_lock(&osc_ast_guard); olck = dlm_lock->l_ast_data; - if (olck != NULL) { + if (olck) { struct cl_lock *lock = olck->ols_cl.cls_lock; /* * If osc_lock holds a reference on ldlm lock, return it even @@ -359,13 +358,13 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, __u64 size; dlmlock = olck->ols_lock; - LASSERT(dlmlock != NULL); /* re-grab LVB from a dlm lock under DLM spin-locks. */ *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; size = lvb->lvb_size; /* Extend KMS up to the end of this lock and no further - * A lock on [x,y] means a KMS of up to y + 1 bytes! */ + * A lock on [x,y] means a KMS of up to y + 1 bytes! + */ if (size > dlmlock->l_policy_data.l_extent.end) size = dlmlock->l_policy_data.l_extent.end + 1; if (size >= oinfo->loi_kms) { @@ -429,7 +428,8 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, * to take a semaphore on a parent lock. This is safe, because * spin-locks are needed to protect consistency of * dlmlock->l_*_mode and LVB, and we have finished processing - * them. */ + * them. + */ unlock_res_and_lock(dlmlock); cl_lock_modify(env, lock, descr); cl_lock_signal(env, lock); @@ -444,12 +444,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0); - LASSERT(dlmlock != NULL); + LASSERT(dlmlock); lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); LASSERT(dlmlock->l_ast_data == olck); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); olck->ols_lock = dlmlock; spin_unlock(&osc_ast_guard); @@ -470,7 +470,8 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) olck->ols_hold = 1; /* lock reference taken by ldlm_handle2lock_long() is owned by - * osc_lock and released in osc_lock_detach() */ + * osc_lock and released in osc_lock_detach() + */ lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); olck->ols_has_ref = 1; } @@ -508,10 +509,10 @@ static int osc_lock_upcall(void *cookie, int errcode) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock(&olck->ols_handle); - if (dlmlock != NULL) { + if (dlmlock) { lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -548,7 +549,8 @@ static int osc_lock_upcall(void *cookie, int errcode) /* For AGL case, the RPC sponsor may exits the cl_lock * processing without wait() called before related OSC * lock upcall(). 
So update the lock status according - * to the enqueue result inside AGL upcall(). */ + * to the enqueue result inside AGL upcall(). + */ if (olck->ols_agl) { lock->cll_flags |= CLF_FROM_UPCALL; cl_wait_try(env, lock); @@ -571,7 +573,8 @@ static int osc_lock_upcall(void *cookie, int errcode) lu_ref_del(&lock->cll_reference, "upcall", lock); /* This maybe the last reference, so must be called after - * cl_lock_mutex_put(). */ + * cl_lock_mutex_put(). + */ cl_lock_put(env, lock); cl_env_nested_put(&nest, env); @@ -634,7 +637,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, cancel = 0; olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); LINVRNT(osc_lock_invariant(olck)); @@ -786,17 +789,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, env = cl_env_nested_get(&nest); if (!IS_ERR(env)) { olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); /* * ldlm_handle_cp_callback() copied LVB from request * to lock->l_lvb_data, store it in osc_lock. */ - LASSERT(dlmlock->l_lvb_data != NULL); + LASSERT(dlmlock->l_lvb_data); lock_res_and_lock(dlmlock); olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) { + if (!olck->ols_lock) { /* * upcall (osc_lock_upcall()) hasn't yet been * called. Do nothing now, upcall will bind @@ -850,14 +853,15 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) * environment. */ olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; /* Do not grab the mutex of cl_lock for glimpse. * See LU-1274 for details. * BTW, it's okay for cl_lock to be cancelled during * this period because server can handle this race. * See ldlm_server_glimpse_ast() for details. - * cl_lock_mutex_get(env, lock); */ + * cl_lock_mutex_get(env, lock); + */ cap = &req->rq_pill; req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, @@ -1017,7 +1021,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); /* make it enqueue anyway for glimpse lock, because we actually - * don't need to cancel any conflicting locks. */ + * don't need to cancel any conflicting locks. + */ if (olck->ols_glimpse) return 0; @@ -1051,7 +1056,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, * imagine that client has PR lock on [0, 1000], and thread T0 * is doing lockless IO in [500, 1500] region. Concurrent * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. */ + * wrong, because these data are possibly stale. 
+ */ if (!lockless && osc_lock_compatible(olck, scan_ols)) continue; @@ -1074,7 +1080,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, } else { CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n", lock, conflict); - LASSERT(lock->cll_conflict == NULL); + LASSERT(!lock->cll_conflict); lu_ref_add(&conflict->cll_reference, "cancel-wait", lock); lock->cll_conflict = conflict; @@ -1111,7 +1117,7 @@ static int osc_lock_enqueue(const struct lu_env *env, "Impossible state: %d\n", ols->ols_state); LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), - "lock = %p, ols = %p\n", lock, ols); + "lock = %p, ols = %p\n", lock, ols); result = osc_lock_enqueue_wait(env, ols); if (result == 0) { @@ -1123,7 +1129,8 @@ static int osc_lock_enqueue(const struct lu_env *env, struct ldlm_enqueue_info *einfo = &ols->ols_einfo; /* lock will be passed as upcall cookie, - * hold ref to prevent to be released. */ + * hold ref to prevent to be released. + */ cl_lock_hold_add(env, lock, "upcall", lock); /* a user for lock also */ cl_lock_user_add(env, lock); @@ -1137,12 +1144,12 @@ static int osc_lock_enqueue(const struct lu_env *env, ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); osc_lock_build_policy(env, lock, policy); result = osc_enqueue_base(osc_export(obj), resname, - &ols->ols_flags, policy, - &ols->ols_lvb, - obj->oo_oinfo->loi_kms_valid, - osc_lock_upcall, - ols, einfo, &ols->ols_handle, - PTLRPCD_SET, 1, ols->ols_agl); + &ols->ols_flags, policy, + &ols->ols_lvb, + obj->oo_oinfo->loi_kms_valid, + osc_lock_upcall, + ols, einfo, &ols->ols_handle, + PTLRPCD_SET, 1, ols->ols_agl); if (result != 0) { cl_lock_user_del(env, lock); cl_lock_unhold(env, lock, "upcall", lock); @@ -1174,7 +1181,8 @@ static int osc_lock_wait(const struct lu_env *env, } else if (olck->ols_agl) { if (lock->cll_flags & CLF_FROM_UPCALL) /* It is from enqueue RPC reply upcall for - * updating state. Do not re-enqueue. */ + * updating state. Do not re-enqueue. + */ return -ENAVAIL; olck->ols_state = OLS_NEW; } else { @@ -1197,7 +1205,7 @@ static int osc_lock_wait(const struct lu_env *env, } LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && - lock->cll_error == 0, olck->ols_lock != NULL)); + lock->cll_error == 0, olck->ols_lock)); return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; } @@ -1235,7 +1243,8 @@ static int osc_lock_use(const struct lu_env *env, LASSERT(lock->cll_state == CLS_INTRANSIT); LASSERT(lock->cll_users > 0); /* set a flag for osc_dlm_blocking_ast0() to signal the - * lock.*/ + * lock. + */ olck->ols_ast_wait = 1; rc = CLO_WAIT; } @@ -1257,11 +1266,12 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) if (descr->cld_mode >= CLM_WRITE) { result = osc_cache_writeback_range(env, obj, - descr->cld_start, descr->cld_end, - 1, discard); + descr->cld_start, + descr->cld_end, + 1, discard); LDLM_DEBUG(ols->ols_lock, - "lock %p: %d pages were %s.\n", lock, result, - discard ? "discarded" : "written"); + "lock %p: %d pages were %s.\n", lock, result, + discard ? 
"discarded" : "written"); if (result > 0) result = 0; } @@ -1306,7 +1316,7 @@ static void osc_lock_cancel(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LINVRNT(osc_lock_invariant(olck)); - if (dlmlock != NULL) { + if (dlmlock) { int do_cancel; discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA); @@ -1318,7 +1328,8 @@ static void osc_lock_cancel(const struct lu_env *env, /* Now that we're the only user of dlm read/write reference, * mostly the ->l_readers + ->l_writers should be zero. * However, there is a corner case. - * See bug 18829 for details.*/ + * See bug 18829 for details. + */ do_cancel = (dlmlock->l_readers == 0 && dlmlock->l_writers == 0); dlmlock->l_flags |= LDLM_FL_CBPENDING; @@ -1382,7 +1393,7 @@ static void osc_lock_state(const struct lu_env *env, if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) { struct osc_io *oio = osc_env_io(env); - LASSERT(lock->ols_owner == NULL); + LASSERT(!lock->ols_owner); lock->ols_owner = oio; } else if (state != CLS_HELD) lock->ols_owner = NULL; @@ -1517,7 +1528,8 @@ static void osc_lock_lockless_state(const struct lu_env *env, lock->ols_owner = oio; /* set the io to be lockless if this lock is for io's - * host object */ + * host object + */ if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj)) oio->oi_lockless = 1; } @@ -1555,8 +1567,8 @@ int osc_lock_init(const struct lu_env *env, struct osc_lock *clk; int result; - clk = kmem_cache_alloc(osc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS); + if (clk) { __u32 enqflags = lock->cll_descr.cld_enq_flags; osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); @@ -1578,8 +1590,8 @@ int osc_lock_init(const struct lu_env *env, if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n", - lock, clk, clk->ols_flags); + LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx", + lock, clk, clk->ols_flags); result = 0; } else @@ -1599,9 +1611,9 @@ int osc_dlm_lock_pageref(struct ldlm_lock *dlm) * doesn't matter because in the worst case we don't cancel a lock * which we actually can, that's no harm. 
*/ - if (olock != NULL && + if (olock && atomic_add_return(_PAGEREF_MAGIC, - &olock->ols_pageref) != _PAGEREF_MAGIC) { + &olock->ols_pageref) != _PAGEREF_MAGIC) { atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref); rc = 1; } diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c index fdd6219aa..9d474fcdd 100644 --- a/drivers/staging/lustre/lustre/osc/osc_object.c +++ b/drivers/staging/lustre/lustre/osc/osc_object.c @@ -113,7 +113,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj) LASSERT(list_empty(&osc->oo_write_item)); LASSERT(list_empty(&osc->oo_read_item)); - LASSERT(osc->oo_root.rb_node == NULL); + LASSERT(!osc->oo_root.rb_node); LASSERT(list_empty(&osc->oo_hp_exts)); LASSERT(list_empty(&osc->oo_urgent_exts)); LASSERT(list_empty(&osc->oo_rpc_exts)); @@ -255,8 +255,8 @@ struct lu_object *osc_object_alloc(const struct lu_env *env, struct osc_object *osc; struct lu_object *obj; - osc = kmem_cache_alloc(osc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (osc != NULL) { + osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS); + if (osc) { obj = osc2lu(osc); lu_object_init(obj, NULL, dev); osc->oo_cl.co_ops = &osc_ops; diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c index 2439d804f..ce9ddd515 100644 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ b/drivers/staging/lustre/lustre/osc/osc_page.c @@ -51,111 +51,12 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, * @{ */ -/* - * Comment out osc_page_protected because it may sleep inside the - * the client_obd_list_lock. - * client_obd_list_lock -> osc_ap_completion -> osc_completion -> - * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base - * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep. - */ -#if 0 -static int osc_page_is_dlocked(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int pending, int unref) -{ - struct cl_page *page; - struct osc_object *obj; - struct osc_thread_info *info; - struct ldlm_res_id *resname; - struct lustre_handle *lockh; - ldlm_policy_data_t *policy; - ldlm_mode_t dlmmode; - __u64 flags; - - might_sleep(); - - info = osc_env_info(env); - resname = &info->oti_resname; - policy = &info->oti_policy; - lockh = &info->oti_handle; - page = opg->ops_cl.cpl_page; - obj = cl2osc(opg->ops_cl.cpl_obj); - - flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED; - if (pending) - flags |= LDLM_FL_CBPENDING; - - dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW; - osc_lock_build_res(env, obj, resname); - osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index); - return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy, - dlmmode, &flags, NULL, lockh, unref); -} - -/** - * Checks an invariant that a page in the cache is covered by a lock, as - * needed. - */ -static int osc_page_protected(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int unref) -{ - struct cl_object_header *hdr; - struct cl_lock *scan; - struct cl_page *page; - struct cl_lock_descr *descr; - int result; - - LINVRNT(!opg->ops_temp); - - page = opg->ops_cl.cpl_page; - if (page->cp_owner != NULL && - cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER) - /* - * If IO is done without locks (liblustre, or lloop), lock is - * not required. 
- */ - result = 1; - else - /* otherwise check for a DLM lock */ - result = osc_page_is_dlocked(env, opg, mode, 1, unref); - if (result == 0) { - /* maybe this page is a part of a lockless io? */ - hdr = cl_object_header(opg->ops_cl.cpl_obj); - descr = &osc_env_info(env)->oti_descr; - descr->cld_mode = mode; - descr->cld_start = page->cp_index; - descr->cld_end = page->cp_index; - spin_lock(&hdr->coh_lock_guard); - list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) { - /* - * Lock-less sub-lock has to be either in HELD state - * (when io is actively going on), or in CACHED state, - * when top-lock is being unlocked: - * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse(). - */ - if ((scan->cll_state == CLS_HELD || - scan->cll_state == CLS_CACHED) && - cl_lock_ext_match(&scan->cll_descr, descr)) { - struct osc_lock *olck; - - olck = osc_lock_at(scan); - result = osc_lock_is_lockless(olck); - break; - } - } - spin_unlock(&hdr->coh_lock_guard); - } - return result; -} -#else static int osc_page_protected(const struct lu_env *env, const struct osc_page *opg, enum cl_lock_mode mode, int unref) { return 1; } -#endif /***************************************************************************** * @@ -168,7 +69,7 @@ static void osc_page_fini(const struct lu_env *env, struct osc_page *opg = cl2osc_page(slice); CDEBUG(D_TRACE, "%p\n", opg); - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); } static void osc_page_transfer_get(struct osc_page *opg, const char *label) @@ -204,7 +105,8 @@ static void osc_page_transfer_add(const struct lu_env *env, struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); /* ops_lru and ops_inflight share the same field, so take it from LRU - * first and then use it as inflight. */ + * first and then use it as inflight. + */ osc_lru_del(osc_cli(obj), opg, false); spin_lock(&obj->oo_seatbelt); @@ -232,9 +134,10 @@ static int osc_page_cache_add(const struct lu_env *env, /* for sync write, kernel will wait for this page to be flushed before * osc_io_end() is called, so release it earlier. - * for mkwrite(), it's known there is no further pages. */ + * for mkwrite(), it's known there is no further pages. 
+ */ if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) { - if (oio->oi_active != NULL) { + if (oio->oi_active) { osc_extent_release(env, oio->oi_active); oio->oi_active = NULL; } @@ -258,7 +161,7 @@ static int osc_page_addref_lock(const struct lu_env *env, struct osc_lock *olock; int rc; - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); olock = osc_lock_at(lock); if (atomic_inc_return(&olock->ols_pageref) <= 0) { @@ -278,7 +181,7 @@ static void osc_page_putref_lock(const struct lu_env *env, struct cl_lock *lock = opg->ops_lock; struct osc_lock *olock; - LASSERT(lock != NULL); + LASSERT(lock); olock = osc_lock_at(lock); atomic_dec(&olock->ols_pageref); @@ -296,7 +199,7 @@ static int osc_page_is_under_lock(const struct lu_env *env, lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page, NULL, 1, 0); - if (lock != NULL) { + if (lock) { if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0) result = -EBUSY; cl_lock_put(env, lock); @@ -424,7 +327,7 @@ static void osc_page_delete(const struct lu_env *env, } spin_lock(&obj->oo_seatbelt); - if (opg->ops_submitter != NULL) { + if (opg->ops_submitter) { LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -434,8 +337,8 @@ static void osc_page_delete(const struct lu_env *env, osc_lru_del(osc_cli(obj), opg, true); } -void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice, - int from, int to) +static void osc_page_clip(const struct lu_env *env, + const struct cl_page_slice *slice, int from, int to) { struct osc_page *opg = cl2osc_page(slice); struct osc_async_page *oap = &opg->ops_oap; @@ -458,7 +361,8 @@ static int osc_page_cancel(const struct lu_env *env, LINVRNT(osc_page_protected(env, opg, CLM_READ, 0)); /* Check if the transferring against this page - * is completed, or not even queued. */ + * is completed, or not even queued. + */ if (opg->ops_transfer_pinned) /* FIXME: may not be interrupted.. */ rc = osc_cancel_async_page(env, opg); @@ -499,30 +403,30 @@ static const struct cl_page_operations osc_page_ops = { }; int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct osc_object *osc = cl2osc(obj); struct osc_page *opg = cl_object_page_slice(obj, page); int result; opg->ops_from = 0; - opg->ops_to = PAGE_CACHE_SIZE; + opg->ops_to = PAGE_SIZE; result = osc_prep_async_page(osc, opg, vmpage, - cl_offset(obj, page->cp_index)); + cl_offset(obj, page->cp_index)); if (result == 0) { struct osc_io *oio = osc_env_io(env); opg->ops_srvlock = osc_io_srvlock(oio); - cl_page_slice_add(page, &opg->ops_cl, obj, - &osc_page_ops); + cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops); } /* * Cannot assert osc_page_protected() here as read-ahead * creates temporary pages outside of a lock. 
*/ /* ops_inflight and ops_lru are the same field, but it doesn't - * hurt to initialize it twice :-) */ + * hurt to initialize it twice :-) + */ INIT_LIST_HEAD(&opg->ops_inflight); INIT_LIST_HEAD(&opg->ops_lru); @@ -557,7 +461,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC; if (!client_is_remote(osc_export(obj)) && - capable(CFS_CAP_SYS_RESOURCE)) { + capable(CFS_CAP_SYS_RESOURCE)) { oap->oap_brw_flags |= OBD_BRW_NOQUOTA; oap->oap_cmd |= OBD_BRW_NOQUOTA; } @@ -581,16 +485,18 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); static atomic_t osc_lru_waiters = ATOMIC_INIT(0); /* LRU pages are freed in batch mode. OSC should at least free this - * number of pages to avoid running out of LRU budget, and.. */ -static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ + * number of pages to avoid running out of LRU budget, and.. + */ +static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */ /* free this number at most otherwise it will take too long time to finish. */ -static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ +static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */ /* Check if we can free LRU slots from this OSC. If there exists LRU waiters, * we should free slots aggressively. In this way, slots are freed in a steady * step to maintain fairness among OSCs. * - * Return how many LRU pages should be freed. */ + * Return how many LRU pages should be freed. + */ static int osc_cache_too_much(struct client_obd *cli) { struct cl_client_cache *cache = cli->cl_cache; @@ -602,7 +508,8 @@ static int osc_cache_too_much(struct client_obd *cli) return min(pages, lru_shrink_max); /* if it's going to run out LRU slots, we should free some, but not - * too much to maintain fairness among OSCs. */ + * too much to maintain fairness among OSCs. + */ if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { unsigned long tmp; @@ -630,7 +537,8 @@ static int discard_pagevec(const struct lu_env *env, struct cl_io *io, /* free LRU page only if nobody is using it. * This check is necessary to avoid freeing the pages * having already been removed from LRU and pinned - * for IO. */ + * for IO. 
+ */ if (!cl_page_in_use(page)) { cl_page_unmap(env, io, page); cl_page_discard(env, io, page); @@ -655,6 +563,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) struct cl_object *clobj = NULL; struct cl_page **pvec; struct osc_page *opg; + struct osc_page *temp; int maxscan = 0; int count = 0; int index = 0; @@ -674,28 +583,26 @@ int osc_lru_shrink(struct client_obd *cli, int target) client_obd_list_lock(&cli->cl_lru_list_lock); atomic_inc(&cli->cl_lru_shrinkers); maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list)); - while (!list_empty(&cli->cl_lru_list)) { + list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { struct cl_page *page; if (--maxscan < 0) break; - opg = list_entry(cli->cl_lru_list.next, struct osc_page, - ops_lru); page = cl_page_top(opg->ops_cl.cpl_page); if (cl_page_in_use_noref(page)) { list_move_tail(&opg->ops_lru, &cli->cl_lru_list); continue; } - LASSERT(page->cp_obj != NULL); + LASSERT(page->cp_obj); if (clobj != page->cp_obj) { struct cl_object *tmp = page->cp_obj; cl_object_get(tmp); client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); index = 0; @@ -720,11 +627,13 @@ int osc_lru_shrink(struct client_obd *cli, int target) /* move this page to the end of list as it will be discarded * soon. The page will be finally removed from LRU list in - * osc_page_delete(). */ + * osc_page_delete(). + */ list_move_tail(&opg->ops_lru, &cli->cl_lru_list); /* it's okay to grab a refcount here w/o holding lock because - * it has to grab cl_lru_list_lock to delete the page. */ + * it has to grab cl_lru_list_lock to delete the page. + */ cl_page_get(page); pvec[index++] = page; if (++count >= target) @@ -740,7 +649,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) } client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); cl_io_fini(env, io); @@ -775,7 +684,8 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg) } /* delete page from LRUlist. The page can be deleted from LRUlist for two - * reasons: redirtied or deleted from page cache. */ + * reasons: redirtied or deleted from page cache. + */ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) { if (opg->ops_in_lru) { @@ -797,7 +707,8 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) * this osc occupies too many LRU pages and kernel is * stealing one of them. * cl_lru_shrinkers is to avoid recursive call in case - * we're already in the context of osc_lru_shrink(). */ + * we're already in the context of osc_lru_shrink(). 
+ */ if (atomic_read(&cli->cl_lru_shrinkers) == 0 && !memory_pressure_get()) osc_lru_shrink(cli, osc_cache_too_much(cli)); @@ -819,22 +730,23 @@ static int osc_lru_reclaim(struct client_obd *cli) int max_scans; int rc; - LASSERT(cache != NULL); + LASSERT(cache); rc = osc_lru_shrink(cli, lru_shrink_min); if (rc != 0) { CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n", - cli->cl_import->imp_obd->obd_name, rc, cli); + cli->cl_import->imp_obd->obd_name, rc, cli); return rc; } CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); /* Reclaim LRU slots from other client_obd as it can't free enough - * from its own. This should rarely happen. */ + * from its own. This should rarely happen. + */ spin_lock(&cache->ccc_lru_lock); LASSERT(!list_empty(&cache->ccc_lru)); @@ -844,12 +756,12 @@ static int osc_lru_reclaim(struct client_obd *cli) max_scans = atomic_read(&cache->ccc_users); while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) { cli = list_entry(cache->ccc_lru.next, struct client_obd, - cl_lru_osc); + cl_lru_osc); CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); if (atomic_read(&cli->cl_lru_in_list) > 0) { @@ -864,7 +776,7 @@ static int osc_lru_reclaim(struct client_obd *cli) spin_unlock(&cache->ccc_lru_lock); CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n", - cli->cl_import->imp_obd->obd_name, cli, rc); + cli->cl_import->imp_obd->obd_name, cli, rc); return rc; } @@ -875,7 +787,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, struct client_obd *cli = osc_cli(obj); int rc = 0; - if (cli->cl_cache == NULL) /* shall not be in LRU */ + if (!cli->cl_cache) /* shall not be in LRU */ return 0; LASSERT(atomic_read(cli->cl_lru_left) >= 0); @@ -892,15 +804,16 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, cond_resched(); /* slowest case, all of caching pages are busy, notifying - * other OSCs that we're lack of LRU slots. */ + * other OSCs that we're short of LRU slots. + */ atomic_inc(&osc_lru_waiters); gen = atomic_read(&cli->cl_lru_in_list); rc = l_wait_event(osc_lru_waitq, - atomic_read(cli->cl_lru_left) > 0 || - (atomic_read(&cli->cl_lru_in_list) > 0 && - gen != atomic_read(&cli->cl_lru_in_list)), - &lwi); + atomic_read(cli->cl_lru_left) > 0 || + (atomic_read(&cli->cl_lru_in_list) > 0 && + gen != atomic_read(&cli->cl_lru_in_list)), + &lwi); atomic_dec(&osc_lru_waiters); if (rc < 0) diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c index e70e7961d..194d8ede4 100644 --- a/drivers/staging/lustre/lustre/osc/osc_quota.c +++ b/drivers/staging/lustre/lustre/osc/osc_quota.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). 
* - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -35,8 +30,8 @@ static inline struct osc_quota_info *osc_oqi_alloc(u32 id) { struct osc_quota_info *oqi; - oqi = kmem_cache_alloc(osc_quota_kmem, GFP_NOFS | __GFP_ZERO); - if (oqi != NULL) + oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS); + if (oqi) oqi->oqi_id = id; return oqi; @@ -52,10 +47,12 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]) oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if (oqi) { /* do not try to access oqi here, it could have been - * freed by osc_quota_setdq() */ + * freed by osc_quota_setdq() + */ /* the slot is busy, the user is about to run out of - * quota space on this OST */ + * quota space on this OST + */ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n", type == USRQUOTA ? "user" : "grout", qid[type]); return NO_QUOTA; @@ -89,12 +86,13 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if ((flags & FL_QUOTA_FLAG(type)) != 0) { /* This ID is getting close to its quota limit, let's - * switch to sync I/O */ - if (oqi != NULL) + * switch to sync I/O + */ + if (oqi) continue; oqi = osc_oqi_alloc(qid[type]); - if (oqi == NULL) { + if (!oqi) { rc = -ENOMEM; break; } @@ -113,8 +111,9 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], qid[type], rc); } else { /* This ID is now off the hook, let's remove it from - * the hash table */ - if (oqi == NULL) + * the hash table + */ + if (!oqi) continue; oqi = cfs_hash_del_key(cli->cl_quota_hash[type], @@ -147,7 +146,7 @@ oqi_keycmp(const void *key, struct hlist_node *hnode) struct osc_quota_info *oqi; u32 uid; - LASSERT(key != NULL); + LASSERT(key); uid = *((u32 *)key); oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash); @@ -218,7 +217,7 @@ int osc_quota_setup(struct obd_device *obd) CFS_HASH_MAX_THETA, "a_hash_ops, CFS_HASH_DEFAULT); - if (cli->cl_quota_hash[type] == NULL) + if (!cli->cl_quota_hash[type]) break; } @@ -252,7 +251,7 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION, OST_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -294,7 +293,7 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION, OST_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -302,8 +301,8 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); - /* the next poll will find -ENODATA, that means quotacheck is - * going on */ + /* the next poll will find -ENODATA, that means quotacheck is going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c index 7034f0a94..30526ebca 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -104,7 +104,6 @@ struct osc_enqueue_args { static void osc_release_ppga(struct brw_page **ppga, u32 
count); static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc); -static int osc_cleanup(struct obd_device *obd); /* Pack OSC object metadata for disk storage (LE byte order). */ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, @@ -113,18 +112,18 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, int lmm_size; lmm_size = sizeof(**lmmp); - if (lmmp == NULL) + if (!lmmp) return lmm_size; - if (*lmmp != NULL && lsm == NULL) { + if (*lmmp && !lsm) { kfree(*lmmp); *lmmp = NULL; return 0; - } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) { + } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) { return -EBADF; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = kzalloc(lmm_size, GFP_NOFS); if (!*lmmp) return -ENOMEM; @@ -143,7 +142,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, int lsm_size; struct obd_import *imp = class_exp2cliimp(exp); - if (lmm != NULL) { + if (lmm) { if (lmm_bytes < sizeof(*lmm)) { CERROR("%s: lov_mds_md too small: %d, need %d\n", exp->exp_obd->obd_name, lmm_bytes, @@ -160,23 +159,23 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } lsm_size = lov_stripe_md_size(1); - if (lsmp == NULL) + if (!lsmp) return lsm_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; return 0; } - if (*lsmp == NULL) { + if (!*lsmp) { *lsmp = kzalloc(lsm_size, GFP_NOFS); - if (unlikely(*lsmp == NULL)) + if (unlikely(!*lsmp)) return -ENOMEM; (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS); - if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) { + if (unlikely(!(*lsmp)->lsm_oinfo[0])) { kfree(*lsmp); return -ENOMEM; } @@ -185,11 +184,11 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, return -EBADF; } - if (lmm != NULL) + if (lmm) /* XXX zero *lsmp? 
*/ ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi); - if (imp != NULL && + if (imp && (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES)) (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes; else @@ -246,7 +245,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -276,7 +275,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -294,7 +293,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -321,7 +320,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -339,7 +338,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -362,7 +361,7 @@ static int osc_setattr_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -384,7 +383,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -451,7 +450,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, } req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -482,7 +481,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, goto out_req; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out_req; } @@ -500,7 +499,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, lsm->lsm_oi = oa->o_oi; *ea = lsm; - if (oti != NULL) { + if (oti) { oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); if (oa->o_valid & OBD_MD_FLCOOKIE) { @@ -530,7 +529,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); @@ -573,7 +572,7 @@ static int osc_sync_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { CERROR("can't unpack ost_body\n"); rc = -EPROTO; goto out; @@ -595,7 +594,7 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC); @@ -629,10 
+628,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, /* Find and cancel locally locks matched by @mode in the resource found by * @objid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, struct list_head *cancels, - ldlm_mode_t mode, __u64 lock_flags) + enum ldlm_mode mode, __u64 lock_flags) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; struct ldlm_res_id res_id; @@ -644,13 +644,14 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. + */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; ostid_build_res_name(&oa->o_oi, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -723,7 +724,8 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp, * If the client dies, or the OST is down when the object should be destroyed, * the records are not cancelled, and when the OST reconnects to the MDS next, * it will retrieve the llog unlink logs and then sends the log cancellation - * cookies to the MDS after committing destroy transactions. */ + * cookies to the MDS after committing destroy transactions. + */ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, struct obdo *oa, struct lov_stripe_md *ea, struct obd_trans_info *oti, struct obd_export *md_export) @@ -743,7 +745,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, LDLM_FL_DISCARD_DATA); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -758,7 +760,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); - if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && oa->o_valid & OBD_MD_FLCOOKIE) oa->o_lcookie = *oti->oti_logcookies; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); @@ -769,7 +771,8 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, /* If osc_destroy is for destroying the unlink orphan, * sent from MDT to OST, which should not be blocked here, * because the process might be triggered by ptlrpcd, and - * it is not good to block ptlrpcd thread (b=16006)*/ + * it is not good to block ptlrpcd thread (b=16006) + */ if (!(oa->o_flags & OBD_FL_DELORPHAN)) { req->rq_interpret_reply = osc_destroy_interpret; if (!osc_can_send_destroy(cli)) { struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); /* * Wait until the number of on-going destroy RPCs drops * below the activation threshold. */ l_wait_event_exclusive(cli->cl_destroy_waitq, osc_can_send_destroy(cli), &lwi); } } /* Do not wait for response */ ptlrpcd_add_req(req); return 0; } static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, @@ -810,7 +813,8 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, (long)(obd_max_dirty_pages + 1))) { /* The atomic_read() allowing the atomic_inc() are * not covered by a lock thus they may safely race and trip * this CERROR() unless we add in a small fudge factor (+1). 
+ */ CERROR("dirty %d - %d > system dirty_max %d\n", atomic_read(&obd_dirty_pages), atomic_read(&obd_dirty_transit_pages), @@ -822,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, oa->o_undirty = 0; } else { long max_in_flight = (cli->cl_max_pages_per_rpc << - PAGE_CACHE_SHIFT)* + PAGE_SHIFT)* (cli->cl_max_rpcs_in_flight + 1); oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); } @@ -839,7 +843,7 @@ void osc_update_next_shrink(struct client_obd *cli) { cli->cl_next_shrink_grant = cfs_time_shift(cli->cl_grant_shrink_interval); - CDEBUG(D_CACHE, "next time %ld to shrink grant \n", + CDEBUG(D_CACHE, "next time %ld to shrink grant\n", cli->cl_next_shrink_grant); } @@ -900,15 +904,16 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) /* Shrink the current grant, either from some large amount to enough for a * full set of in-flight RPCs, or if we have already shrunk to that limit * then to enough for a single RPC. This avoids keeping more grant than - * needed, and avoids shrinking the grant piecemeal. */ + * needed, and avoids shrinking the grant piecemeal. + */ static int osc_shrink_grant(struct client_obd *cli) { __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * - (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); + (cli->cl_max_pages_per_rpc << PAGE_SHIFT); client_obd_list_lock(&cli->cl_loi_list_lock); if (cli->cl_avail_grant <= target_bytes) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; client_obd_list_unlock(&cli->cl_loi_list_lock); return osc_shrink_grant_to_target(cli, target_bytes); @@ -922,9 +927,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) client_obd_list_lock(&cli->cl_loi_list_lock); /* Don't shrink if we are already above or below the desired limit * We don't want to shrink below a single RPC, as that will negatively - * impact block allocation and long-term performance. */ - if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + * impact block allocation and long-term performance. + */ + if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT) + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; if (target_bytes >= cli->cl_avail_grant) { client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -970,8 +976,9 @@ static int osc_should_shrink_grant(struct client_obd *client) if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) { /* Get the current RPC size directly, instead of going via: * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) - * Keep comment here so that it can be found by searching. */ - int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + * Keep comment here so that it can be found by searching. 
+ */ + int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && client->cl_avail_grant > brw_size) @@ -986,8 +993,7 @@ static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data) { struct client_obd *client; - list_for_each_entry(client, &item->ti_obd_list, - cl_grant_shrink_list) { + list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) { if (osc_should_shrink_grant(client)) osc_shrink_grant(client); } @@ -1004,10 +1010,10 @@ static int osc_add_shrink_grant(struct client_obd *client) &client->cl_grant_shrink_list); if (rc) { CERROR("add grant client %s error %d\n", - client->cl_import->imp_obd->obd_name, rc); + client->cl_import->imp_obd->obd_name, rc); return rc; } - CDEBUG(D_CACHE, "add grant client %s \n", + CDEBUG(D_CACHE, "add grant client %s\n", client->cl_import->imp_obd->obd_name); osc_update_next_shrink(client); return 0; @@ -1040,12 +1046,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant, ocd->ocd_grant, cli->cl_dirty); /* workaround for servers which do not have the patch from - * LU-2679 */ + * LU-2679 + */ cli->cl_avail_grant = ocd->ocd_grant; } /* determine the appropriate chunk size used by osc_extent. */ - cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); + cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", @@ -1060,7 +1067,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) /* We assume that the reason this OSC got a short read is because it read * beyond the end of a stripe file; i.e. lustre is reading a sparse file * via the LOV, and it _knows_ it's reading inside the file, it's just that - * this stripe never got written at or beyond this stripe offset yet. */ + * this stripe never got written at or beyond this stripe offset yet. + */ static void handle_short_read(int nob_read, u32 page_count, struct brw_page **pga) { @@ -1106,7 +1114,7 @@ static int check_write_rcs(struct ptlrpc_request *req, remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, sizeof(*remote_rcs) * niocount); - if (remote_rcs == NULL) { + if (!remote_rcs) { CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n"); return -EPROTO; } @@ -1118,7 +1126,7 @@ static int check_write_rcs(struct ptlrpc_request *req, if (remote_rcs[i] != 0) { CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n", - i, remote_rcs[i], req); + i, remote_rcs[i], req); return -EPROTO; } } @@ -1139,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA); /* warn if we try to combine flags that we don't know to be - * safe to combine */ + * safe to combine + */ if (unlikely((p1->flag & mask) != (p2->flag & mask))) { CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n", p1->flag, p2->flag); @@ -1152,7 +1161,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) static u32 osc_checksum_bulk(int nob, u32 pg_count, struct brw_page **pga, int opc, - cksum_type_t cksum_type) + enum cksum_type cksum_type) { __u32 cksum; int i = 0; @@ -1174,7 +1183,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, int count = pga[i]->count > nob ? 
nob : pga[i]->count; /* corrupt the data before we compute the checksum, to - * simulate an OST->client data error */ + * simulate an OST->client data error + */ if (i == 0 && opc == OST_READ && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { unsigned char *ptr = kmap(pga[i]->pg); @@ -1184,7 +1194,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, kunmap(pga[i]->pg); } cfs_crypto_hash_update_page(hdesc, pga[i]->pg, - pga[i]->off & ~CFS_PAGE_MASK, + pga[i]->off & ~CFS_PAGE_MASK, count); CDEBUG(D_PAGE, "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n", @@ -1205,7 +1215,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, cfs_crypto_hash_final(hdesc, NULL, NULL); /* For sending we only compute the wrong checksum instead - * of corrupting the data so it is still correct on a redo */ + * of corrupting the data so it is still correct on a redo + */ if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND)) cksum++; @@ -1244,7 +1255,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc = OST_READ; req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); } - if (req == NULL) + if (!req) return -ENOMEM; for (niocount = i = 1; i < page_count; i++) { @@ -1266,7 +1277,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; desc = ptlrpc_prep_bulk_imp(req, page_count, @@ -1274,7 +1286,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK, OST_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } @@ -1283,7 +1295,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, body = req_capsule_client_get(pill, &RMF_OST_BODY); ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ); niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - LASSERT(body != NULL && ioobj != NULL && niobuf != NULL); + LASSERT(body && ioobj && niobuf); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); @@ -1293,7 +1305,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, * that might be send for this request. The actual number is decided * when the RPC is finally sent in ptlrpc_register_bulk(). It sends * "max - 1" for old client compatibility sending "0", and also so the - * the actual maximum is a power-of-two number, not one less. LU-1431 */ + * the actual maximum is a power-of-two number, not one less. 
LU-1431 + */ ioobj_max_brw_set(ioobj, desc->bd_md_max_brw); LASSERT(page_count > 0); pg_prev = pga[0]; @@ -1304,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, LASSERT(pg->count > 0); /* make sure there is no gap in the middle of page array */ LASSERTF(page_count == 1 || - (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && + (ergo(i == 0, poff + pg->count == PAGE_SIZE) && ergo(i > 0 && i < page_count - 1, - poff == 0 && pg->count == PAGE_CACHE_SIZE) && + poff == 0 && pg->count == PAGE_SIZE) && ergo(i == page_count - 1, poff == 0)), "i: %d/%d pg: %p off: %llu, count: %u\n", i, page_count, pg, pg->off, pg->count); @@ -1355,8 +1368,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, if (cli->cl_checksum && !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { /* store cl_cksum_type in a local variable since - * it can be changed via lprocfs */ - cksum_type_t cksum_type = cli->cl_cksum_type; + * it can be changed via lprocfs + */ + enum cksum_type cksum_type = cli->cl_cksum_type; if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { oa->o_flags &= OBD_FL_LOCAL_MASK; @@ -1375,7 +1389,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, oa->o_flags |= cksum_type_pack(cksum_type); } else { /* clear out the checksum flag, in case this is a - * resend but cl_checksum is no longer set. b=11238 */ + * resend but cl_checksum is no longer set. b=11238 + */ oa->o_valid &= ~OBD_MD_FLCKSUM; } oa->o_cksum = body->oa.o_cksum; @@ -1415,11 +1430,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, __u32 client_cksum, __u32 server_cksum, int nob, u32 page_count, struct brw_page **pga, - cksum_type_t client_cksum_type) + enum cksum_type client_cksum_type) { __u32 new_cksum; char *msg; - cksum_type_t cksum_type; + enum cksum_type cksum_type; if (server_cksum == client_cksum) { CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum); @@ -1472,9 +1487,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) return rc; } - LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc); + LASSERTF(req->rq_repmsg, "rc = %d\n", rc); body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); return -EPROTO; } @@ -1538,7 +1553,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) if (rc != req->rq_bulk->bd_nob_transferred) { CERROR("Unexpected rc %d (%d transferred)\n", - rc, req->rq_bulk->bd_nob_transferred); + rc, req->rq_bulk->bd_nob_transferred); return -EPROTO; } @@ -1550,7 +1565,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) __u32 server_cksum = body->oa.o_cksum; char *via = ""; char *router = ""; - cksum_type_t cksum_type; + enum cksum_type cksum_type; cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ? body->oa.o_flags : 0); @@ -1627,7 +1642,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, return rc; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { - if (oap->oap_request != NULL) { + if (oap->oap_request) { LASSERTF(request == oap->oap_request, "request %p != oap_request %p\n", request, oap->oap_request); @@ -1638,12 +1653,14 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, } } /* New request takes over pga and oaps from old request. - * Note that copying a list_head doesn't work, need to move it... */ + * Note that copying a list_head doesn't work, need to move it... 
+ */ aa->aa_resends++; new_req->rq_interpret_reply = request->rq_interpret_reply; new_req->rq_async_args = request->rq_async_args; /* cap resend delay to the current request timeout, this is similar to - * what ptlrpc does (see after_reply()) */ + * what ptlrpc does (see after_reply()) + */ if (aa->aa_resends > new_req->rq_timeout) new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout; else @@ -1669,7 +1686,8 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, /* XXX: This code will run into problem if we're going to support * to add a series of BRW RPCs into a self-defined ptlrpc_request_set * and wait for all of them to be finished. We should inherit request - * set from old request. */ + * set from old request. + */ ptlrpcd_add_req(new_req); DEBUG_REQ(D_INFO, new_req, "new request"); @@ -1709,7 +1727,7 @@ static void sort_brw_pages(struct brw_page **array, int num) static void osc_release_ppga(struct brw_page **ppga, u32 count) { - LASSERT(ppga != NULL); + LASSERT(ppga); kfree(ppga); } @@ -1725,7 +1743,8 @@ static int brw_interpret(const struct lu_env *env, rc = osc_brw_fini_request(req, rc); CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); /* When server return -EINPROGRESS, client should always retry - * regardless of the number of times the bulk was resent already. */ + * regardless of the number of times the bulk was resent already. + */ if (osc_recoverable_error(rc)) { if (req->rq_import_generation != req->rq_import->imp_generation) { @@ -1748,7 +1767,7 @@ static int brw_interpret(const struct lu_env *env, } list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) { - if (obj == NULL && rc == 0) { + if (!obj && rc == 0) { obj = osc2cl(ext->oe_obj); cl_object_get(obj); } @@ -1759,7 +1778,7 @@ static int brw_interpret(const struct lu_env *env, LASSERT(list_empty(&aa->aa_exts)); LASSERT(list_empty(&aa->aa_oaps)); - if (obj != NULL) { + if (obj) { struct obdo *oa = aa->aa_oa; struct cl_attr *attr = &osc_env_info(env)->oti_attr; unsigned long valid = 0; @@ -1798,7 +1817,8 @@ static int brw_interpret(const struct lu_env *env, client_obd_list_lock(&cli->cl_loi_list_lock); /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters * is called so we know whether to go to sync BRWs or wait for more - * RPCs to complete */ + * RPCs to complete + */ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) cli->cl_w_in_flight--; else @@ -1857,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, oap->oap_count; else LASSERT(oap->oap_page_off + oap->oap_count == - PAGE_CACHE_SIZE); + PAGE_SIZE); } } @@ -1871,13 +1891,13 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); - if (pga == NULL) { + if (!pga) { rc = -ENOMEM; goto out; } - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out; } @@ -1886,7 +1906,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_for_each_entry(oap, &rpc_list, oap_rpc_item) { struct cl_page *page = oap2cl_page(oap); - if (clerq == NULL) { + if (!clerq) { clerq = cl_req_alloc(env, page, crt, 1 /* only 1-object rpcs for now */); if (IS_ERR(clerq)) { @@ -1907,7 +1927,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } /* always get the data for the obdo for the rpc */ - LASSERT(clerq != NULL); + LASSERT(clerq); crattr->cra_oa = oa; cl_req_attr_set(env, clerq, crattr, 
~0ULL); if (lock) { @@ -1923,7 +1943,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, sort_brw_pages(pga, page_count); rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, 1, 0); + pga, &req, 1, 0); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); goto out; @@ -1938,7 +1958,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, * we race with setattr (locally or in queue at OST). If OST gets * later setattr before earlier BRW (as determined by the request xid), * the OST will not use BRW timestamps. Sadly, there is no obvious - * way to do this in a single call. bug 10150 */ + * way to do this in a single call. bug 10150 + */ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); crattr->cra_oa = &body->oa; cl_req_attr_set(env, clerq, crattr, @@ -1955,23 +1976,24 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, aa->aa_clerq = clerq; /* queued sync pages can be torn down while the pages - * were between the pending list and the rpc */ + * were between the pending list and the rpc + */ tmp = NULL; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { /* only one oap gets a request reference */ - if (tmp == NULL) + if (!tmp) tmp = oap; if (oap->oap_interrupted && !req->rq_intr) { CDEBUG(D_INODE, "oap %p in req %p interrupted\n", - oap, req); + oap, req); ptlrpc_mark_interrupted(req); } } - if (tmp != NULL) + if (tmp) tmp->oap_request = ptlrpc_request_addref(req); client_obd_list_lock(&cli->cl_loi_list_lock); - starting_offset >>= PAGE_CACHE_SHIFT; + starting_offset >>= PAGE_SHIFT; if (cmd == OBD_BRW_READ) { cli->cl_r_in_flight++; lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); @@ -2001,16 +2023,17 @@ out: kfree(crattr); if (rc != 0) { - LASSERT(req == NULL); + LASSERT(!req); if (oa) kmem_cache_free(obdo_cachep, oa); kfree(pga); /* this should happen rarely and is pretty bad, it makes the - * pending list not follow the dirty order */ + * pending list not follow the dirty order + */ while (!list_empty(ext_list)) { ext = list_entry(ext_list->next, struct osc_extent, - oe_link); + oe_link); list_del_init(&ext->oe_link); osc_extent_finish(env, ext, 0, rc); } @@ -2026,7 +2049,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, void *data = einfo->ei_cbdata; int set = 0; - LASSERT(lock != NULL); LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); LASSERT(lock->l_resource->lr_type == einfo->ei_type); LASSERT(lock->l_completion_ast == einfo->ei_cb_cp); @@ -2035,7 +2057,7 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, lock_res_and_lock(lock); spin_lock(&osc_ast_guard); - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) set = 1; @@ -2052,7 +2074,7 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, struct ldlm_lock *lock = ldlm_handle2lock(lockh); int set = 0; - if (lock != NULL) { + if (lock) { set = osc_set_lock_data_with_check(lock, einfo); LDLM_LOCK_PUT(lock); } else @@ -2064,7 +2086,8 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, /* find any ldlm lock of the inode in osc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t replace, void *data) { @@ -2095,7 +2118,6 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(rep != NULL); rep->lock_policy_res1 = 
ptlrpc_status_ntoh(rep->lock_policy_res1); if (rep->lock_policy_res1) @@ -2127,18 +2149,21 @@ static int osc_enqueue_interpret(const struct lu_env *env, __u64 *flags = aa->oa_flags; /* Make a local copy of a lock handle and a mode, because aa->oa_* - * might be freed anytime after lock upcall has been called. */ + * might be freed anytime after lock upcall has been called. + */ lustre_handle_copy(&handle, aa->oa_lockh); mode = aa->oa_ei->ei_mode; /* ldlm_cli_enqueue is holding a reference on the lock, so it must - * be valid. */ + * be valid. + */ lock = ldlm_handle2lock(&handle); /* Take an additional reference so that a blocking AST that * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed * to arrive after an upcall has been executed by - * osc_enqueue_fini(). */ + * osc_enqueue_fini(). + */ ldlm_lock_addref(&handle, mode); /* Let CP AST to grant the lock first. */ @@ -2170,7 +2195,7 @@ static int osc_enqueue_interpret(const struct lu_env *env, */ ldlm_lock_decref(&handle, mode); - LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n", + LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n", aa->oa_lockh, req, aa); ldlm_lock_decref(&handle, mode); LDLM_LOCK_PUT(lock); @@ -2185,7 +2210,8 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; * others may take a considerable amount of time in a case of ost failure; and * when other sync requests do not get released lock from a client, the client * is excluded from the cluster -- such scenarious make the life difficult, so - * release locks just after they are obtained. */ + * release locks just after they are obtained. + */ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, __u64 *flags, ldlm_policy_data_t *policy, struct ost_lvb *lvb, int kms_valid, @@ -2198,11 +2224,12 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, struct ptlrpc_request *req = NULL; int intent = *flags & LDLM_FL_HAS_INTENT; __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY); - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother. */ + * dealing with the page cache is a little smoother. + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; @@ -2226,7 +2253,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, * * At some point we should cancel the read lock instead of making them * send us a blocking callback, but there are problems with canceling - * locks out from other users right now, too. */ + * locks out from other users right now, too. + */ mode = einfo->ei_mode; if (einfo->ei_mode == LCK_PR) mode |= LCK_PW; @@ -2238,7 +2266,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) { /* For AGL, if enqueue RPC is sent but the lock is not * granted, then skip to process this strpe. - * Return -ECANCELED to tell the caller. */ + * Return -ECANCELED to tell the caller. + */ ldlm_lock_decref(lockh, mode); LDLM_LOCK_PUT(matched); return -ECANCELED; @@ -2247,19 +2276,22 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if (osc_set_lock_data_with_check(matched, einfo)) { *flags |= LDLM_FL_LVB_READY; /* addref the lock only if not async requests and PW - * lock is matched whereas we asked for PR. */ + * lock is matched whereas we asked for PR. 
+ */ if (!rqset && einfo->ei_mode != mode) ldlm_lock_addref(lockh, LCK_PR); if (intent) { /* I would like to be able to ASSERT here that * rss <= kms, but I can't, for reasons which - * are explained in lov_enqueue() */ + * are explained in lov_enqueue() + */ } /* We already have a lock, and it's referenced. * * At this point, the cl_lock::cll_state is CLS_QUEUING, - * AGL upcall may change it to CLS_HELD directly. */ + * AGL upcall may change it to CLS_HELD directly. + */ (*upcall)(cookie, ELDLM_OK); if (einfo->ei_mode != mode) @@ -2281,7 +2313,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE_LVB); - if (req == NULL) + if (!req) return -ENOMEM; rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0); @@ -2341,27 +2373,29 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, { struct obd_device *obd = exp->exp_obd; __u64 lflags = *flags; - ldlm_mode_t rc; + enum ldlm_mode rc; if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH)) return -EIO; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother */ + * dealing with the page cache is a little smoother + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; /* Next, search for already existing extent locks that will cover us */ /* If we're trying to read, we also search for an existing PW lock. The * VFS and page cache already protect us locally, so lots of readers/ - * writers can share a single PW lock. */ + * writers can share a single PW lock. + */ rc = mode; if (mode == LCK_PR) rc |= LCK_PW; rc = ldlm_lock_match(obd->obd_namespace, lflags, res_id, type, policy, rc, lockh, unref); if (rc) { - if (data != NULL) { + if (data) { if (!osc_set_data_with_check(lockh, data)) { if (!(lflags & LDLM_FL_TEST_LOCK)) ldlm_lock_decref(lockh, rc); @@ -2398,8 +2432,9 @@ static int osc_statfs_interpret(const struct lu_env *env, * due to issues at a higher level (LOV). * Exit immediately since the caller is * aware of the problem and takes care - * of the clean up */ - return rc; + * of the clean up + */ + return rc; if ((rc == -ENOTCONN || rc == -EAGAIN) && (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) { @@ -2411,7 +2446,7 @@ static int osc_statfs_interpret(const struct lu_env *env, goto out; msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -2436,9 +2471,10 @@ static int osc_statfs_async(struct obd_export *exp, * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. */ + * timestamps are not ideal because they need time synchronization. 
+ */ req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); @@ -2474,8 +2510,9 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_import *imp = NULL; int rc; - /*Since the request might also come from lprocfs, so we need - *sync this with client_disconnect_export Bug15684*/ + /* Since the request might also come from lprocfs, we need to + * sync this with client_disconnect_export Bug15684 + */ down_read(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) imp = class_import_get(obd->u.cli.cl_import); @@ -2488,12 +2525,13 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. */ + * timestamps are not ideal because they need time synchronization. + */ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS); class_import_put(imp); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); @@ -2516,7 +2554,7 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, goto out; msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -2534,7 +2572,8 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, * the maximum number of OST indices which will fit in the user buffer. * lmm_magic must be LOV_MAGIC (we only use 1 slot here). */ -static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) +static int osc_getstripe(struct lov_stripe_md *lsm, + struct lov_user_md __user *lump) { /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ struct lov_user_md_v3 lum, *lumk; @@ -2545,7 +2584,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) return -ENODATA; /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) */ + * lmm_stripe_count, (the header part is common to v1 and v3) + */ lum_size = sizeof(struct lov_user_md_v1); if (copy_from_user(&lum, lump, lum_size)) return -EFAULT; @@ -2560,7 +2600,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0])); /* we can use lov_mds_md_size() to compute lum_size - * because lov_user_md_vX and lov_mds_md_vX have the same size */ + * because lov_user_md_vX and lov_mds_md_vX have the same size + */ if (lum.lmm_stripe_count > 0) { lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic); lumk = kzalloc(lum_size, GFP_NOFS); @@ -2591,14 +2632,15 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) } static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct obd_ioctl_data *data = karg; int err = 0; if (!try_module_get(THIS_MODULE)) { - CERROR("Can't get module. 
Is it alive?"); + CERROR("%s: cannot get module '%s'\n", obd->obd_name, + module_name(THIS_MODULE)); return -EINVAL; } switch (cmd) { @@ -2700,7 +2742,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GET_INFO_LAST_ID); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -2721,7 +2763,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, goto out; reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto out; } @@ -2735,7 +2777,7 @@ out: struct ldlm_res_id res_id; ldlm_policy_data_t policy; struct lustre_handle lockh; - ldlm_mode_t mode = 0; + enum ldlm_mode mode = 0; struct ptlrpc_request *req; struct ll_user_fiemap *reply; char *tmp; @@ -2748,12 +2790,12 @@ out: CFS_PAGE_MASK; if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= - fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) + fm_key->fiemap.fm_start + PAGE_SIZE - 1) policy.l_extent.end = OBD_OBJECT_EOF; else policy.l_extent.end = (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length + - PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; + PAGE_SIZE - 1) & CFS_PAGE_MASK; ostid_build_res_name(&fm_key->oa.o_oi, &res_id); mode = ldlm_lock_match(exp->exp_obd->obd_namespace, @@ -2774,7 +2816,7 @@ out: skip_locking: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GET_INFO_FIEMAP); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto drop_lock; } @@ -2803,7 +2845,7 @@ skip_locking: goto fini_req; reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto fini_req; } @@ -2852,7 +2894,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, if (KEY_IS(KEY_CACHE_SET)) { struct client_obd *cli = &obd->u.cli; - LASSERT(cli->cl_cache == NULL); /* only once */ + LASSERT(!cli->cl_cache); /* only once */ cli->cl_cache = val; atomic_inc(&cli->cl_cache->ccc_users); cli->cl_lru_left = &cli->cl_cache->ccc_lru_left; @@ -2880,16 +2922,17 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, return -EINVAL; /* We pass all other commands directly to OST. Since nobody calls osc - methods directly and everybody is supposed to go through LOV, we - assume lov checked invalid values for us. - The only recognised values so far are evict_by_nid and mds_conn. - Even if something bad goes through, we'd get a -EINVAL from OST - anyway. */ + * methods directly and everybody is supposed to go through LOV, we + * assume lov checked invalid values for us. + * The only recognised values so far are evict_by_nid and mds_conn. + * Even if something bad goes through, we'd get a -EINVAL from OST + * anyway. + */ req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ? 
&RQF_OST_SET_GRANT_INFO : &RQF_OBD_SET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -2916,7 +2959,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oa) { ptlrpc_req_finished(req); return -ENOMEM; @@ -2928,7 +2971,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, ptlrpc_request_set_replen(req); if (!KEY_IS(KEY_GRANT_SHRINK)) { - LASSERT(set != NULL); + LASSERT(set); ptlrpc_set_add_req(set, req); ptlrpc_check_set(NULL, set); } else { @@ -2946,7 +2989,7 @@ static int osc_reconnect(const struct lu_env *env, { struct client_obd *cli = &obd->u.cli; - if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { + if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { long lost_grant; client_obd_list_lock(&cli->cl_loi_list_lock); @@ -2987,7 +3030,7 @@ static int osc_disconnect(struct obd_export *exp) * So the osc should be disconnected from the shrink list, after we * are sure the import has been destroyed. BUG18662 */ - if (obd->u.cli.cl_import == NULL) + if (!obd->u.cli.cl_import) osc_del_shrink_grant(&obd->u.cli); return rc; } @@ -3024,7 +3067,8 @@ static int osc_import_event(struct obd_device *obd, /* Reset grants */ cli = &obd->u.cli; /* all pages go to failing rpcs due to the invalid - * import */ + * import + */ osc_io_unplug(env, cli, NULL); ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); @@ -3206,13 +3250,13 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) return 0; } -int osc_cleanup(struct obd_device *obd) +static int osc_cleanup(struct obd_device *obd) { struct client_obd *cli = &obd->u.cli; int rc; /* lru cleanup */ - if (cli->cl_cache != NULL) { + if (cli->cl_cache) { LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0); spin_lock(&cli->cl_cache->ccc_lru_lock); list_del_init(&cli->cl_lru_osc); @@ -3255,7 +3299,7 @@ static int osc_process_config(struct obd_device *obd, u32 len, void *buf) return osc_process_config_base(obd, buf); } -struct obd_ops osc_obd_ops = { +static struct obd_ops osc_obd_ops = { .owner = THIS_MODULE, .setup = osc_setup, .precleanup = osc_precleanup, @@ -3298,7 +3342,8 @@ static int __init osc_init(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. 
+ */ CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches); rc = lu_kmem_init(osc_caches); diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c index efdda0950..cf3ac8eee 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/client.c @@ -145,7 +145,7 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); desc = ptlrpc_new_bulk(npages, max_brw, type, portal); - if (desc == NULL) + if (!desc) return NULL; desc->bd_import_generation = req->rq_import_generation; @@ -171,15 +171,15 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len, int pin) { LASSERT(desc->bd_iov_count < desc->bd_max_iov); - LASSERT(page != NULL); + LASSERT(page); LASSERT(pageoffset >= 0); LASSERT(len > 0); - LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(pageoffset + len <= PAGE_SIZE); desc->bd_nob += len; if (pin) - page_cache_get(page); + get_page(page); ptlrpc_add_bulk_page(desc, page, pageoffset, len); } @@ -193,7 +193,6 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) { int i; - LASSERT(desc != NULL); LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); @@ -207,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) if (unpin) { for (i = 0; i < desc->bd_iov_count; i++) - page_cache_release(desc->bd_iov[i].kiov_page); + put_page(desc->bd_iov[i].kiov_page); } kfree(desc); @@ -353,6 +352,7 @@ static int unpack_reply(struct ptlrpc_request *req) * If anything goes wrong just ignore it - same as if it never happened */ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) + __must_hold(&req->rq_lock) { struct ptlrpc_request *early_req; time64_t olddl; @@ -411,7 +411,7 @@ int ptlrpc_request_cache_init(void) request_cache = kmem_cache_create("ptlrpc_cache", sizeof(struct ptlrpc_request), 0, SLAB_HWCACHE_ALIGN, NULL); - return request_cache == NULL ? -ENOMEM : 0; + return !request_cache ? 
-ENOMEM : 0; } void ptlrpc_request_cache_fini(void) @@ -423,7 +423,7 @@ struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags) { struct ptlrpc_request *req; - req = kmem_cache_alloc(request_cache, flags | __GFP_ZERO); + req = kmem_cache_zalloc(request_cache, flags); return req; } @@ -441,8 +441,6 @@ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) struct list_head *l, *tmp; struct ptlrpc_request *req; - LASSERT(pool != NULL); - spin_lock(&pool->prp_lock); list_for_each_safe(l, tmp, &pool->prp_req_list) { req = list_entry(l, struct ptlrpc_request, rq_list); @@ -559,7 +557,7 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) } request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, - rq_list); + rq_list); list_del_init(&request->rq_list); spin_unlock(&pool->prp_lock); @@ -724,10 +722,10 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, request = ptlrpc_prep_req_from_pool(pool); if (request) { - LASSERTF((unsigned long)imp > 0x1000, "%p", imp); + LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp); LASSERT(imp != LP_POISON); - LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p", - imp->imp_client); + LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", + imp->imp_client); LASSERT(imp->imp_client != LP_POISON); request->rq_import = class_import_get(imp); @@ -752,7 +750,7 @@ ptlrpc_request_alloc_internal(struct obd_import *imp, struct ptlrpc_request *request; request = __ptlrpc_request_alloc(imp, pool); - if (request == NULL) + if (!request) return NULL; req_capsule_init(&request->rq_pill, request, RCL_CLIENT); @@ -898,8 +896,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) RQ_PHASE_COMPLETE : RQ_PHASE_NEW; list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); LASSERT(req->rq_phase == expected_phase); n++; @@ -911,8 +908,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); list_del_init(&req->rq_set_chain); LASSERT(req->rq_phase == expected_phase); @@ -951,10 +947,10 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, atomic_inc(&set->set_remaining); req->rq_queued_time = cfs_time_current(); - if (req->rq_reqmsg != NULL) + if (req->rq_reqmsg) lustre_msg_set_jobid(req->rq_reqmsg, NULL); - if (set->set_producer != NULL) + if (set->set_producer) /* * If the request set has a producer callback, the RPC must be * sent straight away @@ -974,7 +970,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, struct ptlrpc_request_set *set = pc->pc_set; int count, i; - LASSERT(req->rq_set == NULL); + LASSERT(!req->rq_set); LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0); spin_lock(&set->set_new_req_lock); @@ -1015,7 +1011,6 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, { int delay = 0; - LASSERT(status != NULL); *status = 0; if (req->rq_ctx_init || req->rq_ctx_fini) { @@ -1078,7 +1073,7 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req) __u32 opc; int err; - LASSERT(req->rq_reqmsg != NULL); + LASSERT(req->rq_reqmsg); opc = lustre_msg_get_opc(req->rq_reqmsg); /* @@ -1167,7 +1162,7 @@ static int after_reply(struct ptlrpc_request *req) struct timespec64 work_start; long timediff; - LASSERT(obd != NULL); + LASSERT(obd); /* repbuf must be unlinked */ 
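A note on the allocator conversions in the hunks above: kmem_cache_zalloc(cache, flags) is defined in <linux/slab.h> as kmem_cache_alloc(cache, flags | __GFP_ZERO), so dropping the open-coded __GFP_ZERO does not change behaviour. A minimal sketch of the equivalence, with a hypothetical cache:

#include <linux/slab.h>

/* "my_cachep" is a hypothetical cache created elsewhere with
 * kmem_cache_create(); both allocations below return zero-filled
 * objects, the second spelling is just shorter and harder to get
 * wrong. */
static void alloc_two_ways(struct kmem_cache *my_cachep)
{
        void *a = kmem_cache_alloc(my_cachep, GFP_NOFS | __GFP_ZERO);
        void *b = kmem_cache_zalloc(my_cachep, GFP_NOFS);

        if (a)
                kmem_cache_free(my_cachep, a);
        if (b)
                kmem_cache_free(my_cachep, b);
}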
LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink); @@ -1247,7 +1242,7 @@ static int after_reply(struct ptlrpc_request *req) ktime_get_real_ts64(&work_start); timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC + (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC; - if (obd->obd_svc_stats != NULL) { + if (obd->obd_svc_stats) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); ptlrpc_lprocfs_rpc_sent(req, timediff); @@ -1310,7 +1305,7 @@ static int after_reply(struct ptlrpc_request *req) /* version recovery */ ptlrpc_save_versions(req); ptlrpc_retain_replayable_request(req, imp); - } else if (req->rq_commit_cb != NULL && + } else if (req->rq_commit_cb && list_empty(&req->rq_replay_list)) { /* * NB: don't call rq_commit_cb if it's already on @@ -1334,8 +1329,8 @@ static int after_reply(struct ptlrpc_request *req) struct ptlrpc_request *last; last = list_entry(imp->imp_replay_list.prev, - struct ptlrpc_request, - rq_replay_list); + struct ptlrpc_request, + rq_replay_list); /* * Requests with rq_replay stay on the list even if no * commit is expected. @@ -1437,7 +1432,7 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) { int remaining, rc; - LASSERT(set->set_producer != NULL); + LASSERT(set->set_producer); remaining = atomic_read(&set->set_remaining); @@ -1478,8 +1473,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) INIT_LIST_HEAD(&comp_reqs); list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); struct obd_import *imp = req->rq_import; int unregistered = 0; int rc = 0; @@ -1621,8 +1615,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) */ list_del_init(&req->rq_list); list_add_tail(&req->rq_list, - &imp-> - imp_delayed_list); + &imp->imp_delayed_list); spin_unlock(&imp->imp_lock); continue; } @@ -1630,7 +1623,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (status != 0) { req->rq_status = status; ptlrpc_rqphase_move(req, - RQ_PHASE_INTERPRET); + RQ_PHASE_INTERPRET); spin_unlock(&imp->imp_lock); goto interpret; } @@ -1645,7 +1638,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) list_del_init(&req->rq_list); list_add_tail(&req->rq_list, - &imp->imp_sending_list); + &imp->imp_sending_list); spin_unlock(&imp->imp_lock); @@ -1750,7 +1743,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) * process the reply. Similarly if the RPC returned * an error, and therefore the bulk will never arrive. */ - if (req->rq_bulk == NULL || req->rq_status < 0) { + if (!req->rq_bulk || req->rq_status < 0) { ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); goto interpret; } @@ -1802,7 +1795,7 @@ interpret: } ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); - CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0, + CDEBUG(req->rq_reqmsg ? 
D_RPCTRACE : 0, "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n", current_comm(), imp->imp_obd->obd_uuid.uuid, lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, @@ -1882,8 +1875,8 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) "timed out for sent delay" : "timed out for slow reply"), (s64)req->rq_sent, (s64)req->rq_real_sent); - if (imp != NULL && obd_debug_peer_on_timeout) - LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer); + if (imp && obd_debug_peer_on_timeout) + LNetDebugPeer(imp->imp_connection->c_peer); ptlrpc_unregister_reply(req, async_unlink); ptlrpc_unregister_bulk(req, async_unlink); @@ -1891,7 +1884,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) if (obd_dump_on_timeout) libcfs_debug_dumplog(); - if (imp == NULL) { + if (!imp) { DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?"); return 1; } @@ -1944,13 +1937,10 @@ int ptlrpc_expired_set(void *data) struct list_head *tmp; time64_t now = ktime_get_real_seconds(); - LASSERT(set != NULL); - /* A timeout expired. See which reqs it applies to... */ list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); /* don't expire request waiting for context */ if (req->rq_wait_ctx) @@ -2002,13 +1992,11 @@ void ptlrpc_interrupted_set(void *data) struct ptlrpc_request_set *set = data; struct list_head *tmp; - LASSERT(set != NULL); CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); if (req->rq_phase != RQ_PHASE_RPC && req->rq_phase != RQ_PHASE_UNREGISTERING) @@ -2081,7 +2069,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) else list_for_each(tmp, &set->set_requests) { req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); if (req->rq_phase == RQ_PHASE_NEW) (void)ptlrpc_send_new_req(req); } @@ -2155,7 +2143,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) if (rc == 0 && atomic_read(&set->set_remaining) == 0) { list_for_each(tmp, &set->set_requests) { req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); spin_lock(&req->rq_lock); req->rq_invalid_rqset = 1; spin_unlock(&req->rq_lock); @@ -2174,7 +2162,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) rc = req->rq_status; } - if (set->set_interpret != NULL) { + if (set->set_interpret) { int (*interpreter)(struct ptlrpc_request_set *set, void *, int) = set->set_interpret; rc = interpreter(set, set->set_arg, rc); @@ -2206,10 +2194,10 @@ EXPORT_SYMBOL(ptlrpc_set_wait); */ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) { - if (request == NULL) + if (!request) return; LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */ + LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */ LASSERTF(list_empty(&request->rq_list), "req %p\n", request); LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request); @@ -2221,7 +2209,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) * We must take it off the imp_replay_list first. Otherwise, we'll set * request->rq_reqmsg to NULL while osc_close is dereferencing it. 
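The comment above is the whole story of the race: the request has to leave the import's replay list under imp_lock before any of its fields are torn down, because a concurrent reader (osc_close() here) walks that list under the same lock and dereferences those fields. A generic sketch of the required ordering, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
        struct list_head link;          /* on the owner's list while visible */
        char *payload;                  /* readers dereference this */
};

/* Readers traverse the list under *lock and touch obj->payload, so
 * teardown must (1) unlink under that same lock, and only then
 * (2) free; swapping the two steps recreates the use-after-free the
 * comment above describes. */
static void obj_free(struct obj *o, spinlock_t *lock)
{
        spin_lock(lock);
        list_del_init(&o->link);        /* step 1: new readers can't find it */
        spin_unlock(lock);

        kfree(o->payload);              /* step 2: now safe to tear down */
        o->payload = NULL;
        kfree(o);
}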
*/ - if (request->rq_import != NULL) { + if (request->rq_import) { if (!locked) spin_lock(&request->rq_import->imp_lock); list_del_init(&request->rq_replay_list); @@ -2236,20 +2224,20 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) LBUG(); } - if (request->rq_repbuf != NULL) + if (request->rq_repbuf) sptlrpc_cli_free_repbuf(request); - if (request->rq_export != NULL) { + if (request->rq_export) { class_export_put(request->rq_export); request->rq_export = NULL; } - if (request->rq_import != NULL) { + if (request->rq_import) { class_import_put(request->rq_import); request->rq_import = NULL; } - if (request->rq_bulk != NULL) + if (request->rq_bulk) ptlrpc_free_bulk_pin(request->rq_bulk); - if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) + if (request->rq_reqbuf || request->rq_clrbuf) sptlrpc_cli_free_reqbuf(request); if (request->rq_cli_ctx) @@ -2269,7 +2257,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) */ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked) { - if (request == NULL) + if (!request) return 1; if (request == LP_POISON || @@ -2351,7 +2339,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) * a chance to run reply_in_callback(), and to make sure we've * unlinked before returning a req to the pool. */ - if (request->rq_set != NULL) + if (request->rq_set) wq = &request->rq_set->set_waitq; else wq = &request->rq_reply_waitq; @@ -2386,7 +2374,7 @@ static void ptlrpc_free_request(struct ptlrpc_request *req) req->rq_replay = 0; spin_unlock(&req->rq_lock); - if (req->rq_commit_cb != NULL) + if (req->rq_commit_cb) req->rq_commit_cb(req); list_del_init(&req->rq_replay_list); @@ -2427,7 +2415,6 @@ void ptlrpc_free_committed(struct obd_import *imp) struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ bool skip_committed_list = true; - LASSERT(imp != NULL); assert_spin_locked(&imp->imp_lock); if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked && @@ -2575,8 +2562,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, ptlrpc_request_addref(req); list_for_each_prev(tmp, &imp->imp_replay_list) { struct ptlrpc_request *iter = - list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + list_entry(tmp, struct ptlrpc_request, rq_replay_list); /* * We may have duplicate transnos if we create and then @@ -2611,12 +2597,12 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) struct ptlrpc_request_set *set; int rc; - LASSERT(req->rq_set == NULL); + LASSERT(!req->rq_set); LASSERT(!req->rq_receiving_reply); set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("Unable to allocate ptlrpc set."); + if (!set) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); return -ENOMEM; } @@ -2847,12 +2833,9 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set) { struct list_head *tmp, *pos; - LASSERT(set != NULL); - list_for_each_safe(pos, tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(pos, struct ptlrpc_request, - rq_set_chain); + list_entry(pos, struct ptlrpc_request, rq_set_chain); spin_lock(&req->rq_lock); if (req->rq_phase != RQ_PHASE_RPC) { @@ -2994,7 +2977,6 @@ static int work_interpreter(const struct lu_env *env, struct ptlrpc_work_async_args *arg = data; LASSERT(ptlrpcd_check_work(req)); - LASSERT(arg->cb != NULL); rc = arg->cb(env, arg->cbdata); @@ -3026,12 +3008,12 @@ void *ptlrpcd_alloc_work(struct obd_import *imp, might_sleep(); - if (cb == NULL) + if (!cb) return ERR_PTR(-EINVAL); /* copy some 
code from deprecated fakereq. */ req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (req == NULL) { + if (!req) { CERROR("ptlrpc: run out of memory!\n"); return ERR_PTR(-ENOMEM); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c index da1f0b1ac..a14daff3f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/connection.c +++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c @@ -72,7 +72,8 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self, * returned and may be compared against out object. */ /* In the function below, .hs_keycmp resolves to - * conn_keycmp() */ + * conn_keycmp() + */ /* coverity[overrun-buffer-val] */ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash); if (conn != conn2) { @@ -172,7 +173,7 @@ conn_keycmp(const void *key, struct hlist_node *hnode) struct ptlrpc_connection *conn; const lnet_process_id_t *conn_key; - LASSERT(key != NULL); + LASSERT(key); conn_key = key; conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash); diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c index 990156986..47be21ac9 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/events.c +++ b/drivers/staging/lustre/lustre/ptlrpc/events.c @@ -71,7 +71,8 @@ void request_out_callback(lnet_event_t *ev) if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { /* Failed send: make it seem like the reply timed out, just - * like failing sends in client.c does currently... */ + * like failing sends in client.c does currently... + */ req->rq_net_err = 1; ptlrpc_client_wake_req(req); @@ -95,7 +96,8 @@ void reply_in_callback(lnet_event_t *ev) LASSERT(ev->md.start == req->rq_repbuf); LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len); /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests - for adaptive timeouts' early reply. */ + * for adaptive timeouts' early reply. 
+ */ LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0); spin_lock(&req->rq_lock); @@ -151,7 +153,8 @@ void reply_in_callback(lnet_event_t *ev) req->rq_reply_off = ev->offset; req->rq_nob_received = ev->mlength; /* LNetMDUnlink can't be called under the LNET_LOCK, - so we must unlink in ptlrpc_unregister_reply */ + * so we must unlink in ptlrpc_unregister_reply + */ DEBUG_REQ(D_INFO, req, "reply in flags=%x mlen=%u offset=%d replen=%d", lustre_msg_get_flags(req->rq_reqmsg), @@ -162,7 +165,8 @@ void reply_in_callback(lnet_event_t *ev) out_wake: /* NB don't unlock till after wakeup; req can disappear under us - * since we don't have our own ref */ + * since we don't have our own ref + */ ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } @@ -213,7 +217,8 @@ void client_bulk_callback(lnet_event_t *ev) desc->bd_failure = 1; /* NB don't unlock till after wakeup; desc can disappear under us - * otherwise */ + * otherwise + */ if (desc->bd_md_count == 0) ptlrpc_client_wake_req(desc->bd_req); @@ -250,7 +255,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, __u64 new_seq; /* set sequence ID for request and add it to history list, - * it must be called with hold svcpt::scp_lock */ + * it must be called with hold svcpt::scp_lock + */ new_seq = (sec << REQS_SEC_SHIFT) | (usec << REQS_USEC_SHIFT) | @@ -258,7 +264,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, if (new_seq > svcpt->scp_hist_seq) { /* This handles the initial case of scp_hist_seq == 0 or - * we just jumped into a new time window */ + * we just jumped into a new time window + */ svcpt->scp_hist_seq = new_seq; } else { LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT); @@ -266,7 +273,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, * however, it's possible that we used up all bits for * sequence and jumped into the next usec bucket (future time), * then we hope there will be less RPCs per bucket at some - * point, and sequence will catch up again */ + * point, and sequence will catch up again + */ svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt)); new_seq = svcpt->scp_hist_seq; } @@ -302,7 +310,8 @@ void request_in_callback(lnet_event_t *ev) * request buffer we can use the request object embedded in * rqbd. Note that if we failed to allocate a request, * we'd have to re-post the rqbd, which we can't do in this - * context. */ + * context. + */ req = &rqbd->rqbd_req; memset(req, 0, sizeof(*req)); } else { @@ -312,7 +321,7 @@ void request_in_callback(lnet_event_t *ev) return; } req = ptlrpc_request_cache_alloc(GFP_ATOMIC); - if (req == NULL) { + if (!req) { CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n", service->srv_name, libcfs_id2str(ev->initiator)); @@ -322,7 +331,8 @@ void request_in_callback(lnet_event_t *ev) /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, * flags are reset and scalars are zero. We only set the message - * size to non-zero if this was a successful receive. */ + * size to non-zero if this was a successful receive. 
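request_in_callback() above splits into two paths because it runs in a context that cannot sleep: where an allocation failure could not be handled (the buffer cannot be re-posted from here), it borrows the request object embedded in the rqbd; where the RPC may legally be dropped, it allocates with GFP_ATOMIC. Both paths rely on the object being fully zeroed. A compressed sketch of that split, all names hypothetical:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_req {
        int flags;                      /* callers rely on an all-zero object */
};

struct buf_desc {
        struct my_req embedded;         /* preallocated, for the no-fail path */
};

/* 'may_drop' is true only on paths that can tolerate losing the
 * message; the other path must use the embedded object, which cannot
 * fail but has to be re-zeroed by hand. */
static struct my_req *req_for_event(struct buf_desc *d, bool may_drop)
{
        struct my_req *req;

        if (!may_drop) {
                req = &d->embedded;
                memset(req, 0, sizeof(*req));   /* keep the zeroed invariant */
                return req;
        }
        return kzalloc(sizeof(*req), GFP_ATOMIC); /* NULL => drop message */
}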
+ */ req->rq_xid = ev->match_bits; req->rq_reqbuf = ev->md.start + ev->offset; if (ev->type == LNET_EVENT_PUT && ev->status == 0) @@ -352,7 +362,8 @@ void request_in_callback(lnet_event_t *ev) svcpt->scp_nrqbds_posted); /* Normally, don't complain about 0 buffers posted; LNET won't - * drop incoming reqs since we set the portal lazy */ + * drop incoming reqs since we set the portal lazy + */ if (test_req_buffer_pressure && ev->type != LNET_EVENT_UNLINK && svcpt->scp_nrqbds_posted == 0) @@ -369,7 +380,8 @@ void request_in_callback(lnet_event_t *ev) svcpt->scp_nreqs_incoming++; /* NB everything can disappear under us once the request - * has been queued and we unlock, so do the wake now... */ + * has been queued and we unlock, so do the wake now... + */ wake_up(&svcpt->scp_waitq); spin_unlock(&svcpt->scp_lock); @@ -390,7 +402,8 @@ void reply_out_callback(lnet_event_t *ev) if (!rs->rs_difficult) { /* 'Easy' replies have no further processing so I drop the - * net's ref on 'rs' */ + * net's ref on 'rs' + */ LASSERT(ev->unlinked); ptlrpc_rs_decref(rs); return; @@ -400,7 +413,8 @@ void reply_out_callback(lnet_event_t *ev) if (ev->unlinked) { /* Last network callback. The net's ref on 'rs' stays put - * until ptlrpc_handle_rs() is done with it */ + * until ptlrpc_handle_rs() is done with it + */ spin_lock(&svcpt->scp_rep_lock); spin_lock(&rs->rs_lock); @@ -438,15 +452,12 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, __u32 best_order = 0; int count = 0; int rc = -ENOENT; - int portals_compatibility; int dist; __u32 order; lnet_nid_t dst_nid; lnet_nid_t src_nid; - portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL); - - peer->pid = LUSTRE_SRV_LNET_PID; + peer->pid = LNET_PID_LUSTRE; /* Choose the matching UUID that's closest */ while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) { @@ -466,14 +477,6 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, best_dist = dist; best_order = order; - if (portals_compatibility > 1) { - /* Strong portals compatibility: Zero the nid's - * NET, so if I'm reading new config logs, or - * getting configured by (new) lconf I can - * still talk to old servers. */ - dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid)); - src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid)); - } peer->nid = dst_nid; *self = src_nid; rc = 0; @@ -494,7 +497,8 @@ static void ptlrpc_ni_fini(void) /* Wait for the event queue to become idle since there may still be * messages in flight with pending events (i.e. the fire-and-forget * messages == client requests and "non-difficult" server - * replies */ + * replies + */ for (retries = 0;; retries++) { rc = LNetEQFree(ptlrpc_eq_h); @@ -524,7 +528,7 @@ static lnet_pid_t ptl_get_pid(void) { lnet_pid_t pid; - pid = LUSTRE_SRV_LNET_PID; + pid = LNET_PID_LUSTRE; return pid; } @@ -544,11 +548,13 @@ static int ptlrpc_ni_init(void) } /* CAVEAT EMPTOR: how we process portals events is _radically_ - * different depending on... */ + * different depending on... + */ /* kernel LNet calls our master callback when there are new event, * because we are guaranteed to get every event via callback, * so we just set EQ size to 0 to avoid overhead of serializing - * enqueue/dequeue operations in LNet. */ + * enqueue/dequeue operations in LNet. 
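The zero-size event queue above is a deliberate trade-off: when every event is handed straight to a callback, nothing ever waits in the queue, so no ring storage (and none of the enqueue/dequeue serialization) is needed; only a polled consumer must size the queue. A generic sketch of the idea, using a hypothetical structure rather than the LNet API:

#include <linux/errno.h>
#include <linux/slab.h>

struct ev_queue {
        void (*callback)(int ev);
        int *ring;                      /* used only in polled mode */
        unsigned int size;
};

static int ev_queue_init(struct ev_queue *q, unsigned int size,
                         void (*callback)(int ev))
{
        q->callback = callback;
        q->size = callback ? 0 : size;  /* callback mode buffers nothing */
        q->ring = NULL;
        if (q->size) {
                q->ring = kcalloc(q->size, sizeof(*q->ring), GFP_KERNEL);
                if (!q->ring)
                        return -ENOMEM;
        }
        return 0;
}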
+ */ rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h); if (rc == 0) return 0; diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c index f752c789b..cd94fed0f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ b/drivers/staging/lustre/lustre/ptlrpc/import.c @@ -112,7 +112,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp); * CLOSED. I would rather refcount the import and free it after * disconnection like we do with exports. To do that, the client_obd * will need to save the peer info somewhere other than in the import, - * though. */ + * though. + */ int ptlrpc_init_import(struct obd_import *imp) { spin_lock(&imp->imp_lock); @@ -139,7 +140,7 @@ static void deuuidify(char *uuid, const char *prefix, char **uuid_start, return; if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR), - UUID_STR, strlen(UUID_STR))) + UUID_STR, strlen(UUID_STR))) *uuid_len -= strlen(UUID_STR); } @@ -282,11 +283,13 @@ void ptlrpc_invalidate_import(struct obd_import *imp) /* Wait forever until inflight == 0. We really can't do it another * way because in some cases we need to wait for very long reply * unlink. We can't do anything before that because there is really - * no guarantee that some rdma transfer is not in progress right now. */ + * no guarantee that some rdma transfer is not in progress right now. + */ do { /* Calculate max timeout for waiting on rpcs to error * out. Use obd_timeout if calculated value is smaller - * than it. */ + * than it. + */ if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { timeout = ptlrpc_inflight_timeout(imp); timeout += timeout / 3; @@ -304,7 +307,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp) /* Wait for all requests to error out and call completion * callbacks. Cap it at obd_timeout -- these should all - * have been locally cancelled by ptlrpc_abort_inflight. */ + * have been locally cancelled by ptlrpc_abort_inflight. + */ lwi = LWI_TIMEOUT_INTERVAL( cfs_timeout_cap(cfs_time_seconds(timeout)), (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2, @@ -328,28 +332,30 @@ void ptlrpc_invalidate_import(struct obd_import *imp) * maybe waiting for long reply unlink in * sluggish nets). Let's check this. If there * is no inflight and unregistering != 0, this - * is bug. */ + * is bug. + */ LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n", count); /* Let's save one loop as soon as inflight have * dropped to zero. No new inflights possible at - * this point. */ + * this point. 
+ */ rc = 0; } else { list_for_each_safe(tmp, n, - &imp->imp_sending_list) { + &imp->imp_sending_list) { req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on sending list"); } list_for_each_safe(tmp, n, - &imp->imp_delayed_list) { + &imp->imp_delayed_list) { req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on delayed list"); } @@ -427,7 +433,6 @@ EXPORT_SYMBOL(ptlrpc_fail_import); int ptlrpc_reconnect_import(struct obd_import *imp) { -#ifdef ENABLE_PINGER struct l_wait_info lwi; int secs = cfs_time_seconds(obd_timeout); int rc; @@ -443,33 +448,6 @@ int ptlrpc_reconnect_import(struct obd_import *imp) CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd), ptlrpc_import_state_name(imp->imp_state)); return rc; -#else - ptlrpc_set_import_discon(imp, 0); - /* Force a new connect attempt */ - ptlrpc_invalidate_import(imp); - /* Do a fresh connect next time by zeroing the handle */ - ptlrpc_disconnect_import(imp, 1); - /* Wait for all invalidate calls to finish */ - if (atomic_read(&imp->imp_inval_count) > 0) { - int rc; - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); - - rc = l_wait_event(imp->imp_recovery_waitq, - (atomic_read(&imp->imp_inval_count) == 0), - &lwi); - if (rc) - CERROR("Interrupted, inval=%d\n", - atomic_read(&imp->imp_inval_count)); - } - - /* Allow reconnect attempts */ - imp->imp_obd->obd_no_recov = 0; - /* Remove 'invalid' flag */ - ptlrpc_activate_import(imp); - /* Attempt a new connect */ - ptlrpc_recover_import(imp, NULL, 0); - return 0; -#endif } EXPORT_SYMBOL(ptlrpc_reconnect_import); @@ -501,18 +479,20 @@ static int import_select_connection(struct obd_import *imp) conn->oic_last_attempt); /* If we have not tried this connection since - the last successful attempt, go with this one */ + * the last successful attempt, go with this one + */ if ((conn->oic_last_attempt == 0) || cfs_time_beforeq_64(conn->oic_last_attempt, - imp->imp_last_success_conn)) { + imp->imp_last_success_conn)) { imp_conn = conn; tried_all = 0; break; } /* If all of the connections have already been tried - since the last successful connection; just choose the - least recently used */ + * since the last successful connection; just choose the + * least recently used + */ if (!imp_conn) imp_conn = conn; else if (cfs_time_before_64(conn->oic_last_attempt, @@ -529,10 +509,11 @@ static int import_select_connection(struct obd_import *imp) LASSERT(imp_conn->oic_conn); /* If we've tried everything, and we're back to the beginning of the - list, increase our timeout and try again. It will be reset when - we do finally connect. (FIXME: really we should wait for all network - state associated with the last connection attempt to drain before - trying to reconnect on it.) */ + * list, increase our timeout and try again. It will be reset when + * we do finally connect. (FIXME: really we should wait for all network + * state associated with the last connection attempt to drain before + * trying to reconnect on it.) 
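import_select_connection() above encodes a small failover policy: prefer any connection not attempted since the last successful connect; if every entry has been tried since then, fall back to the least recently attempted one; and once the whole list has been cycled, widen the network-latency estimate before retrying. A standalone sketch of the selection step, with hypothetical types:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/types.h>

struct failover_conn {
        struct list_head item;
        unsigned long last_attempt;     /* 0 = never tried */
};

/* 'last_success' is the time of the last successful connect;
 * '*tried_all' tells the caller that every entry has been attempted
 * since then, which is its cue to widen the timeout as the comment
 * above explains. */
static struct failover_conn *pick_conn(struct list_head *conns,
                                       unsigned long last_success,
                                       bool *tried_all)
{
        struct failover_conn *conn, *lru = NULL;

        *tried_all = true;
        list_for_each_entry(conn, conns, item) {
                if (conn->last_attempt == 0 ||
                    time_before_eq(conn->last_attempt, last_success)) {
                        *tried_all = false;     /* untried since last success */
                        return conn;
                }
                if (!lru || time_before(conn->last_attempt, lru->last_attempt))
                        lru = conn;             /* least recently attempted */
        }
        return lru;
}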
+ */ if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) { struct adaptive_timeout *at = &imp->imp_at.iat_net_latency; @@ -553,7 +534,6 @@ static int import_select_connection(struct obd_import *imp) imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); dlmexp = class_conn2export(&imp->imp_dlm_handle); - LASSERT(dlmexp != NULL); ptlrpc_connection_put(dlmexp->exp_connection); dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); class_export_put(dlmexp); @@ -590,7 +570,8 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) struct list_head *tmp; /* The requests in committed_list always have smaller transnos than - * the requests in replay_list */ + * the requests in replay_list + */ if (!list_empty(&imp->imp_committed_list)) { tmp = imp->imp_committed_list.next; req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); @@ -674,7 +655,8 @@ int ptlrpc_connect_import(struct obd_import *imp) goto out; /* Reset connect flags to the originally requested flags, in case - * the server is updated on-the-fly we will get the new features. */ + * the server is updated on-the-fly we will get the new features. + */ imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; /* Reset ocd_version each time so the server knows the exact versions */ imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE; @@ -687,7 +669,7 @@ int ptlrpc_connect_import(struct obd_import *imp) goto out; request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT); - if (request == NULL) { + if (!request) { rc = -ENOMEM; goto out; } @@ -700,7 +682,8 @@ int ptlrpc_connect_import(struct obd_import *imp) } /* Report the rpc service time to the server so that it knows how long - * to wait for clients to join recovery */ + * to wait for clients to join recovery + */ lustre_msg_set_service_time(request->rq_reqmsg, at_timeout2est(request->rq_timeout)); @@ -708,7 +691,8 @@ int ptlrpc_connect_import(struct obd_import *imp) * import_select_connection will increase the net latency on * repeated reconnect attempts to cover slow networks. 
* We override/ignore the server rpc completion estimate here, - * which may be large if this is a reconnect attempt */ + * which may be large if this is a reconnect attempt + */ request->rq_timeout = INITIAL_CONNECT_TIMEOUT; lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); @@ -799,7 +783,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (rc) { /* if this reconnect to busy export - not need select new target - * for connecting*/ + * for connecting + */ imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); spin_unlock(&imp->imp_lock); ptlrpc_maybe_ping_import_soon(imp); @@ -817,7 +802,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, ocd = req_capsule_server_sized_get(&request->rq_pill, &RMF_CONNECT_DATA, ret); - if (ocd == NULL) { + if (!ocd) { CERROR("%s: no connect data from server\n", imp->imp_obd->obd_name); rc = -EPROTO; @@ -851,7 +836,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (!exp) { /* This could happen if export is cleaned during the - connect attempt */ + * connect attempt + */ CERROR("%s: missing export after connect\n", imp->imp_obd->obd_name); rc = -ENODEV; @@ -877,14 +863,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, } /* if applies, adjust the imp->imp_msg_magic here - * according to reply flags */ + * according to reply flags + */ imp->imp_remote_handle = *lustre_msg_get_handle(request->rq_repmsg); /* Initial connects are allowed for clients with non-random * uuids when servers are in recovery. Simply signal the - * servers replay is complete and wait in REPLAY_WAIT. */ + * servers replay is complete and wait in REPLAY_WAIT. + */ if (msg_flags & MSG_CONNECT_RECOVERING) { CDEBUG(D_HA, "connect to %s during recovery\n", obd2cli_tgt(imp->imp_obd)); @@ -923,7 +911,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, * already erased all of our state because of previous * eviction. If it is in recovery - we are safe to * participate since we can reestablish all of our state - * with server again */ + * with server again + */ if ((msg_flags & MSG_CONNECT_RECOVERING)) { CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n", obd2cli_tgt(imp->imp_obd), @@ -1015,8 +1004,7 @@ finish: spin_lock(&imp->imp_lock); list_del(&imp->imp_conn_current->oic_item); - list_add(&imp->imp_conn_current->oic_item, - &imp->imp_conn_list); + list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list); imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt; @@ -1039,7 +1027,8 @@ finish: ocd->ocd_version < LUSTRE_VERSION_CODE - LUSTRE_VERSION_OFFSET_WARN)) { /* Sigh, some compilers do not like #ifdef in the middle - of macro arguments */ + * of macro arguments + */ const char *older = "older. Consider upgrading server or downgrading client" ; const char *newer = "newer than client version. Consider upgrading client" @@ -1061,7 +1050,8 @@ finish: * fixup is version-limited, because we don't want to carry the * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we * need interop with unpatched 2.2 servers. For newer servers, - * the client will do MNE swabbing only as needed. LU-1644 */ + * the client will do MNE swabbing only as needed. 
LU-1644 + */ if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) && OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 && @@ -1079,7 +1069,8 @@ finish: if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) { /* We sent to the server ocd_cksum_types with bits set * for algorithms we understand. The server masked off - * the checksum types it doesn't support */ + * the checksum types it doesn't support + */ if ((ocd->ocd_cksum_types & cksum_types_supported_client()) == 0) { LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n", @@ -1093,14 +1084,15 @@ finish: } } else { /* The server does not support OBD_CONNECT_CKSUM. - * Enforce ADLER for backward compatibility*/ + * Enforce ADLER for backward compatibility + */ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; } cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types); if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) cli->cl_max_pages_per_rpc = - min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, + min(ocd->ocd_brw_size >> PAGE_SHIFT, cli->cl_max_pages_per_rpc); else if (imp->imp_connect_op == MDS_CONNECT || imp->imp_connect_op == MGS_CONNECT) @@ -1109,7 +1101,8 @@ finish: /* Reset ns_connect_flags only for initial connect. It might be * changed in while using FS and if we reset it in reconnect * this leads to losing user settings done before such as - * disable lru_resize, etc. */ + * disable lru_resize, etc. + */ if (old_connect_flags != exp_connect_flags(exp) || aa->pcaa_initial_connect) { CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n", @@ -1123,13 +1116,14 @@ finish: if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) && (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) /* We need a per-message support flag, because - a. we don't know if the incoming connect reply - supports AT or not (in reply_in_callback) - until we unpack it. - b. failovered server means export and flags are gone - (in ptlrpc_send_reply). - Can only be set when we know AT is supported at - both ends */ + * a. we don't know if the incoming connect reply + * supports AT or not (in reply_in_callback) + * until we unpack it. + * b. failovered server means export and flags are gone + * (in ptlrpc_send_reply). + * Can only be set when we know AT is supported at + * both ends + */ imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT; else imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; @@ -1162,7 +1156,7 @@ out: struct obd_connect_data *ocd; /* reply message might not be ready */ - if (request->rq_repmsg == NULL) + if (!request->rq_repmsg) return -EPROTO; ocd = req_capsule_server_get(&request->rq_pill, @@ -1243,7 +1237,7 @@ static int signal_completed_replay(struct obd_import *imp) req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, OBD_PING); - if (req == NULL) { + if (!req) { atomic_dec(&imp->imp_replay_inflight); return -ENOMEM; } @@ -1337,12 +1331,13 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) { struct task_struct *task; /* bug 17802: XXX client_disconnect_export vs connect request - * race. if client will evicted at this time, we start + * race. if client is evicted at this time, we start * invalidate thread without reference to import and import can - * be freed at same time. */ + * be freed at same time. 
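The bug-17802 comment above states a general lifetime rule: the reference that keeps the object alive for the new thread must be taken before kthread_run(), and dropped on the one path where the thread never started (the thread drops its own reference when it exits). A generic sketch of the pattern with a hypothetical refcounted object:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct target {
        struct kref ref;
};

static void target_release(struct kref *ref)
{
        kfree(container_of(ref, struct target, ref));
}

static int worker(void *data)
{
        struct target *t = data;

        /* ... safe to use t: we hold the reference taken below ... */
        kref_put(&t->ref, target_release);      /* thread drops its own ref */
        return 0;
}

static int start_worker(struct target *t)
{
        struct task_struct *task;

        kref_get(&t->ref);                      /* ref for the thread, first */
        task = kthread_run(worker, t, "worker");
        if (IS_ERR(task)) {
                kref_put(&t->ref, target_release); /* thread never ran */
                return PTR_ERR(task);
        }
        return 0;
}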
+ */ class_import_get(imp); task = kthread_run(ptlrpc_invalidate_import_thread, imp, - "ll_imp_inval"); + "ll_imp_inval"); if (IS_ERR(task)) { class_import_put(imp); CERROR("error starting invalidate thread: %d\n", rc); @@ -1471,11 +1466,13 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) if (req) { /* We are disconnecting, do not retry a failed DISCONNECT rpc if * it fails. We can get through the above with a down server - * if the client doesn't know the server is gone yet. */ + * if the client doesn't know the server is gone yet. + */ req->rq_no_resend = 1; /* We want client umounts to happen quickly, no matter the - server state... */ + * server state... + */ req->rq_timeout = min_t(int, req->rq_timeout, INITIAL_CONNECT_TIMEOUT); @@ -1507,9 +1504,10 @@ EXPORT_SYMBOL(ptlrpc_disconnect_import); extern unsigned int at_min, at_max, at_history; /* Bin into timeslices using AT_BINS bins. - This gives us a max of the last binlimit*AT_BINS secs without the storage, - but still smoothing out a return to normalcy from a slow response. - (E.g. remember the maximum latency in each minute of the last 4 minutes.) */ + * This gives us a max of the last binlimit*AT_BINS secs without the storage, + * but still smoothing out a return to normalcy from a slow response. + * (E.g. remember the maximum latency in each minute of the last 4 minutes.) + */ int at_measured(struct adaptive_timeout *at, unsigned int val) { unsigned int old = at->at_current; @@ -1523,7 +1521,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) if (val == 0) /* 0's don't count, because we never want our timeout to - drop to 0, and because 0 could mean an error */ + * drop to 0, and because 0 could mean an error + */ return 0; spin_lock(&at->at_lock); @@ -1565,7 +1564,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) if (at->at_flags & AT_FLG_NOHIST) /* Only keep last reported val; keeping the rest of the history - for proc only */ + * for debugfs only + */ at->at_current = val; if (at_max > 0) diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c index c0e613c23..5b06901e5 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/layout.c +++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c @@ -118,25 +118,6 @@ static const struct req_msg_field *quotactl_only[] = { &RMF_OBD_QUOTACTL }; -static const struct req_msg_field *quota_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_QUOTA_BODY -}; - -static const struct req_msg_field *ldlm_intent_quota_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_QUOTA_BODY -}; - -static const struct req_msg_field *ldlm_intent_quota_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_DLM_LVB, - &RMF_QUOTA_BODY -}; - static const struct req_msg_field *mdt_close_client[] = { &RMF_PTLRPC_BODY, &RMF_MDT_EPOCH, @@ -514,16 +495,6 @@ static const struct req_msg_field *mds_setattr_server[] = { &RMF_CAPA2 }; -static const struct req_msg_field *mds_update_client[] = { - &RMF_PTLRPC_BODY, - &RMF_UPDATE, -}; - -static const struct req_msg_field *mds_update_server[] = { - &RMF_PTLRPC_BODY, - &RMF_UPDATE_REPLY, -}; - static const struct req_msg_field *llog_origin_handle_create_client[] = { &RMF_PTLRPC_BODY, &RMF_LLOGD_BODY, @@ -551,16 +522,6 @@ static const struct req_msg_field *llog_origin_handle_next_block_server[] = { &RMF_EADATA }; -static const struct req_msg_field *obd_idx_read_client[] = { - &RMF_PTLRPC_BODY, - &RMF_IDX_INFO -}; - -static const struct req_msg_field *obd_idx_read_server[] = 
{ - &RMF_PTLRPC_BODY, - &RMF_IDX_INFO -}; - static const struct req_msg_field *ost_body_only[] = { &RMF_PTLRPC_BODY, &RMF_OST_BODY @@ -676,7 +637,6 @@ static const struct req_msg_field *mdt_hsm_request[] = { static struct req_format *req_formats[] = { &RQF_OBD_PING, &RQF_OBD_SET_INFO, - &RQF_OBD_IDX_READ, &RQF_SEC_CTX, &RQF_MGS_TARGET_REG, &RQF_MGS_SET_INFO, @@ -721,7 +681,6 @@ static struct req_format *req_formats[] = { &RQF_MDS_HSM_ACTION, &RQF_MDS_HSM_REQUEST, &RQF_MDS_SWAP_LAYOUTS, - &RQF_UPDATE_OBJ, &RQF_QC_CALLBACK, &RQF_OST_CONNECT, &RQF_OST_DISCONNECT, @@ -759,8 +718,6 @@ static struct req_format *req_formats[] = { &RQF_LDLM_INTENT_CREATE, &RQF_LDLM_INTENT_UNLINK, &RQF_LDLM_INTENT_GETXATTR, - &RQF_LDLM_INTENT_QUOTA, - &RQF_QUOTA_DQACQ, &RQF_LOG_CANCEL, &RQF_LLOG_ORIGIN_HANDLE_CREATE, &RQF_LLOG_ORIGIN_HANDLE_DESTROY, @@ -899,11 +856,6 @@ struct req_msg_field RMF_OBD_QUOTACTL = lustre_swab_obd_quotactl, NULL); EXPORT_SYMBOL(RMF_OBD_QUOTACTL); -struct req_msg_field RMF_QUOTA_BODY = - DEFINE_MSGF("quota_body", 0, - sizeof(struct quota_body), lustre_swab_quota_body, NULL); -EXPORT_SYMBOL(RMF_QUOTA_BODY); - struct req_msg_field RMF_MDT_EPOCH = DEFINE_MSGF("mdt_ioepoch", 0, sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL); @@ -938,12 +890,12 @@ EXPORT_SYMBOL(RMF_SYMTGT); struct req_msg_field RMF_TGTUUID = DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); + NULL); EXPORT_SYMBOL(RMF_TGTUUID); struct req_msg_field RMF_CLUUID = DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); + NULL); EXPORT_SYMBOL(RMF_CLUUID); struct req_msg_field RMF_STRING = @@ -1078,7 +1030,7 @@ EXPORT_SYMBOL(RMF_RCS); struct req_msg_field RMF_EAVALS_LENS = DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32), - lustre_swab_generic_32s, NULL); + lustre_swab_generic_32s, NULL); EXPORT_SYMBOL(RMF_EAVALS_LENS); struct req_msg_field RMF_OBD_ID = @@ -1105,10 +1057,6 @@ struct req_msg_field RMF_FIEMAP_VAL = DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL); EXPORT_SYMBOL(RMF_FIEMAP_VAL); -struct req_msg_field RMF_IDX_INFO = - DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info), - lustre_swab_idx_info, NULL); -EXPORT_SYMBOL(RMF_IDX_INFO); struct req_msg_field RMF_HSM_USER_STATE = DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state), lustre_swab_hsm_user_state, NULL); @@ -1145,15 +1093,6 @@ struct req_msg_field RMF_MDS_HSM_REQUEST = lustre_swab_hsm_request, NULL); EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST); -struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1, - lustre_swab_update_buf, NULL); -EXPORT_SYMBOL(RMF_UPDATE); - -struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1, - lustre_swab_update_reply_buf, - NULL); -EXPORT_SYMBOL(RMF_UPDATE_REPLY); - struct req_msg_field RMF_SWAP_LAYOUTS = DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts), lustre_swab_swap_layouts, NULL); @@ -1196,29 +1135,23 @@ struct req_format RQF_OBD_SET_INFO = DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty); EXPORT_SYMBOL(RQF_OBD_SET_INFO); -/* Read index file through the network */ -struct req_format RQF_OBD_IDX_READ = - DEFINE_REQ_FMT0("OBD_IDX_READ", - obd_idx_read_client, obd_idx_read_server); -EXPORT_SYMBOL(RQF_OBD_IDX_READ); - struct req_format RQF_SEC_CTX = DEFINE_REQ_FMT0("SEC_CTX", empty, empty); EXPORT_SYMBOL(RQF_SEC_CTX); struct req_format RQF_MGS_TARGET_REG = DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only, - mgs_target_info_only); + mgs_target_info_only); EXPORT_SYMBOL(RQF_MGS_TARGET_REG); struct 
req_format RQF_MGS_SET_INFO = DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info, - mgs_set_info); + mgs_set_info); EXPORT_SYMBOL(RQF_MGS_SET_INFO); struct req_format RQF_MGS_CONFIG_READ = DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client, - mgs_config_read_server); + mgs_config_read_server); EXPORT_SYMBOL(RQF_MGS_CONFIG_READ); struct req_format RQF_SEQ_QUERY = @@ -1253,16 +1186,6 @@ struct req_format RQF_QC_CALLBACK = DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty); EXPORT_SYMBOL(RQF_QC_CALLBACK); -struct req_format RQF_QUOTA_DQACQ = - DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only); -EXPORT_SYMBOL(RQF_QUOTA_DQACQ); - -struct req_format RQF_LDLM_INTENT_QUOTA = - DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA", - ldlm_intent_quota_client, - ldlm_intent_quota_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA); - struct req_format RQF_MDS_GETSTATUS = DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa); EXPORT_SYMBOL(RQF_MDS_GETSTATUS); @@ -1357,11 +1280,6 @@ struct req_format RQF_MDS_GET_INFO = mds_getinfo_server); EXPORT_SYMBOL(RQF_MDS_GET_INFO); -struct req_format RQF_UPDATE_OBJ = - DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client, - mds_update_server); -EXPORT_SYMBOL(RQF_UPDATE_OBJ); - struct req_format RQF_LDLM_ENQUEUE = DEFINE_REQ_FMT0("LDLM_ENQUEUE", ldlm_enqueue_client, ldlm_enqueue_lvb_server); @@ -1598,32 +1516,32 @@ EXPORT_SYMBOL(RQF_OST_STATFS); struct req_format RQF_OST_SET_GRANT_INFO = DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client, - ost_body_only); + ost_body_only); EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO); struct req_format RQF_OST_GET_INFO_GENERIC = DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client, - ost_get_info_generic_server); + ost_get_info_generic_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC); struct req_format RQF_OST_GET_INFO_LAST_ID = DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client, - ost_get_last_id_server); + ost_get_last_id_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID); struct req_format RQF_OST_GET_INFO_LAST_FID = DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client, - ost_get_last_fid_server); + ost_get_last_fid_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID); struct req_format RQF_OST_SET_INFO_LAST_FID = DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client, - empty); + empty); EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID); struct req_format RQF_OST_GET_INFO_FIEMAP = DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client, - ost_get_fiemap_server); + ost_get_fiemap_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP); #if !defined(__REQ_LAYOUT_USER__) @@ -1712,7 +1630,7 @@ void req_capsule_init(struct req_capsule *pill, * high-priority RPC queue getting peeked at before ost_handle() * handles an OST RPC. 
*/ - if (req != NULL && pill == &req->rq_pill && req->rq_pill_init) + if (req && pill == &req->rq_pill && req->rq_pill_init) return; memset(pill, 0, sizeof(*pill)); @@ -1720,7 +1638,7 @@ void req_capsule_init(struct req_capsule *pill, pill->rc_loc = location; req_capsule_init_area(pill); - if (req != NULL && pill == &req->rq_pill) + if (req && pill == &req->rq_pill) req->rq_pill_init = 1; } EXPORT_SYMBOL(req_capsule_init); @@ -1752,7 +1670,7 @@ static struct lustre_msg *__req_msg(const struct req_capsule *pill, */ void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt) { - LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt); + LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt); LASSERT(__req_format_is_sane(fmt)); pill->rc_fmt = fmt; @@ -1773,8 +1691,6 @@ int req_capsule_filled_sizes(struct req_capsule *pill, const struct req_format *fmt = pill->rc_fmt; int i; - LASSERT(fmt != NULL); - for (i = 0; i < fmt->rf_fields[loc].nr; ++i) { if (pill->rc_area[loc][i] == -1) { pill->rc_area[loc][i] = @@ -1810,15 +1726,15 @@ int req_capsule_server_pack(struct req_capsule *pill) LASSERT(pill->rc_loc == RCL_SERVER); fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); count = req_capsule_filled_sizes(pill, RCL_SERVER); rc = lustre_pack_reply(pill->rc_req, count, pill->rc_area[RCL_SERVER], NULL); if (rc != 0) { DEBUG_REQ(D_ERROR, pill->rc_req, - "Cannot pack %d fields in format `%s': ", - count, fmt->rf_name); + "Cannot pack %d fields in format `%s': ", + count, fmt->rf_name); } return rc; } @@ -1835,9 +1751,8 @@ static int __req_capsule_offset(const struct req_capsule *pill, int offset; offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc]; - LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", - pill->rc_fmt->rf_name, - field->rmf_name, offset, loc); + LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name, + field->rmf_name, offset, loc); offset--; LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR); @@ -1865,7 +1780,7 @@ swabber_dumper_helper(struct req_capsule *pill, swabber = swabber ?: field->rmf_swabber; if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) && - swabber != NULL && value != NULL) + swabber && value) do_swab = 1; else do_swab = 0; @@ -1883,7 +1798,7 @@ swabber_dumper_helper(struct req_capsule *pill, return; swabber(value); ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset); - if (dump) { + if (dump && field->rmf_dumper) { CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n", field->rmf_name); field->rmf_dumper(value); @@ -1947,17 +1862,15 @@ static void *__req_capsule_get(struct req_capsule *pill, [RCL_SERVER] = "server" }; - LASSERT(pill != NULL); - LASSERT(pill != LP_POISON); fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); LASSERT(fmt != LP_POISON); LASSERT(__req_format_is_sane(fmt)); offset = __req_capsule_offset(pill, field, loc); msg = __req_msg(pill, loc); - LASSERT(msg != NULL); + LASSERT(msg); getter = (field->rmf_flags & RMF_F_STRING) ? (typeof(getter))lustre_msg_string : lustre_msg_buf; @@ -1980,7 +1893,7 @@ static void *__req_capsule_get(struct req_capsule *pill, } value = getter(msg, offset, len); - if (value == NULL) { + if (!value) { DEBUG_REQ(D_ERROR, pill->rc_req, "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. 
%d (%s)\n", field->rmf_name, offset, lustre_msg_bufcount(msg), @@ -2209,7 +2122,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) const struct req_format *old; - LASSERT(pill->rc_fmt != NULL); + LASSERT(pill->rc_fmt); LASSERT(__req_format_is_sane(fmt)); old = pill->rc_fmt; @@ -2222,7 +2135,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) const struct req_msg_field *ofield = FMT_FIELD(old, i, j); /* "opaque" fields can be transmogrified */ - if (ofield->rmf_swabber == NULL && + if (!ofield->rmf_swabber && (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 && (ofield->rmf_size == -1 || ofield->rmf_flags == RMF_F_NO_SIZE_CHECK)) @@ -2289,7 +2202,7 @@ void req_capsule_shrink(struct req_capsule *pill, int offset; fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); LASSERT(__req_format_is_sane(fmt)); LASSERT(req_capsule_has_field(pill, field, loc)); LASSERT(req_capsule_field_present(pill, field, loc)); @@ -2299,7 +2212,7 @@ void req_capsule_shrink(struct req_capsule *pill, msg = __req_msg(pill, loc); len = lustre_msg_buflen(msg, offset); LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n", - fmt->rf_name, field->rmf_name, len, newlen); + fmt->rf_name, field->rmf_name, len, newlen); if (loc == RCL_CLIENT) pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen, diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c index e87702073..a23ac5f9a 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c @@ -75,7 +75,8 @@ } while (0) /* This is a callback from the llog_* functions. - * Assumes caller has already pushed us into the kernel context. */ + * Assumes caller has already pushed us into the kernel context. 
+ */ static int llog_client_open(const struct lu_env *env, struct llog_handle *lgh, struct llog_logid *logid, char *name, enum llog_open_param open_param) @@ -93,7 +94,7 @@ static int llog_client_open(const struct lu_env *env, LASSERT(lgh); req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -130,7 +131,7 @@ static int llog_client_open(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } @@ -158,7 +159,7 @@ static int llog_client_next_block(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_NEXT_BLOCK); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -179,14 +180,14 @@ static int llog_client_next_block(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } /* The log records are swabbed as they are processed */ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (ptr == NULL) { + if (!ptr) { rc = -EFAULT; goto out; } @@ -216,7 +217,7 @@ static int llog_client_prev_block(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_PREV_BLOCK); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -236,13 +237,13 @@ static int llog_client_prev_block(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (ptr == NULL) { + if (!ptr) { rc = -EFAULT; goto out; } @@ -269,7 +270,7 @@ static int llog_client_read_header(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_READ_HEADER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -285,7 +286,7 @@ static int llog_client_read_header(const struct lu_env *env, goto out; hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR); - if (hdr == NULL) { + if (!hdr) { rc = -EFAULT; goto out; } @@ -316,8 +317,9 @@ static int llog_client_close(const struct lu_env *env, struct llog_handle *handle) { /* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because - the servers all close the file at the end of every - other LLOG_ RPC. */ + * the servers all close the file at the end of every + * other LLOG_ RPC. 
+ */ return 0; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c index dac66f5b3..fbccb6221 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c @@ -58,7 +58,7 @@ int llog_initiator_connect(struct llog_ctxt *ctxt) LASSERT(ctxt); new_imp = ctxt->loc_obd->u.cli.cl_import; - LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp, + LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp, "%p - %p\n", ctxt->loc_imp, new_imp); mutex_lock(&ctxt->loc_mutex); if (ctxt->loc_imp != new_imp) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c index cc55b7973..c95a91ce2 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c @@ -131,7 +131,6 @@ static struct ll_rpc_opcode { { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" }, { SEC_CTX_FINI, "sec_ctx_fini" }, { FLD_QUERY, "fld_query" }, - { UPDATE_OBJ, "update_obj" }, }; static struct ll_eopcode { @@ -192,15 +191,15 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir, unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV; - LASSERT(*debugfs_root_ret == NULL); - LASSERT(*stats_ret == NULL); + LASSERT(!*debugfs_root_ret); + LASSERT(!*stats_ret); svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES, 0); - if (svc_stats == NULL) + if (!svc_stats) return; - if (dir != NULL) { + if (dir) { svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL); if (IS_ERR(svc_debugfs_entry)) { lprocfs_free_stats(&svc_stats); @@ -246,11 +245,11 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir, rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats); if (rc < 0) { - if (dir != NULL) + if (dir) ldebugfs_remove(&svc_debugfs_entry); lprocfs_free_stats(&svc_stats); } else { - if (dir != NULL) + if (dir) *debugfs_root_ret = svc_debugfs_entry; *stats_ret = svc_stats; } @@ -307,8 +306,9 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, /* This sanity check is more of an insanity check; we can still * hose a kernel by allowing the request history to grow too - * far. */ - bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + * far. + */ + bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (val > totalram_pages / (2 * bufpages)) return -ERANGE; @@ -454,10 +454,8 @@ static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state) * \param[out] info Holds returned status information */ static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_pol_info *info) + struct ptlrpc_nrs_pol_info *info) { - LASSERT(policy != NULL); - LASSERT(info != NULL); assert_spin_locked(&policy->pol_nrs->nrs_lock); memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX); @@ -508,7 +506,7 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n) spin_unlock(&nrs->nrs_lock); infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS); - if (infos == NULL) { + if (!infos) { rc = -ENOMEM; goto unlock; } @@ -520,8 +518,7 @@ again: pol_idx = 0; - list_for_each_entry(policy, &nrs->nrs_policy_list, - pol_list) { + list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) { LASSERT(pol_idx < num_pols); nrs_policy_get_info_locked(policy, &tmp); @@ -592,7 +589,7 @@ again: * active: 0 */ seq_printf(m, "%s\n", - !hp ? "\nregular_requests:" : "high_priority_requests:"); + !hp ? 
"\nregular_requests:" : "high_priority_requests:"); for (pol_idx = 0; pol_idx < num_pols; pol_idx++) { seq_printf(m, " - name: %s\n" @@ -676,7 +673,7 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file, /** * No [reg|hp] token has been specified */ - if (cmd == NULL) + if (!cmd) goto default_queue; /** @@ -733,15 +730,15 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt, struct list_head *e; struct ptlrpc_request *req; - if (srhi->srhi_req != NULL && - srhi->srhi_seq > svcpt->scp_hist_seq_culled && + if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled && srhi->srhi_seq <= seq) { /* If srhi_req was set previously, hasn't been culled and * we're searching for a seq on or after it (i.e. more * recent), search from it onwards. * Since the service history is LRU (i.e. culled reqs will * be near the head), we shouldn't have to do long - * re-scans */ + * re-scans + */ LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq, "%s:%d: seek seq %llu, request seq %llu\n", svcpt->scp_service->srv_name, svcpt->scp_cpt, @@ -919,7 +916,8 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) * here. The request could contain any old crap, so you * must be just as careful as the service's request * parser. Currently I only print stuff here I know is OK - * to look at coz it was set up in request_in_callback()!!! */ + * to look at coz it was set up in request_in_callback()!!! + */ seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ", req->rq_history_seq, nidstr, libcfs_id2str(req->rq_peer), req->rq_xid, @@ -927,7 +925,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) (s64)req->rq_arrival_time.tv_sec, (long)(req->rq_sent - req->rq_arrival_time.tv_sec), (long)(req->rq_sent - req->rq_deadline)); - if (svc->srv_ops.so_req_printer == NULL) + if (!svc->srv_ops.so_req_printer) seq_putc(s, '\n'); else svc->srv_ops.so_req_printer(s, srhi->srhi_req); @@ -971,7 +969,7 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) if (AT_OFF) { seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n", - obd_timeout); + obd_timeout); return 0; } @@ -982,8 +980,8 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) s2dhms(&ts, ktime_get_real_seconds() - worstt); seq_printf(m, "%10s : cur %3u worst %3u (at %lld, " - DHMS_FMT" ago) ", "service", - cur, worst, (s64)worstt, DHMS_VARS(&ts)); + DHMS_FMT " ago) ", "service", + cur, worst, (s64)worstt, DHMS_VARS(&ts)); lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate); } @@ -1103,7 +1101,7 @@ void ptlrpc_ldebugfs_register_service(struct dentry *entry, "stats", &svc->srv_debugfs_entry, &svc->srv_stats); - if (svc->srv_debugfs_entry == NULL) + if (IS_ERR_OR_NULL(svc->srv_debugfs_entry)) return; ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL); @@ -1129,7 +1127,7 @@ void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount) int opc = opcode_offset(op); svc_stats = req->rq_import->imp_obd->obd_svc_stats; - if (svc_stats == NULL || opc <= 0) + if (!svc_stats || opc <= 0) return; LASSERT(opc < LUSTRE_MAX_OPCODES); if (!(op == LDLM_ENQUEUE || op == MDS_REINT)) @@ -1166,7 +1164,7 @@ EXPORT_SYMBOL(ptlrpc_lprocfs_brw); void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc) { - if (svc->srv_debugfs_entry != NULL) + if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry)) ldebugfs_remove(&svc->srv_debugfs_entry); if (svc->srv_stats) @@ -1198,7 +1196,7 @@ int lprocfs_wr_ping(struct file *file, const char __user 
*buffer, req = ptlrpc_prep_ping(obd->u.cli.cl_import); up_read(&obd->u.cli.cl_sem); - if (req == NULL) + if (!req) return -ENOMEM; req->rq_send_state = LUSTRE_IMP_FULL; @@ -1228,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer, const char prefix[] = "connection="; const int prefix_len = sizeof(prefix) - 1; - if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) + if (count > PAGE_SIZE - 1 || count <= prefix_len) return -EINVAL; kbuf = kzalloc(count + 1, GFP_NOFS); @@ -1298,7 +1296,7 @@ int lprocfs_rd_pinger_recov(struct seq_file *m, void *n) EXPORT_SYMBOL(lprocfs_rd_pinger_recov); int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer, - size_t count, loff_t *off) + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; struct client_obd *cli = &obd->u.cli; diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c index c5d7ff5cb..10b8fe82a 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c +++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c @@ -56,7 +56,6 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len, lnet_md_t md; LASSERT(portal != 0); - LASSERT(conn != NULL); CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer)); md.start = base; md.length = len; @@ -88,7 +87,8 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len, int rc2; /* We're going to get an UNLINK event when I unlink below, * which will complete just like any other failed send, so - * I fall through and return success here! */ + * I fall through and return success here! + */ CERROR("LNetPut(%s, %d, %lld) failed: %d\n", libcfs_id2str(conn->c_peer), portal, xid, rc); rc2 = LNetMDUnlink(*mdh); @@ -130,7 +130,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) LASSERT(desc->bd_md_count == 0); LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); - LASSERT(desc->bd_req != NULL); + LASSERT(desc->bd_req); LASSERT(desc->bd_type == BULK_PUT_SINK || desc->bd_type == BULK_GET_SOURCE); @@ -153,7 +153,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) * using the same RDMA match bits after an error. * * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The - * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */ + * first bulk XID is power-of-two aligned before rq_xid. LU-1431 + */ xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); LASSERTF(!(desc->bd_registered && req->rq_send_state != LUSTRE_IMP_REPLAY) || @@ -209,7 +210,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) } /* Set rq_xid to matchbits of the final bulk so that server can - * infer the number of bulks that were prepared */ + * infer the number of bulks that were prepared + */ req->rq_xid = --xid; LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), "bd_last_xid = x%llu, rq_xid = x%llu\n", @@ -260,7 +262,8 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) /* the unlink ensures the callback happens ASAP and is the last * one. 
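The ptlrpc_register_bulk() hunk above preserves the LU-1431 rule spelled out in its comment: for multi-bulk RPCs, rq_xid is the last XID of the set, and the first bulk XID is rq_xid rounded down to a multiple of bd_md_max_brw (always a power of two). A standalone sketch of that arithmetic, with example values invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rq_xid = 0x4007;	/* example request XID (last bulk XID) */
	uint64_t max_brw = 4;		/* bd_md_max_brw, always a power of two */

	/* Clearing the low bits gives the first bulk XID. */
	uint64_t xid = rq_xid & ~(max_brw - 1);

	/* The server sees the final matchbits and can infer how many
	 * bulks were prepared: xid .. xid + max_brw - 1. */
	printf("first=%#llx last=%#llx\n",
	       (unsigned long long)xid,
	       (unsigned long long)(xid + max_brw - 1));	/* 0x4004, 0x4007 */
	return 0;
}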
If it fails, it must be because completion just happened, * but we must still l_wait_event() in this case to give liblustre - * a chance to run client_bulk_callback() */ + * a chance to run client_bulk_callback() + */ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ @@ -273,14 +276,15 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) if (async) return 0; - if (req->rq_set != NULL) + if (req->rq_set) wq = &req->rq_set->set_waitq; else wq = &req->rq_reply_waitq; for (;;) { /* Network access will complete in finite time but the HUGE - * timeout lets us CWARN for visibility of sluggish NALs */ + * timeout lets us CWARN for visibility of sluggish LNDs + */ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); @@ -305,13 +309,13 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) req->rq_arrival_time.tv_sec, 1); if (!(flags & PTLRPC_REPLY_EARLY) && - (req->rq_type != PTL_RPC_MSG_ERR) && - (req->rq_reqmsg != NULL) && + (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg && !(lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) { /* early replies, errors and recovery requests don't count - * toward our service time estimate */ + * toward our service time estimate + */ int oldse = at_measured(&svcpt->scp_at_estimate, service_time); if (oldse != 0) { @@ -325,7 +329,8 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) lustre_msg_set_service_time(req->rq_repmsg, service_time); /* Report service time estimate for future client reqs, but report 0 * (to be ignored by client) if it's a error reply during recovery. - * (bz15815) */ + * (bz15815) + */ if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export) lustre_msg_set_timeout(req->rq_repmsg, 0); else @@ -360,10 +365,10 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) * target_queue_final_reply(). 
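The lproc_ptlrpc.c hunk earlier in this patch swaps PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT for PAGE_SIZE/PAGE_SHIFT (the PAGE_CACHE_* aliases were removed in the 4.6 cycle) in the request-history sanity check. The expression there rounds a buffer size in bytes up to whole pages; a standalone check with an invented srv_buf_size:

#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages, as on x86 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long srv_buf_size = 17000;	/* example value only */

	/* Adding PAGE_SIZE - 1 before shifting rounds any partial
	 * page up to a full one. */
	unsigned long bufpages = (srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("%lu bytes -> %lu pages\n", srv_buf_size, bufpages);	/* 5 */
	return 0;
}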
*/ LASSERT(req->rq_no_reply == 0); - LASSERT(req->rq_reqbuf != NULL); - LASSERT(rs != NULL); + LASSERT(req->rq_reqbuf); + LASSERT(rs); LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult); - LASSERT(req->rq_repmsg != NULL); + LASSERT(req->rq_repmsg); LASSERT(req->rq_repmsg == rs->rs_msg); LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback); LASSERT(rs->rs_cb_id.cbid_arg == rs); @@ -403,12 +408,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) ptlrpc_at_set_reply(req, flags); - if (req->rq_export == NULL || req->rq_export->exp_connection == NULL) + if (!req->rq_export || !req->rq_export->exp_connection) conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL); else conn = ptlrpc_connection_addref(req->rq_export->exp_connection); - if (unlikely(conn == NULL)) { + if (unlikely(!conn)) { CERROR("not replying on NULL connection\n"); /* bug 9635 */ return -ENOTCONN; } @@ -498,14 +503,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) LASSERT(request->rq_wait_ctx == 0); /* If this is a re-transmit, we're required to have disengaged - * cleanly from the previous attempt */ + * cleanly from the previous attempt + */ LASSERT(!request->rq_receiving_reply); LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) && - (request->rq_import->imp_state == LUSTRE_IMP_FULL))); + (request->rq_import->imp_state == LUSTRE_IMP_FULL))); - if (unlikely(obd != NULL && obd->obd_fail)) { + if (unlikely(obd && obd->obd_fail)) { CDEBUG(D_HA, "muting rpc for failed imp obd %s\n", - obd->obd_name); + obd->obd_name); /* this prevents us from waiting in ptlrpc_queue_wait */ spin_lock(&request->rq_lock); request->rq_err = 1; @@ -535,7 +541,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) goto out; /* bulk register should be done after wrap_request() */ - if (request->rq_bulk != NULL) { + if (request->rq_bulk) { rc = ptlrpc_register_bulk(request); if (rc != 0) goto out; @@ -543,14 +549,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) if (!noreply) { LASSERT(request->rq_replen != 0); - if (request->rq_repbuf == NULL) { - LASSERT(request->rq_repdata == NULL); - LASSERT(request->rq_repmsg == NULL); + if (!request->rq_repbuf) { + LASSERT(!request->rq_repdata); + LASSERT(!request->rq_repmsg); rc = sptlrpc_cli_alloc_repbuf(request, request->rq_replen); if (rc) { /* this prevents us from looping in - * ptlrpc_queue_wait */ + * ptlrpc_queue_wait + */ spin_lock(&request->rq_lock); request->rq_err = 1; spin_unlock(&request->rq_lock); @@ -602,7 +609,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) reply_md.eq_handle = ptlrpc_eq_h; /* We must see the unlink callback to unset rq_reply_unlink, - so we can't auto-unlink */ + * so we can't auto-unlink + */ rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN, &request->rq_reply_md_h); if (rc != 0) { @@ -623,7 +631,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) /* add references on request for request_out_callback */ ptlrpc_request_addref(request); - if (obd != NULL && obd->obd_svc_stats != NULL) + if (obd && obd->obd_svc_stats) lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR, atomic_read(&request->rq_import->imp_inflight)); @@ -632,7 +640,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) ktime_get_real_ts64(&request->rq_arrival_time); request->rq_sent = ktime_get_real_seconds(); /* We give the server rq_timeout secs to process the req, and - add the network latency for our local timeout. 
*/ + * add the network latency for our local timeout. + */ request->rq_deadline = request->rq_sent + request->rq_timeout + ptlrpc_at_get_net_latency(request); @@ -656,7 +665,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) cleanup_me: /* MEUnlink is safe; the PUT didn't even get off the ground, and * nobody apart from the PUT's target has the right nid+XID to - * access the reply buffer. */ + * access the reply buffer. + */ rc2 = LNetMEUnlink(reply_me_h); LASSERT(rc2 == 0); /* UNLINKED callback called synchronously */ @@ -664,7 +674,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) cleanup_bulk: /* We do sync unlink here as there was no real transfer here so - * the chance to have long unlink to sluggish net is smaller here. */ + * the chance to have long unlink to sluggish net is smaller here. + */ ptlrpc_unregister_bulk(request, 0); out: if (request->rq_memalloc) @@ -692,7 +703,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd) /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL, * which means buffer can only be attached on local CPT, and LND - * threads can find it by grabbing a local lock */ + * threads can find it by grabbing a local lock + */ rc = LNetMEAttach(service->srv_req_portal, match_id, 0, ~0, LNET_UNLINK, rqbd->rqbd_svcpt->scp_cpt >= 0 ? diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c index 7044e1ff6..710fb806f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c @@ -13,10 +13,6 @@ * GNU General Public License version 2 for more details. A copy is * included in the COPYING file that accompanied this code. - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * GPL HEADER END */ /* @@ -47,9 +43,6 @@ #include "../../include/linux/libcfs/libcfs.h" #include "ptlrpc_internal.h" -/* XXX: This is just for liblustre. Remove the #if defined directive when the - * "cfs_" prefix is dropped from cfs_list_head. */ - /** * NRS core object. */ @@ -57,7 +50,7 @@ struct nrs_core nrs_core; static int nrs_policy_init(struct ptlrpc_nrs_policy *policy) { - return policy->pol_desc->pd_ops->op_policy_init != NULL ? + return policy->pol_desc->pd_ops->op_policy_init ? policy->pol_desc->pd_ops->op_policy_init(policy) : 0; } @@ -66,7 +59,7 @@ static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy) LASSERT(policy->pol_ref == 0); LASSERT(policy->pol_req_queued == 0); - if (policy->pol_desc->pd_ops->op_policy_fini != NULL) + if (policy->pol_desc->pd_ops->op_policy_fini) policy->pol_desc->pd_ops->op_policy_fini(policy); } @@ -82,7 +75,7 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy, if (policy->pol_state == NRS_POL_STATE_STOPPED) return -ENODEV; - return policy->pol_desc->pd_ops->op_policy_ctl != NULL ? + return policy->pol_desc->pd_ops->op_policy_ctl ? 
policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) : -ENOSYS; } @@ -91,7 +84,7 @@ static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) { struct ptlrpc_nrs *nrs = policy->pol_nrs; - if (policy->pol_desc->pd_ops->op_policy_stop != NULL) { + if (policy->pol_desc->pd_ops->op_policy_stop) { spin_unlock(&nrs->nrs_lock); policy->pol_desc->pd_ops->op_policy_stop(policy); @@ -154,7 +147,7 @@ static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs) { struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary; - if (tmp == NULL) + if (!tmp) return; nrs->nrs_policy_primary = NULL; @@ -220,12 +213,12 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy) * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can * register with NRS core. */ - LASSERT(nrs->nrs_policy_fallback == NULL); + LASSERT(!nrs->nrs_policy_fallback); } else { /** * Shouldn't start primary policy if w/o fallback policy. */ - if (nrs->nrs_policy_fallback == NULL) + if (!nrs->nrs_policy_fallback) return -EPERM; if (policy->pol_state == NRS_POL_STATE_STARTED) @@ -311,7 +304,7 @@ static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy) policy->pol_ref--; if (unlikely(policy->pol_ref == 0 && - policy->pol_state == NRS_POL_STATE_STOPPING)) + policy->pol_state == NRS_POL_STATE_STOPPING)) nrs_policy_stop0(policy); } @@ -326,7 +319,7 @@ static void nrs_policy_put(struct ptlrpc_nrs_policy *policy) * Find and return a policy by name. */ static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs, - char *name) + char *name) { struct ptlrpc_nrs_policy *tmp; @@ -348,10 +341,10 @@ static void nrs_resource_put(struct ptlrpc_nrs_resource *res) { struct ptlrpc_nrs_policy *policy = res->res_policy; - if (policy->pol_desc->pd_ops->op_res_put != NULL) { + if (policy->pol_desc->pd_ops->op_res_put) { struct ptlrpc_nrs_resource *parent; - for (; res != NULL; res = parent) { + for (; res; res = parent) { parent = res->res_parent; policy->pol_desc->pd_ops->op_res_put(policy, res); } @@ -390,12 +383,11 @@ struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy, rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res, &tmp, moving_req); if (rc < 0) { - if (res != NULL) + if (res) nrs_resource_put(res); return NULL; } - LASSERT(tmp != NULL); tmp->res_parent = res; tmp->res_policy = policy; res = tmp; @@ -445,7 +437,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, nrs_policy_get_locked(fallback); primary = nrs->nrs_policy_primary; - if (primary != NULL) + if (primary) nrs_policy_get_locked(primary); spin_unlock(&nrs->nrs_lock); @@ -454,9 +446,9 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, * Obtain resource hierarchy references. */ resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req); - LASSERT(resp[NRS_RES_FALLBACK] != NULL); + LASSERT(resp[NRS_RES_FALLBACK]); - if (primary != NULL) { + if (primary) { resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq, moving_req); /** @@ -465,7 +457,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, * reference on the policy as it will not be used for this * request. 
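nrs_resource_get_safe() above encodes the policy pairing the surrounding hunks rely on: the fallback policy must always yield a resource, the primary is optional, and a primary that declines the request has its reference dropped immediately. Condensed from the hunks, with the NRS_RES_* slot indices as in the code:

	resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req);
	LASSERT(resp[NRS_RES_FALLBACK]);	/* the fallback may never fail */

	if (primary) {
		resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq,
							 moving_req);
		/* Primary won't serve this request: drop its reference. */
		if (!resp[NRS_RES_PRIMARY])
			nrs_policy_put(primary);
	}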
*/ - if (resp[NRS_RES_PRIMARY] == NULL) + if (!resp[NRS_RES_PRIMARY]) nrs_policy_put(primary); } } @@ -482,11 +474,10 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) { struct ptlrpc_nrs_policy *pols[NRS_RES_MAX]; - struct ptlrpc_nrs *nrs = NULL; int i; for (i = 0; i < NRS_RES_MAX; i++) { - if (resp[i] != NULL) { + if (resp[i]) { pols[i] = resp[i]->res_policy; nrs_resource_put(resp[i]); resp[i] = NULL; @@ -496,18 +487,9 @@ static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) } for (i = 0; i < NRS_RES_MAX; i++) { - if (pols[i] == NULL) - continue; - - if (nrs == NULL) { - nrs = pols[i]->pol_nrs; - spin_lock(&nrs->nrs_lock); - } - nrs_policy_put_locked(pols[i]); + if (pols[i]) + nrs_policy_put(pols[i]); } - - if (nrs != NULL) - spin_unlock(&nrs->nrs_lock); } /** @@ -536,7 +518,7 @@ struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy, nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force); - LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy)); + LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy)); return nrq; } @@ -562,7 +544,7 @@ static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq) * the preferred choice. */ for (i = NRS_RES_MAX - 1; i >= 0; i--) { - if (nrq->nr_res_ptrs[i] == NULL) + if (!nrq->nr_res_ptrs[i]) continue; nrq->nr_res_idx = i; @@ -632,7 +614,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, spin_lock(&nrs->nrs_lock); policy = nrs_policy_find_locked(nrs, name); - if (policy == NULL) { + if (!policy) { rc = -ENOENT; goto out; } @@ -654,7 +636,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, break; } out: - if (policy != NULL) + if (policy) nrs_policy_put_locked(policy); spin_unlock(&nrs->nrs_lock); @@ -679,7 +661,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) spin_lock(&nrs->nrs_lock); policy = nrs_policy_find_locked(nrs, name); - if (policy == NULL) { + if (!policy) { spin_unlock(&nrs->nrs_lock); CERROR("Can't find NRS policy %s\n", name); @@ -712,7 +694,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) nrs_policy_fini(policy); - LASSERT(policy->pol_private == NULL); + LASSERT(!policy->pol_private); kfree(policy); return 0; @@ -736,18 +718,16 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs, struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt; int rc; - LASSERT(svcpt != NULL); - LASSERT(desc->pd_ops != NULL); - LASSERT(desc->pd_ops->op_res_get != NULL); - LASSERT(desc->pd_ops->op_req_get != NULL); - LASSERT(desc->pd_ops->op_req_enqueue != NULL); - LASSERT(desc->pd_ops->op_req_dequeue != NULL); - LASSERT(desc->pd_compat != NULL); + LASSERT(desc->pd_ops->op_res_get); + LASSERT(desc->pd_ops->op_req_get); + LASSERT(desc->pd_ops->op_req_enqueue); + LASSERT(desc->pd_ops->op_req_dequeue); + LASSERT(desc->pd_compat); policy = kzalloc_node(sizeof(*policy), GFP_NOFS, cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, svcpt->scp_cpt)); - if (policy == NULL) + if (!policy) return -ENOMEM; policy->pol_nrs = nrs; @@ -767,7 +747,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs, spin_lock(&nrs->nrs_lock); tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name); - if (tmp != NULL) { + if (tmp) { CERROR("NRS policy %s has been registered, can't register it for %s\n", policy->pol_desc->pd_name, svcpt->scp_service->srv_name); @@ -817,7 +797,7 @@ static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req) */ if 
(unlikely(list_empty(&policy->pol_list_queued))) list_add_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); + &policy->pol_nrs->nrs_policy_queued); } /** @@ -957,14 +937,14 @@ static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt) /** * Optionally allocate a high-priority NRS head. */ - if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL) + if (!svcpt->scp_service->srv_ops.so_hpreq_handler) goto out; svcpt->scp_nrs_hp = kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS, cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, svcpt->scp_cpt)); - if (svcpt->scp_nrs_hp == NULL) { + if (!svcpt->scp_nrs_hp) { rc = -ENOMEM; goto out; } @@ -998,8 +978,7 @@ again: nrs = nrs_svcpt2nrs(svcpt, hp); nrs->nrs_stopping = 1; - list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, - pol_list) { + list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) { rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name); LASSERT(rc == 0); } @@ -1089,7 +1068,7 @@ again: } } - if (desc->pd_ops->op_lprocfs_fini != NULL) + if (desc->pd_ops->op_lprocfs_fini) desc->pd_ops->op_lprocfs_fini(svc); } @@ -1115,15 +1094,15 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) { struct ptlrpc_service *svc; struct ptlrpc_nrs_pol_desc *desc; + size_t len; int rc = 0; - LASSERT(conf != NULL); - LASSERT(conf->nc_ops != NULL); - LASSERT(conf->nc_compat != NULL); + LASSERT(conf->nc_ops); + LASSERT(conf->nc_compat); LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one, - conf->nc_compat_svc_name != NULL)); + conf->nc_compat_svc_name)); LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0, - conf->nc_owner != NULL)); + conf->nc_owner)); conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0'; @@ -1146,7 +1125,7 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) mutex_lock(&nrs_core.nrs_mutex); - if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) { + if (nrs_policy_find_desc_locked(conf->nc_name)) { CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n", conf->nc_name); rc = -EEXIST; @@ -1159,7 +1138,12 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) goto fail; } - strncpy(desc->pd_name, conf->nc_name, NRS_POL_NAME_MAX); + len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name)); + if (len >= sizeof(desc->pd_name)) { + kfree(desc); + rc = -E2BIG; + goto fail; + } desc->pd_ops = conf->nc_ops; desc->pd_compat = conf->nc_compat; desc->pd_compat_svc_name = conf->nc_compat_svc_name; @@ -1224,7 +1208,7 @@ again: * No need to take a reference to other modules here, as we * will be calling from the module's init() function. 
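The registration hunk above is more than a style fix: strncpy() neither guarantees NUL termination nor reports truncation, whereas strlcpy() always terminates and returns the length of the source string, so an over-long policy name can now be rejected instead of silently truncated. As in the hunk:

	len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name));
	if (len >= sizeof(desc->pd_name)) {
		/* conf->nc_name did not fit: refuse rather than truncate */
		kfree(desc);
		rc = -E2BIG;
		goto fail;
	}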
*/ - if (desc->pd_ops->op_lprocfs_init != NULL) { + if (desc->pd_ops->op_lprocfs_init) { rc = desc->pd_ops->op_lprocfs_init(svc); if (rc != 0) { rc2 = nrs_policy_unregister_locked(desc); @@ -1288,7 +1272,7 @@ int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc) if (!nrs_policy_compatible(svc, desc)) continue; - if (desc->pd_ops->op_lprocfs_init != NULL) { + if (desc->pd_ops->op_lprocfs_init) { rc = desc->pd_ops->op_lprocfs_init(svc); if (rc != 0) goto failed; @@ -1329,7 +1313,7 @@ void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc) if (!nrs_policy_compatible(svc, desc)) continue; - if (desc->pd_ops->op_lprocfs_fini != NULL) + if (desc->pd_ops->op_lprocfs_fini) desc->pd_ops->op_lprocfs_fini(svc); } @@ -1376,7 +1360,8 @@ void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req) if (req->rq_nrq.nr_initialized) { nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs); /* no protection on bit nr_initialized because no - * contention at this late stage */ + * contention at this late stage + */ req->rq_nrq.nr_finalized = 1; } } @@ -1434,7 +1419,7 @@ static void nrs_request_removed(struct ptlrpc_nrs_policy *policy) policy->pol_nrs->nrs_req_queued); list_move_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); + &policy->pol_nrs->nrs_policy_queued); } } @@ -1466,10 +1451,9 @@ ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp, * Always try to drain requests from all NRS polices even if they are * inactive, because the user can change policy status at runtime. */ - list_for_each_entry(policy, &nrs->nrs_policy_queued, - pol_list_queued) { + list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) { nrq = nrs_request_get(policy, peek, force); - if (nrq != NULL) { + if (nrq) { if (likely(!peek)) { nrq->nr_started = 1; @@ -1619,8 +1603,7 @@ void ptlrpc_nrs_fini(void) struct ptlrpc_nrs_pol_desc *desc; struct ptlrpc_nrs_pol_desc *tmp; - list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, - pd_list) { + list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) { list_del_init(&desc->pd_list); kfree(desc); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c index 8e21f0cdc..b123a9324 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c @@ -13,10 +13,6 @@ * GNU General Public License version 2 for more details. A copy is * included in the COPYING file that accompanied this code. - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * GPL HEADER END */ /* @@ -83,7 +79,7 @@ static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy) head = kzalloc_node(sizeof(*head), GFP_NOFS, cfs_cpt_spread_node(nrs_pol2cptab(policy), nrs_pol2cptid(policy))); - if (head == NULL) + if (!head) return -ENOMEM; INIT_LIST_HEAD(&head->fh_list); @@ -104,7 +100,7 @@ static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy) { struct nrs_fifo_head *head = policy->pol_private; - LASSERT(head != NULL); + LASSERT(head); LASSERT(list_empty(&head->fh_list)); kfree(head); @@ -167,9 +163,9 @@ struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy, nrq = unlikely(list_empty(&head->fh_list)) ? 
NULL : list_entry(head->fh_list.next, struct ptlrpc_nrs_request, - nr_u.fifo.fr_list); + nr_u.fifo.fr_list); - if (likely(!peek && nrq != NULL)) { + if (likely(!peek && nrq)) { struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request, rq_nrq); diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c index f3cb5184f..492d63fad 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c @@ -133,7 +133,8 @@ EXPORT_SYMBOL(lustre_msg_size_v2); * NOTE: this should only be used for NEW requests, and should always be * in the form of a v2 request. If this is a connection to a v1 * target then the first buffer will be stripped because the ptlrpc - * data is part of the lustre_msg_v1 header. b=14043 */ + * data is part of the lustre_msg_v1 header. b=14043 + */ int lustre_msg_size(__u32 magic, int count, __u32 *lens) { __u32 size[] = { sizeof(struct ptlrpc_body) }; @@ -157,7 +158,8 @@ int lustre_msg_size(__u32 magic, int count, __u32 *lens) EXPORT_SYMBOL(lustre_msg_size); /* This is used to determine the size of a buffer that was already packed - * and will correctly handle the different message formats. */ + * and will correctly handle the different message formats. + */ int lustre_packed_msg_size(struct lustre_msg *msg) { switch (msg->lm_magic) { @@ -183,7 +185,7 @@ void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens, for (i = 0; i < count; i++) msg->lm_buflens[i] = lens[i]; - if (bufs == NULL) + if (!bufs) return; ptr = (char *)msg + lustre_msg_hdr_size_v2(count); @@ -267,7 +269,8 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) spin_unlock(&svcpt->scp_rep_lock); /* If we cannot get anything for some long time, we better - * bail out instead of waiting infinitely */ + * bail out instead of waiting infinitely + */ lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL); rc = l_wait_event(svcpt->scp_rep_waitq, !list_empty(&svcpt->scp_rep_idle), &lwi); @@ -277,7 +280,7 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) } rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, rs_list); + struct ptlrpc_reply_state, rs_list); list_del(&rs->rs_list); spin_unlock(&svcpt->scp_rep_lock); @@ -306,7 +309,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count, struct ptlrpc_reply_state *rs; int msg_len, rc; - LASSERT(req->rq_reply_state == NULL); + LASSERT(!req->rq_reply_state); if ((flags & LPRFL_EARLY_REPLY) == 0) { spin_lock(&req->rq_lock); @@ -383,7 +386,6 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size) { int i, offset, buflen, bufcount; - LASSERT(m != NULL); LASSERT(n >= 0); bufcount = m->lm_bufcount; @@ -488,7 +490,7 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs) LASSERT(!rs->rs_difficult || rs->rs_handled); LASSERT(!rs->rs_on_net); LASSERT(!rs->rs_scheduled); - LASSERT(rs->rs_export == NULL); + LASSERT(!rs->rs_export); LASSERT(rs->rs_nlocks == 0); LASSERT(list_empty(&rs->rs_exp_list)); LASSERT(list_empty(&rs->rs_obd_list)); @@ -677,7 +679,8 @@ int lustre_msg_buflen(struct lustre_msg *m, int n) EXPORT_SYMBOL(lustre_msg_buflen); /* NB return the bufcount for lustre_msg_v2 format, so if message is packed - * in V1 format, the result is one bigger. (add struct ptlrpc_body). */ + * in V1 format, the result is one bigger. (add struct ptlrpc_body). 
+ */ int lustre_msg_bufcount(struct lustre_msg *m) { switch (m->lm_magic) { @@ -705,7 +708,7 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len) LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic); } - if (str == NULL) { + if (!str) { CERROR("can't unpack string in msg %p buffer[%d]\n", m, index); return NULL; } @@ -740,7 +743,6 @@ static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index, { void *ptr = NULL; - LASSERT(msg != NULL); switch (msg->lm_magic) { case LUSTRE_MSG_MAGIC_V2: ptr = lustre_msg_buf_v2(msg, index, min_size); @@ -799,7 +801,8 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg) /* no break */ default: /* flags might be printed in debug code while message - * uninitialized */ + * uninitialized + */ return 0; } } @@ -1032,7 +1035,8 @@ int lustre_msg_get_status(struct lustre_msg *msg) /* no break */ default: /* status might be printed in debug code while message - * uninitialized */ + * uninitialized + */ return -EINVAL; } } @@ -1368,7 +1372,8 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) struct ptlrpc_body *pb; /* Don't set jobid for ldlm ast RPCs, they've been shrunk. - * See the comment in ptlrpc_request_pack(). */ + * See the comment in ptlrpc_request_pack(). + */ if (!opc || opc == LDLM_BL_CALLBACK || opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK) return; @@ -1377,7 +1382,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) sizeof(struct ptlrpc_body)); LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - if (jobid != NULL) + if (jobid) memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE); else if (pb->pb_jobid[0] == '\0') lustre_get_jobid(pb->pb_jobid); @@ -1427,7 +1432,7 @@ int do_set_info_async(struct obd_import *imp, int rc; req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -1488,7 +1493,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) * clients and servers without ptlrpc_body_v2 (< 2.3) * do not swab any fields beyond pb_jobid, as we are * using this swab function for both ptlrpc_body - * and ptlrpc_body_v2. */ + * and ptlrpc_body_v2. + */ CLASSERT(offsetof(typeof(*b), pb_jobid) != 0); } EXPORT_SYMBOL(lustre_swab_ptlrpc_body); @@ -1502,7 +1508,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd) __swab32s(&ocd->ocd_index); __swab32s(&ocd->ocd_brw_size); /* ocd_blocksize and ocd_inodespace don't need to be swabbed because - * they are 8-byte values */ + * they are 8-byte values + */ __swab16s(&ocd->ocd_grant_extent); __swab32s(&ocd->ocd_unused); __swab64s(&ocd->ocd_transno); @@ -1512,7 +1519,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd) /* Fields after ocd_cksum_types are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. 
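lustre_swab_connect() above swabs a field only when the matching bit in ocd_connect_flags says the peer actually sent it, precisely to avoid the out-of-bound access its comment warns about. The __swabXXs() helpers reverse byte order in place; an equivalent standalone illustration using a compiler builtin (the Lustre helpers themselves live elsewhere in the tree):

#include <stdint.h>
#include <stdio.h>

/* In-place 32-bit byte swap, the shape of __swab32s(). */
static void swab32s(uint32_t *x)
{
	*x = __builtin_bswap32(*x);
}

int main(void)
{
	uint32_t v = 0x11223344;

	swab32s(&v);
	printf("%#x\n", v);	/* prints 0x44332211 */
	return 0;
}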
+ */ if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE) __swab32s(&ocd->ocd_max_easize); if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) @@ -1848,20 +1856,6 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) } EXPORT_SYMBOL(lustre_swab_fiemap); -void lustre_swab_idx_info(struct idx_info *ii) -{ - __swab32s(&ii->ii_magic); - __swab32s(&ii->ii_flags); - __swab16s(&ii->ii_count); - __swab32s(&ii->ii_attrs); - lustre_swab_lu_fid(&ii->ii_fid); - __swab64s(&ii->ii_version); - __swab64s(&ii->ii_hash_start); - __swab64s(&ii->ii_hash_end); - __swab16s(&ii->ii_keysize); - __swab16s(&ii->ii_recsize); -} - void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr) { __swab32s(&rr->rr_opcode); @@ -1914,7 +1908,7 @@ static void print_lum(struct lov_user_md *lum) CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size); CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count); CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n", - lum->lmm_stripe_offset); + lum->lmm_stripe_offset); } static void lustre_swab_lmm_oi(struct ost_id *oi) @@ -1986,7 +1980,8 @@ static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d) { /* the lock data is a union and the first two fields are always an * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock - * data the same way. */ + * data the same way. + */ __swab64s(&d->l_extent.start); __swab64s(&d->l_extent.end); __swab64s(&d->l_extent.gid); @@ -2035,16 +2030,6 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r) } EXPORT_SYMBOL(lustre_swab_ldlm_reply); -void lustre_swab_quota_body(struct quota_body *b) -{ - lustre_swab_lu_fid(&b->qb_fid); - lustre_swab_lu_fid((struct lu_fid *)&b->qb_id); - __swab32s(&b->qb_flags); - __swab64s(&b->qb_count); - __swab64s(&b->qb_usage); - __swab64s(&b->qb_slv_ver); -} - /* Dump functions */ void dump_ioo(struct obd_ioobj *ioo) { @@ -2288,24 +2273,6 @@ void lustre_swab_hsm_request(struct hsm_request *hr) } EXPORT_SYMBOL(lustre_swab_hsm_request); -void lustre_swab_update_buf(struct update_buf *ub) -{ - __swab32s(&ub->ub_magic); - __swab32s(&ub->ub_count); -} -EXPORT_SYMBOL(lustre_swab_update_buf); - -void lustre_swab_update_reply_buf(struct update_reply *ur) -{ - int i; - - __swab32s(&ur->ur_version); - __swab32s(&ur->ur_count); - for (i = 0; i < ur->ur_count; i++) - __swab32s(&ur->ur_lens[i]); -} -EXPORT_SYMBOL(lustre_swab_update_reply_buf); - void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl) { __swab64s(&msl->msl_flags); diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c index fb2d5236a..8a869315c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c @@ -68,7 +68,7 @@ int ptlrpc_obd_ping(struct obd_device *obd) struct ptlrpc_request *req; req = ptlrpc_prep_ping(obd->u.cli.cl_import); - if (req == NULL) + if (!req) return -ENOMEM; req->rq_send_state = LUSTRE_IMP_FULL; @@ -86,7 +86,7 @@ static int ptlrpc_ping(struct obd_import *imp) struct ptlrpc_request *req; req = ptlrpc_prep_ping(imp); - if (req == NULL) { + if (!req) { CERROR("OOM trying to ping %s->%s\n", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); @@ -242,7 +242,7 @@ static int ptlrpc_pinger_main(void *arg) list_for_each(iter, &pinger_imports) { struct obd_import *imp = list_entry(iter, struct obd_import, - imp_pinger_chain); + imp_pinger_chain); ptlrpc_pinger_process_import(imp, this_ping); /* obd_timeout might have changed */ @@ -257,11 +257,12 @@ static int ptlrpc_pinger_main(void *arg) /* 
Wait until the next ping time, or until we're stopped. */ time_to_next_wake = pinger_check_timeout(this_ping); /* The ping sent by ptlrpc_send_rpc may get sent out - say .01 second after this. - ptlrpc_pinger_sending_on_import will then set the - next ping time to next_ping + .01 sec, which means - we will SKIP the next ping at next_ping, and the - ping will get sent 2 timeouts from now! Beware. */ + * say .01 second after this. + * ptlrpc_pinger_sending_on_import will then set the + * next ping time to next_ping + .01 sec, which means + * we will SKIP the next ping at next_ping, and the + * ping will get sent 2 timeouts from now! Beware. + */ CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n", time_to_next_wake, cfs_time_add(this_ping, @@ -293,6 +294,7 @@ static struct ptlrpc_thread pinger_thread; int ptlrpc_start_pinger(void) { struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc; if (!thread_is_init(&pinger_thread) && @@ -303,10 +305,11 @@ int ptlrpc_start_pinger(void) strcpy(pinger_thread.t_name, "ll_ping"); - rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread, - "%s", pinger_thread.t_name)); - if (IS_ERR_VALUE(rc)) { - CERROR("cannot start thread: %d\n", rc); + task = kthread_run(ptlrpc_pinger_main, &pinger_thread, + pinger_thread.t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start pinger thread: rc = %d\n", rc); return rc; } l_wait_event(pinger_thread.t_ctl_waitq, @@ -401,7 +404,8 @@ EXPORT_SYMBOL(ptlrpc_pinger_del_import); * be called when timeout happens. */ static struct timeout_item *ptlrpc_new_timeout(int time, - enum timeout_event event, timeout_cb_t cb, void *data) + enum timeout_event event, + timeout_cb_t cb, void *data) { struct timeout_item *ti; @@ -489,7 +493,6 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list, break; } } - LASSERTF(ti != NULL, "ti is NULL !\n"); if (list_empty(&ti->ti_obd_list)) { list_del(&ti->ti_chain); kfree(ti); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h index 8f67e0562..6ca26c98d 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h @@ -101,8 +101,6 @@ struct nrs_core { * registration/unregistration, and NRS core lprocfs operations. */ struct mutex nrs_mutex; - /* XXX: This is just for liblustre. Remove the #if defined directive - * when the * "cfs_" prefix is dropped from cfs_list_head. */ /** * List of all policy descriptors registered with NRS core; protected * by nrs_core::nrs_mutex. diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c index c4f1d0f5d..a8ec0e9d7 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c @@ -162,8 +162,8 @@ static void __exit ptlrpc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. 
"); MODULE_DESCRIPTION("Lustre Request Processor and Lock Management"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); module_init(ptlrpc_init); module_exit(ptlrpc_exit); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c index 60fb0ced7..db003f5da 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c @@ -163,8 +163,6 @@ void ptlrpcd_wake(struct ptlrpc_request *req) { struct ptlrpc_request_set *rq_set = req->rq_set; - LASSERT(rq_set != NULL); - wake_up(&rq_set->set_waitq); } EXPORT_SYMBOL(ptlrpcd_wake); @@ -176,7 +174,7 @@ ptlrpcd_select_pc(struct ptlrpc_request *req) int cpt; int idx; - if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL) + if (req && req->rq_send_state != LUSTRE_IMP_FULL) return &ptlrpcd_rcv; cpt = cfs_cpt_current(cfs_cpt_table, 1); @@ -209,11 +207,10 @@ static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des, if (likely(!list_empty(&src->set_new_requests))) { list_for_each_safe(pos, tmp, &src->set_new_requests) { req = list_entry(pos, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); req->rq_set = des; } - list_splice_init(&src->set_new_requests, - &des->set_requests); + list_splice_init(&src->set_new_requests, &des->set_requests); rc = atomic_read(&src->set_new_count); atomic_add(rc, &des->set_remaining); atomic_set(&src->set_new_count, 0); @@ -240,10 +237,11 @@ void ptlrpcd_add_req(struct ptlrpc_request *req) req->rq_invalid_rqset = 0; spin_unlock(&req->rq_lock); - l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi); + l_wait_event(req->rq_set_waitq, !req->rq_set, &lwi); } else if (req->rq_set) { /* If we have a valid "rq_set", just reuse it to avoid double - * linked. */ + * linked. + */ LASSERT(req->rq_phase == RQ_PHASE_NEW); LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY); @@ -286,9 +284,9 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) spin_lock(&set->set_new_req_lock); if (likely(!list_empty(&set->set_new_requests))) { list_splice_init(&set->set_new_requests, - &set->set_requests); + &set->set_requests); atomic_add(atomic_read(&set->set_new_count), - &set->set_remaining); + &set->set_remaining); atomic_set(&set->set_new_count, 0); /* * Need to calculate its timeout. @@ -321,7 +319,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) rc |= ptlrpc_check_set(env, set); /* NB: ptlrpc_check_set has already moved completed request at the - * head of seq::set_requests */ + * head of seq::set_requests + */ list_for_each_safe(pos, tmp, &set->set_requests) { req = list_entry(pos, struct ptlrpc_request, rq_set_chain); if (req->rq_phase != RQ_PHASE_COMPLETE) @@ -339,7 +338,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) rc = atomic_read(&set->set_new_count); /* If we have nothing to do, check whether we can take some - * work from our partner threads. */ + * work from our partner threads. 
+ */ if (rc == 0 && pc->pc_npartners > 0) { struct ptlrpcd_ctl *partner; struct ptlrpc_request_set *ps; @@ -349,12 +349,12 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) partner = pc->pc_partners[pc->pc_cursor++]; if (pc->pc_cursor >= pc->pc_npartners) pc->pc_cursor = 0; - if (partner == NULL) + if (!partner) continue; spin_lock(&partner->pc_lock); ps = partner->pc_set; - if (ps == NULL) { + if (!ps) { spin_unlock(&partner->pc_lock); continue; } @@ -422,7 +422,6 @@ static int ptlrpcd(void *arg) complete(&pc->pc_starting); /* - * This mainloop strongly resembles ptlrpc_set_wait() except that our * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when * there are requests in the set. New requests come in on the set's @@ -580,7 +579,7 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc) return 0; out_set: - if (pc->pc_set != NULL) { + if (pc->pc_set) { struct ptlrpc_request_set *set = pc->pc_set; spin_lock(&pc->pc_lock); @@ -631,7 +630,7 @@ void ptlrpcd_free(struct ptlrpcd_ctl *pc) out: if (pc->pc_npartners > 0) { - LASSERT(pc->pc_partners != NULL); + LASSERT(pc->pc_partners); kfree(pc->pc_partners); pc->pc_partners = NULL; @@ -645,7 +644,7 @@ static void ptlrpcd_fini(void) int i; int j; - if (ptlrpcds != NULL) { + if (ptlrpcds) { for (i = 0; i < ptlrpcds_num; i++) { if (!ptlrpcds[i]) break; diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c index db6626cab..30d9a164e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/recover.c +++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c @@ -107,14 +107,14 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* Replay all the committed open requests on committed_list first */ if (!list_empty(&imp->imp_committed_list)) { tmp = imp->imp_committed_list.prev; - req = list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); /* The last request on committed_list hasn't been replayed */ if (req->rq_transno > last_transno) { /* Since the imp_committed_list is immutable before * all of it's requests being replayed, it's safe to - * use a cursor to accelerate the search */ + * use a cursor to accelerate the search + */ imp->imp_replay_cursor = imp->imp_replay_cursor->next; while (imp->imp_replay_cursor != @@ -137,8 +137,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) } /* All the requests in committed list have been replayed, let's replay - * the imp_replay_list */ - if (req == NULL) { + * the imp_replay_list + */ + if (!req) { list_for_each_safe(tmp, pos, &imp->imp_replay_list) { req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); @@ -152,15 +153,16 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* If need to resend the last sent transno (because a reconnect * has occurred), then stop on the matching req and send it again. * If, however, the last sent transno has been committed then we - * continue replay from the next request. */ - if (req != NULL && imp->imp_resend_replay) + * continue replay from the next request. 
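ptlrpc_replay_next() above can use imp_replay_cursor because imp_committed_list is immutable until every request on it has been replayed: resuming the scan from the saved cursor avoids rescanning an LRU-ordered history from the head each time. A toy standalone illustration of cursor-resumed scanning (types and names invented for the example):

#include <stdio.h>

struct creq {
	unsigned long long transno;
	struct creq *next;
};

/* Advance *cursor to the first entry with transno > last; safe only
 * while the list is not reordered, mirroring the comment above. */
static struct creq *next_to_replay(struct creq **cursor,
				   unsigned long long last)
{
	while (*cursor && (*cursor)->transno <= last)
		*cursor = (*cursor)->next;
	return *cursor;
}

int main(void)
{
	struct creq c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
	struct creq *cursor = &a;

	printf("%llu\n", next_to_replay(&cursor, 10)->transno);	/* 20 */
	printf("%llu\n", next_to_replay(&cursor, 20)->transno);	/* 30 */
	return 0;
}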
+ */ + if (req && imp->imp_resend_replay) lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); spin_lock(&imp->imp_lock); imp->imp_resend_replay = 0; spin_unlock(&imp->imp_lock); - if (req != NULL) { + if (req) { rc = ptlrpc_replay_req(req); if (rc) { CERROR("recovery replay error %d for req %llu\n", @@ -192,9 +194,8 @@ int ptlrpc_resend(struct obd_import *imp) return -1; } - list_for_each_entry_safe(req, next, &imp->imp_sending_list, - rq_list) { - LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, + list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { + LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, "req %p bad\n", req); LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); if (!ptlrpc_no_resend(req)) @@ -249,7 +250,8 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) } /* Wait for recovery to complete and resend. If evicted, then - this request will be errored out later.*/ + * this request will be errored out later. + */ spin_lock(&failed_req->rq_lock); if (!failed_req->rq_no_resend) failed_req->rq_resend = 1; @@ -260,7 +262,7 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) * Administratively active/deactive a client. * This should only be called by the ioctl interface, currently * - the lctl deactivate and activate commands - * - echo 0/1 >> /proc/osc/XXX/active + * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active * - client umount -f (ll_umount_begin) */ int ptlrpc_set_import_active(struct obd_import *imp, int active) @@ -271,13 +273,15 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active) LASSERT(obd); /* When deactivating, mark import invalid, and abort in-flight - * requests. */ + * requests. + */ if (!active) { LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n", obd2cli_tgt(imp->imp_obd)); /* set before invalidate to avoid messages about imp_inval - * set without imp_deactive in ptlrpc_import_delay_req */ + * set without imp_deactive in ptlrpc_import_delay_req + */ spin_lock(&imp->imp_lock); imp->imp_deactive = 1; spin_unlock(&imp->imp_lock); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index 39f5261c9..187fd1d68 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -94,7 +94,7 @@ int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy) LASSERT(number < SPTLRPC_POLICY_MAX); write_lock(&policy_lock); - if (unlikely(policies[number] == NULL)) { + if (unlikely(!policies[number])) { write_unlock(&policy_lock); CERROR("%s: already unregistered\n", policy->sp_name); return -EINVAL; @@ -126,11 +126,11 @@ struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor) policy = policies[number]; if (policy && !try_module_get(policy->sp_owner)) policy = NULL; - if (policy == NULL) + if (!policy) flag = atomic_read(&loaded); read_unlock(&policy_lock); - if (policy != NULL || flag != 0 || + if (policy || flag != 0 || number != SPTLRPC_POLICY_GSS) break; @@ -327,7 +327,7 @@ static int import_sec_validate_get(struct obd_import *imp, } *sec = sptlrpc_import_sec_ref(imp); - if (*sec == NULL) { + if (!*sec) { CERROR("import %p (%s) with no sec\n", imp, ptlrpc_import_state_name(imp->imp_state)); return -EACCES; @@ -429,7 +429,7 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, reqmsg_size = req->rq_reqlen; if (reqmsg_size != 0) { reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS); - if (reqmsg == NULL) + if (!reqmsg) return -ENOMEM; memcpy(reqmsg, req->rq_reqmsg, 
reqmsg_size); } @@ -445,7 +445,8 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, /* alloc new request buffer * we don't need to alloc reply buffer here, leave it to the - * rest procedure of ptlrpc */ + * rest procedure of ptlrpc + */ if (reqmsg_size != 0) { rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size); if (!rc) { @@ -609,7 +610,7 @@ again: if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) { CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n", - req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); + req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); req_off_ctx_list(req, ctx); sptlrpc_req_replace_dead_ctx(req); ctx = req->rq_cli_ctx; @@ -798,7 +799,8 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode) spin_unlock(&sec->ps_lock); /* force SVC_NULL for context initiation rpc, SVC_INTG for context - * destruction rpc */ + * destruction rpc + */ if (unlikely(req->rq_ctx_init)) flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL); else if (unlikely(req->rq_ctx_fini)) @@ -938,7 +940,7 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) LASSERT(ctx->cc_sec); LASSERT(req->rq_repbuf); LASSERT(req->rq_repdata); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repmsg); req->rq_rep_swab_mask = 0; @@ -1000,8 +1002,8 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) { LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata == NULL); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repdata); + LASSERT(!req->rq_repmsg); LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len); if (req->rq_reply_off == 0 && @@ -1046,13 +1048,13 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, int rc; early_req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (early_req == NULL) + if (!early_req) return -ENOMEM; early_size = req->rq_nob_received; early_bufsz = size_roundup_power2(early_size); early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS); - if (early_buf == NULL) { + if (!early_buf) { rc = -ENOMEM; goto err_req; } @@ -1067,8 +1069,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, } LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata == NULL); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repdata); + LASSERT(!req->rq_repmsg); if (req->rq_reply_off != 0) { CERROR("early reply with offset %u\n", req->rq_reply_off); @@ -1354,12 +1356,12 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp, might_sleep(); - if (imp == NULL) + if (!imp) return 0; conn = imp->imp_connection; - if (svc_ctx == NULL) { + if (!svc_ctx) { struct client_obd *cliobd = &imp->imp_obd->u.cli; /* * normal import, determine flavor from rule set, except @@ -1447,11 +1449,11 @@ static void import_flush_ctx_common(struct obd_import *imp, { struct ptlrpc_sec *sec; - if (imp == NULL) + if (!imp) return; sec = sptlrpc_import_sec_ref(imp); - if (sec == NULL) + if (!sec) return; sec_cop_flush_ctx_cache(sec, uid, grace, force); @@ -1484,7 +1486,7 @@ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) LASSERT(ctx); LASSERT(ctx->cc_sec); LASSERT(ctx->cc_sec->ps_policy); - LASSERT(req->rq_reqmsg == NULL); + LASSERT(!req->rq_reqmsg); LASSERT_ATOMIC_POS(&ctx->cc_refcount); policy = ctx->cc_sec->ps_policy; @@ -1515,7 +1517,7 @@ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) LASSERT(ctx->cc_sec->ps_policy); LASSERT_ATOMIC_POS(&ctx->cc_refcount); - if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL) + if (!req->rq_reqbuf && !req->rq_clrbuf) return; policy = ctx->cc_sec->ps_policy; 
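The sec.c hunks here, like most of this patch, apply the checkpatch preference for boolean pointer tests over explicit NULL comparisons. The conversion, shown once for both directions:

	/* before */
	if (req->rq_reqmsg == NULL)
		return -EINVAL;
	LASSERT(ctx != NULL);

	/* after */
	if (!req->rq_reqmsg)
		return -EINVAL;
	LASSERT(ctx);

One deliberate exception appears in the lproc_ptlrpc.c hunks: ldebugfs_register() can return an ERR_PTR as well as NULL, so the debugfs-entry checks there move to IS_ERR_OR_NULL() rather than a plain boolean test.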
@@ -1632,7 +1634,7 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) LASSERT(ctx->cc_sec->ps_policy); LASSERT_ATOMIC_POS(&ctx->cc_refcount); - if (req->rq_repbuf == NULL) + if (!req->rq_repbuf) return; LASSERT(req->rq_repbuf_len); @@ -1684,12 +1686,13 @@ int sptlrpc_target_export_check(struct obd_export *exp, { struct sptlrpc_flavor flavor; - if (exp == NULL) + if (!exp) return 0; /* client side export has no imp_reverse, skip - * FIXME maybe we should check flavor this as well??? */ - if (exp->exp_imp_reverse == NULL) + * FIXME maybe we should check flavor this as well??? + */ + if (!exp->exp_imp_reverse) return 0; /* don't care about ctx fini rpc */ @@ -1702,11 +1705,13 @@ int sptlrpc_target_export_check(struct obd_export *exp, * the first req with the new flavor, then treat it as current flavor, * adapt reverse sec according to it. * note the first rpc with new flavor might not be with root ctx, in - * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */ + * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. + */ if (unlikely(exp->exp_flvr_changed) && flavor_allowed(&exp->exp_flvr_old[1], req)) { /* make the new flavor as "current", and old ones as - * about-to-expire */ + * about-to-expire + */ CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc); flavor = exp->exp_flvr_old[1]; @@ -1742,10 +1747,12 @@ int sptlrpc_target_export_check(struct obd_export *exp, } /* if it equals to the current flavor, we accept it, but need to - * dealing with reverse sec/ctx */ + * dealing with reverse sec/ctx + */ if (likely(flavor_allowed(&exp->exp_flvr, req))) { /* most cases should return here, we only interested in - * gss root ctx init */ + * gss root ctx init + */ if (!req->rq_auth_gss || !req->rq_ctx_init || (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && !req->rq_auth_usr_ost)) { @@ -1755,7 +1762,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, /* if flavor just changed, we should not proceed, just leave * it and current flavor will be discovered and replaced - * shortly, and let _this_ rpc pass through */ + * shortly, and let _this_ rpc pass through + */ if (exp->exp_flvr_changed) { LASSERT(exp->exp_flvr_adapt); spin_unlock(&exp->exp_lock); @@ -1809,7 +1817,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, } /* now it doesn't match the current flavor, the only chance we can - * accept it is match the old flavors which is not expired. */ + * accept it is match the old flavors which is not expired. 
+ */ if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) { if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) { if (flavor_allowed(&exp->exp_flvr_old[1], req)) { @@ -1915,9 +1924,9 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) int rc; LASSERT(msg); - LASSERT(req->rq_reqmsg == NULL); - LASSERT(req->rq_repmsg == NULL); - LASSERT(req->rq_svc_ctx == NULL); + LASSERT(!req->rq_reqmsg); + LASSERT(!req->rq_repmsg); + LASSERT(!req->rq_svc_ctx); req->rq_req_swab_mask = 0; @@ -1986,15 +1995,15 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen) if (svcpt->scp_service->srv_max_reply_size < msglen + sizeof(struct ptlrpc_reply_state)) { /* Just return failure if the size is too big */ - CERROR("size of message is too big (%zd), %d allowed", - msglen + sizeof(struct ptlrpc_reply_state), - svcpt->scp_service->srv_max_reply_size); + CERROR("size of message is too big (%zd), %d allowed\n", + msglen + sizeof(struct ptlrpc_reply_state), + svcpt->scp_service->srv_max_reply_size); return -ENOMEM; } /* failed alloc, try emergency pool */ rs = lustre_get_emerg_rs(svcpt); - if (rs == NULL) + if (!rs) return -ENOMEM; req->rq_reply_state = rs; @@ -2059,7 +2068,7 @@ void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req) { struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - if (ctx != NULL) + if (ctx) atomic_inc(&ctx->sc_refcount); } @@ -2067,7 +2076,7 @@ void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req) { struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - if (ctx == NULL) + if (!ctx) return; LASSERT_ATOMIC_POS(&ctx->sc_refcount); @@ -2156,7 +2165,7 @@ int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, * in case of privacy mode, nob_transferred needs to be adjusted. */ if (desc->bd_nob != desc->bd_nob_transferred) { - CERROR("nob %d doesn't match transferred nob %d", + CERROR("nob %d doesn't match transferred nob %d\n", desc->bd_nob, desc->bd_nob_transferred); return -EPROTO; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c index 6152c1b76..d3872b8c9 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c @@ -58,7 +58,7 @@ * bulk encryption page pools * ****************************************/ -#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) +#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *)) #define PAGES_PER_POOL (POINTERS_PER_PAGE) #define IDLE_IDX_MAX (100) @@ -120,7 +120,7 @@ static struct ptlrpc_enc_page_pool { } page_pools; /* - * /proc/fs/lustre/sptlrpc/encrypt_page_pools + * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools */ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v) { @@ -195,7 +195,7 @@ static void enc_pools_release_free_pages(long npages) while (npages--) { LASSERT(page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL); + LASSERT(page_pools.epp_pools[p_idx][g_idx]); __free_page(page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = NULL; @@ -304,7 +304,6 @@ static unsigned long enc_pools_cleanup(struct page ***pools, int npools) static inline void enc_pools_wakeup(void) { assert_spin_locked(&page_pools.epp_lock); - LASSERT(page_pools.epp_waitqlen >= 0); if (unlikely(page_pools.epp_waitqlen)) { LASSERT(waitqueue_active(&page_pools.epp_waitq)); @@ -317,7 +316,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) int p_idx, g_idx; int i; - if (desc->bd_enc_iov == NULL) + if (!desc->bd_enc_iov) return; LASSERT(desc->bd_iov_count > 
0); @@ -332,9 +331,9 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) LASSERT(page_pools.epp_pools[p_idx]); for (i = 0; i < desc->bd_iov_count; i++) { - LASSERT(desc->bd_enc_iov[i].kiov_page != NULL); + LASSERT(desc->bd_enc_iov[i].kiov_page); LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL); + LASSERT(!page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_iov[i].kiov_page; @@ -413,7 +412,7 @@ int sptlrpc_enc_pool_init(void) page_pools.epp_st_max_wait = 0; enc_pools_alloc(); - if (page_pools.epp_pools == NULL) + if (!page_pools.epp_pools) return -ENOMEM; register_shrinker(&pools_shrinker); @@ -476,7 +475,7 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed) int size = msg->lm_buflens[offset]; bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); - if (bsd == NULL) { + if (!bsd) { CERROR("Invalid bulk sec desc: size %d\n", size); return -EINVAL; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c index 4b0b81c11..a51b18bbf 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c @@ -78,7 +78,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) memset(flvr, 0, sizeof(*flvr)); - if (str == NULL || str[0] == '\0') { + if (!str || str[0] == '\0') { flvr->sf_rpc = SPTLRPC_FLVR_INVALID; return 0; } @@ -103,7 +103,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) * format: plain-hash: */ alg = strchr(bulk, ':'); - if (alg == NULL) + if (!alg) goto err_out; *alg++ = '\0'; @@ -166,7 +166,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) sptlrpc_rule_init(rule); flavor = strchr(param, '='); - if (flavor == NULL) { + if (!flavor) { CERROR("invalid param, no '='\n"); return -EINVAL; } @@ -216,7 +216,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset) { LASSERT(rset->srs_nslot || - (rset->srs_nrule == 0 && rset->srs_rules == NULL)); + (rset->srs_nrule == 0 && !rset->srs_rules)); if (rset->srs_nslot) { kfree(rset->srs_rules); @@ -241,7 +241,7 @@ static int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset) /* better use realloc() if available */ rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS); - if (rules == NULL) + if (!rules) return -ENOMEM; if (rset->srs_nrule) { @@ -450,7 +450,7 @@ static void target2fsname(const char *tgt, char *fsname, int buflen) } /* if we didn't find the pattern, treat the whole string as fsname */ - if (ptr == NULL) + if (!ptr) len = strlen(tgt); else len = ptr - tgt; @@ -467,7 +467,7 @@ static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf) sptlrpc_rule_set_free(&conf->sc_rset); list_for_each_entry_safe(conf_tgt, conf_tgt_next, - &conf->sc_tgts, sct_list) { + &conf->sc_tgts, sct_list) { sptlrpc_rule_set_free(&conf_tgt->sct_rset); list_del(&conf_tgt->sct_list); kfree(conf_tgt); @@ -517,6 +517,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, int create) { struct sptlrpc_conf *conf; + size_t len; list_for_each_entry(conf, &sptlrpc_confs, sc_list) { if (strcmp(conf->sc_fsname, fsname) == 0) @@ -530,7 +531,11 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, if (!conf) return NULL; - strcpy(conf->sc_fsname, fsname); + len = strlcpy(conf->sc_fsname, fsname, sizeof(conf->sc_fsname)); + if (len >= sizeof(conf->sc_fsname)) { + 
kfree(conf); + return NULL; + } sptlrpc_rule_set_init(&conf->sc_rset); INIT_LIST_HEAD(&conf->sc_tgts); list_add(&conf->sc_list, &sptlrpc_confs); @@ -579,13 +584,13 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg, int rc; target = lustre_cfg_string(lcfg, 1); - if (target == NULL) { + if (!target) { CERROR("missing target name\n"); return -EINVAL; } param = lustre_cfg_string(lcfg, 2); - if (param == NULL) { + if (!param) { CERROR("missing parameter\n"); return -EINVAL; } @@ -603,12 +608,12 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg, if (rc) return -EINVAL; - if (conf == NULL) { + if (!conf) { target2fsname(target, fsname, sizeof(fsname)); mutex_lock(&sptlrpc_conf_lock); conf = sptlrpc_conf_get(fsname, 0); - if (conf == NULL) { + if (!conf) { CERROR("can't find conf\n"); rc = -ENOMEM; } else { @@ -638,7 +643,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen) int len; ptr = strrchr(logname, '-'); - if (ptr == NULL || strcmp(ptr, "-sptlrpc")) { + if (!ptr || strcmp(ptr, "-sptlrpc")) { CERROR("%s is not a sptlrpc config log\n", logname); return -EINVAL; } @@ -772,7 +777,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from, mutex_lock(&sptlrpc_conf_lock); conf = sptlrpc_conf_get(name, 0); - if (conf == NULL) + if (!conf) goto out; /* convert uuid name (supposed end with _UUID) to target name */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c index 6e58d5f95..9082da06b 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c @@ -109,7 +109,7 @@ static void sec_process_ctx_list(void) while (!list_empty(&sec_gc_ctx_list)) { ctx = list_entry(sec_gc_ctx_list.next, - struct ptlrpc_cli_ctx, cc_gc_chain); + struct ptlrpc_cli_ctx, cc_gc_chain); list_del_init(&ctx->cc_gc_chain); spin_unlock(&sec_gc_ctx_list_lock); @@ -131,7 +131,7 @@ static void sec_do_gc(struct ptlrpc_sec *sec) if (unlikely(sec->ps_gc_next == 0)) { CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n", - sec, sec->ps_policy->sp_name); + sec, sec->ps_policy->sp_name); return; } @@ -166,11 +166,13 @@ again: * is not optimal. we perhaps want to use balanced binary tree * to trace each sec as order of expiry time. * another issue here is we wakeup as fixed interval instead of - * according to each sec's expiry time */ + * according to each sec's expiry time + */ mutex_lock(&sec_gc_mutex); list_for_each_entry(sec, &sec_gc_list, ps_gc_list) { /* if someone is waiting to be deleted, let it - * proceed as soon as possible. */ + * proceed as soon as possible. 
+ */ if (atomic_read(&sec_gc_wait_del)) { CDEBUG(D_SEC, "deletion pending, start over\n"); mutex_unlock(&sec_gc_mutex); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c index bda9a77af..e610a8ddd 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c @@ -82,7 +82,7 @@ static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v) if (cli->cl_import) sec = sptlrpc_import_sec_ref(cli->cl_import); - if (sec == NULL) + if (!sec) goto out; sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)); @@ -121,7 +121,7 @@ static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v) if (cli->cl_import) sec = sptlrpc_import_sec_ref(cli->cl_import); - if (sec == NULL) + if (!sec) goto out; if (sec->ps_policy->sp_cops->display) @@ -178,7 +178,7 @@ int sptlrpc_lproc_init(void) { int rc; - LASSERT(sptlrpc_debugfs_dir == NULL); + LASSERT(!sptlrpc_debugfs_dir); sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root, sptlrpc_lprocfs_vars, NULL); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c index ebfa6092b..40e5349de 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c @@ -250,7 +250,7 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec, alloc_size = size_roundup_power2(newmsg_size); newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS); - if (newbuf == NULL) + if (!newbuf) return -ENOMEM; /* Must lock this, so that otherwise unprotected change of @@ -258,7 +258,8 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec, * imp_replay_list traversing threads. See LU-3333 * This is a bandaid at best, we really need to deal with this * in request enlarging code before unpacking that's already - * there */ + * there + */ if (req->rq_import) spin_lock(&req->rq_import->imp_lock); memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen); @@ -319,7 +320,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize) LASSERT(rs->rs_size >= rs_size); } else { rs = libcfs_kvzalloc(rs_size, GFP_NOFS); - if (rs == NULL) + if (!rs) return -ENOMEM; rs->rs_size = rs_size; diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c index 905a41451..6276bf59c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c @@ -104,7 +104,7 @@ static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed) return -EPROTO; bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE); - if (bsd == NULL) { + if (!bsd) { CERROR("bulk sec desc has short size %d\n", lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF)); return -EPROTO; @@ -227,7 +227,7 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) swabbed = ptlrpc_rep_need_swab(req); phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (phdr == NULL) { + if (!phdr) { CERROR("missing plain header\n"); return -EPROTO; } @@ -264,7 +264,8 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) } } else { /* whether we sent with bulk or not, we expect the same - * in reply, except for early reply */ + * in reply, except for early reply + */ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, phdr->ph_flags & PLAIN_FL_BULK)) { @@ -419,7 +420,7 @@ void plain_destroy_sec(struct ptlrpc_sec *sec) LASSERT(sec->ps_import); LASSERT(atomic_read(&sec->ps_refcount) == 0); 
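[Note: one change a few hunks back is more than style. sptlrpc_conf_get() in sec_config.c stops copying the filesystem name with an unbounded strcpy(): the copy now goes through strlcpy(), and the freshly allocated conf is released if the name would not fit. A minimal sketch of that truncation-checked copy; struct demo_conf and demo_conf_new() are hypothetical stand-ins with an arbitrary buffer size:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_conf {
	char dc_fsname[16];	/* fixed-size name buffer, as in the patch */
};

static struct demo_conf *demo_conf_new(const char *fsname)
{
	struct demo_conf *conf;
	size_t len;

	conf = kzalloc(sizeof(*conf), GFP_NOFS);
	if (!conf)
		return NULL;

	/* strlcpy() returns the length of the source string, so a value
	 * >= the destination size means the copy was truncated
	 */
	len = strlcpy(conf->dc_fsname, fsname, sizeof(conf->dc_fsname));
	if (len >= sizeof(conf->dc_fsname)) {
		kfree(conf);	/* refuse over-long names, don't silently clip */
		return NULL;
	}
	return conf;
}

Failing the lookup on truncation is safer than the old behaviour, where an over-long fsname would have overflowed sc_fsname outright.]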
LASSERT(atomic_read(&sec->ps_nctx) == 0); - LASSERT(plsec->pls_ctx == NULL); + LASSERT(!plsec->pls_ctx); class_import_put(sec->ps_import); @@ -468,7 +469,7 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp, /* install ctx immediately if this is a reverse sec */ if (svc_ctx) { ctx = plain_sec_install_ctx(plsec); - if (ctx == NULL) { + if (!ctx) { plain_destroy_sec(sec); return NULL; } @@ -492,7 +493,7 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec, atomic_inc(&ctx->cc_refcount); read_unlock(&plsec->pls_lock); - if (unlikely(ctx == NULL)) + if (unlikely(!ctx)) ctx = plain_sec_install_ctx(plsec); return ctx; @@ -665,7 +666,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, newbuf_size = size_roundup_power2(newbuf_size); newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS); - if (newbuf == NULL) + if (!newbuf) return -ENOMEM; /* Must lock this, so that otherwise unprotected change of @@ -673,7 +674,8 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, * imp_replay_list traversing threads. See LU-3333 * This is a bandaid at best, we really need to deal with this * in request enlarging code before unpacking that's already - * there */ + * there + */ if (req->rq_import) spin_lock(&req->rq_import->imp_lock); @@ -732,7 +734,7 @@ int plain_accept(struct ptlrpc_request *req) swabbed = ptlrpc_req_need_swab(req); phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (phdr == NULL) { + if (!phdr) { CERROR("missing plain header\n"); return -EPROTO; } @@ -801,7 +803,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize) LASSERT(rs->rs_size >= rs_size); } else { rs = libcfs_kvzalloc(rs_size, GFP_NOFS); - if (rs == NULL) + if (!rs) return -ENOMEM; rs->rs_size = rs_size; diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 8598300a6..1bbd1d39c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -77,7 +77,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, svcpt->scp_cpt)); - if (rqbd == NULL) + if (!rqbd) return NULL; rqbd->rqbd_svcpt = svcpt; @@ -89,7 +89,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) svcpt->scp_cpt, svc->srv_buf_size, GFP_KERNEL); - if (rqbd->rqbd_buffer == NULL) { + if (!rqbd->rqbd_buffer) { kfree(rqbd); return NULL; } @@ -144,13 +144,14 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post) for (i = 0; i < svc->srv_nbuf_per_group; i++) { /* NB: another thread might have recycled enough rqbds, we - * need to make sure it wouldn't over-allocate, see LU-1212. */ + * need to make sure it wouldn't over-allocate, see LU-1212. + */ if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group) break; rqbd = ptlrpc_alloc_rqbd(svcpt); - if (rqbd == NULL) { + if (!rqbd) { CERROR("%s: Can't allocate request buffer\n", svc->srv_name); rc = -ENOMEM; @@ -298,8 +299,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) } rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); list_del(&rqbd->rqbd_list); /* assume we will post successfully */ @@ -322,7 +323,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); /* Don't complain if no request buffers are posted right now; LNET - * won't drop requests because we set the portal lazy! 
*/ + * won't drop requests because we set the portal lazy! + */ spin_unlock(&svcpt->scp_lock); @@ -363,13 +365,15 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, init = max_t(int, init, tc->tc_nthrs_init); /* NB: please see comments in lustre_lnet.h for definition - * details of these members */ + * details of these members + */ LASSERT(tc->tc_nthrs_max != 0); if (tc->tc_nthrs_user != 0) { /* In case there is a reason to test a service with many * threads, we give a less strict check here, it can - * be up to 8 * nthrs_max */ + * be up to 8 * nthrs_max + */ total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user); nthrs = total / svc->srv_ncpts; init = max(init, nthrs); @@ -379,7 +383,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, total = tc->tc_nthrs_max; if (tc->tc_nthrs_base == 0) { /* don't care about base threads number per partition, - * this is most for non-affinity service */ + * this is most for non-affinity service + */ nthrs = total / svc->srv_ncpts; goto out; } @@ -390,7 +395,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, /* NB: Increase the base number if it's single partition * and total number of cores/HTs is larger or equal to 4. - * result will always < 2 * nthrs_base */ + * result will always < 2 * nthrs_base + */ weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY); for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */ (tc->tc_nthrs_base >> i) != 0; i++) @@ -490,7 +496,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_reqs_array = kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (array->paa_reqs_array == NULL) + if (!array->paa_reqs_array) return -ENOMEM; for (index = 0; index < size; index++) @@ -499,14 +505,15 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_reqs_count = kzalloc_node(sizeof(__u32) * size, GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (array->paa_reqs_count == NULL) + if (!array->paa_reqs_count) goto free_reqs_array; setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer, (unsigned long)svcpt); /* At SOW, service time should be quick; 10s seems generous. If client - * timeout is less than this, we'll be sending an early reply. */ + * timeout is less than this, we'll be sending an early reply. + */ at_init(&svcpt->scp_at_estimate, 10, 0); /* assign this before call ptlrpc_grow_req_bufs */ @@ -514,7 +521,8 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, /* Now allocate the request buffers, but don't post them now */ rc = ptlrpc_grow_req_bufs(svcpt, 0); /* We shouldn't be under memory pressure at startup, so - * fail if we can't allocate all our buffers at this time. */ + * fail if we can't allocate all our buffers at this time. + */ if (rc != 0) goto free_reqs_count; @@ -556,14 +564,14 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, LASSERT(conf->psc_thr.tc_ctx_tags != 0); cptable = cconf->cc_cptable; - if (cptable == NULL) + if (!cptable) cptable = cfs_cpt_table; if (!conf->psc_thr.tc_cpu_affinity) { ncpts = 1; } else { ncpts = cfs_cpt_number(cptable); - if (cconf->cc_pattern != NULL) { + if (cconf->cc_pattern) { struct cfs_expr_list *el; rc = cfs_expr_list_parse(cconf->cc_pattern, @@ -632,11 +640,11 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, if (!conf->psc_thr.tc_cpu_affinity) cpt = CFS_CPT_ANY; else - cpt = cpts != NULL ? cpts[i] : i; + cpt = cpts ? 
cpts[i] : i; svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS, cfs_cpt_spread_node(cptable, cpt)); - if (svcpt == NULL) { + if (!svcpt) { rc = -ENOMEM; goto failed; } @@ -696,7 +704,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) LASSERT(list_empty(&req->rq_timed_list)); /* DEBUG_REQ() assumes the reply state of a request with a valid - * ref will not be destroyed until that reference is dropped. */ + * ref will not be destroyed until that reference is dropped. + */ ptlrpc_req_drop_rs(req); sptlrpc_svc_ctx_decref(req); @@ -704,7 +713,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) if (req != &req->rq_rqbd->rqbd_req) { /* NB request buffers use an embedded * req if the incoming req unlinked the - * MD; this isn't one of them! */ + * MD; this isn't one of them! + */ ptlrpc_request_cache_free(req); } } @@ -728,7 +738,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) if (req->rq_at_linked) { spin_lock(&svcpt->scp_at_lock); /* recheck with lock, in case it's unlinked by - * ptlrpc_at_check_timed() */ + * ptlrpc_at_check_timed() + */ if (likely(req->rq_at_linked)) ptlrpc_at_remove_timed(req); spin_unlock(&svcpt->scp_at_lock); @@ -755,20 +766,22 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) svcpt->scp_hist_nrqbds++; /* cull some history? - * I expect only about 1 or 2 rqbds need to be recycled here */ + * I expect only about 1 or 2 rqbds need to be recycled here + */ while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) { rqbd = list_entry(svcpt->scp_hist_rqbds.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); list_del(&rqbd->rqbd_list); svcpt->scp_hist_nrqbds--; /* remove rqbd's reqs from svc's req history while - * I've got the service lock */ + * I've got the service lock + */ list_for_each(tmp, &rqbd->rqbd_reqs) { req = list_entry(tmp, struct ptlrpc_request, - rq_list); + rq_list); /* Track the highest culled req seq */ if (req->rq_history_seq > svcpt->scp_hist_seq_culled) { @@ -782,8 +795,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) { req = list_entry(rqbd->rqbd_reqs.next, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); list_del(&req->rq_list); ptlrpc_server_free_request(req); } @@ -795,8 +808,7 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) */ LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0); - list_add_tail(&rqbd->rqbd_list, - &svcpt->scp_rqbd_idle); + list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); } spin_unlock(&svcpt->scp_lock); @@ -846,7 +858,7 @@ static void ptlrpc_server_finish_active_request( ptlrpc_nrs_req_finalize(req); - if (req->rq_export != NULL) + if (req->rq_export) class_export_rpc_dec(req->rq_export); ptlrpc_server_finish_request(svcpt, req); @@ -869,13 +881,13 @@ static int ptlrpc_check_req(struct ptlrpc_request *req) req->rq_export->exp_conn_cnt); return -EEXIST; } - if (unlikely(obd == NULL || obd->obd_fail)) { + if (unlikely(!obd || obd->obd_fail)) { /* * Failing over, don't handle any more reqs, send * error response instead. */ CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", - req, (obd != NULL) ? obd->obd_name : "unknown"); + req, obd ? 
obd->obd_name : "unknown"); rc = -ENODEV; } else if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) { @@ -942,13 +954,13 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) div_u64_rem(req->rq_deadline, array->paa_size, &index); if (array->paa_reqs_count[index] > 0) { /* latest rpcs will have the latest deadlines in the list, - * so search backward. */ - list_for_each_entry_reverse(rq, - &array->paa_reqs_array[index], - rq_timed_list) { + * so search backward. + */ + list_for_each_entry_reverse(rq, &array->paa_reqs_array[index], + rq_timed_list) { if (req->rq_deadline >= rq->rq_deadline) { list_add(&req->rq_timed_list, - &rq->rq_timed_list); + &rq->rq_timed_list); break; } } @@ -956,8 +968,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) /* Add the request at the head of the list */ if (list_empty(&req->rq_timed_list)) - list_add(&req->rq_timed_list, - &array->paa_reqs_array[index]); + list_add(&req->rq_timed_list, &array->paa_reqs_array[index]); spin_lock(&req->rq_lock); req->rq_at_linked = 1; @@ -1003,7 +1014,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) int rc; /* deadline is when the client expects us to reply, margin is the - difference between clients' and servers' expectations */ + * difference between clients' and servers' expectations + */ DEBUG_REQ(D_ADAPTTO, req, "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d", AT_OFF ? "AT off - not " : "", @@ -1027,12 +1039,14 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) } /* Fake our processing time into the future to ask the clients - * for some extra amount of time */ + * for some extra amount of time + */ at_measured(&svcpt->scp_at_estimate, at_extra + ktime_get_real_seconds() - req->rq_arrival_time.tv_sec); /* Check to see if we've actually increased the deadline - - * we may be past adaptive_max */ + * we may be past adaptive_max + */ if (req->rq_deadline >= req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate)) { DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n", @@ -1044,7 +1058,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate); reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS); - if (reqcopy == NULL) + if (!reqcopy) return -ENOMEM; reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS); if (!reqmsg) { @@ -1074,7 +1088,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) /* Connection ref */ reqcopy->rq_export = class_conn2export( lustre_msg_get_handle(reqcopy->rq_reqmsg)); - if (reqcopy->rq_export == NULL) { + if (!reqcopy->rq_export) { rc = -ENODEV; goto out; } @@ -1102,7 +1116,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) } /* Free the (early) reply state from lustre_pack_reply. 
- (ptlrpc_send_reply takes it's own rs ref, so this is safe here) */ + * (ptlrpc_send_reply takes it's own rs ref, so this is safe here) + */ ptlrpc_req_drop_rs(reqcopy); out_put: @@ -1117,8 +1132,9 @@ out_free: } /* Send early replies to everybody expiring within at_early_margin - asking for at_extra time */ -static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) + * asking for at_extra time + */ +static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) { struct ptlrpc_at_array *array = &svcpt->scp_at_array; struct ptlrpc_request *rq, *n; @@ -1132,14 +1148,14 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) spin_lock(&svcpt->scp_at_lock); if (svcpt->scp_at_check == 0) { spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime); svcpt->scp_at_check = 0; if (array->paa_count == 0) { spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } /* The timer went off, but maybe the nearest rpc already completed. */ @@ -1148,20 +1164,20 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) /* We've still got plenty of time. Reset the timer. */ ptlrpc_at_set_timer(svcpt); spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } /* We're close to a timeout, and we don't know how much longer the - server will take. Send early replies to everyone expiring soon. */ + * server will take. Send early replies to everyone expiring soon. + */ INIT_LIST_HEAD(&work_list); deadline = -1; div_u64_rem(array->paa_deadline, array->paa_size, &index); count = array->paa_count; while (count > 0) { count -= array->paa_reqs_count[index]; - list_for_each_entry_safe(rq, n, - &array->paa_reqs_array[index], - rq_timed_list) { + list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index], + rq_timed_list) { if (rq->rq_deadline > now + at_early_margin) { /* update the earliest deadline */ if (deadline == -1 || @@ -1194,7 +1210,8 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) first, at_extra, counter); if (first < 0) { /* We're already past request deadlines before we even get a - chance to send early replies */ + * chance to send early replies + */ LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n", svcpt->scp_service->srv_name); CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n", @@ -1204,10 +1221,11 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) } /* we took additional refcount so entries can't be deleted from list, no - * locking is needed */ + * locking is needed + */ while (!list_empty(&work_list)) { rq = list_entry(work_list.next, struct ptlrpc_request, - rq_timed_list); + rq_timed_list); list_del_init(&rq->rq_timed_list); if (ptlrpc_at_send_early_reply(rq) == 0) @@ -1215,8 +1233,6 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) ptlrpc_server_drop_request(rq); } - - return 1; /* return "did_something" for liblustre */ } /** @@ -1237,7 +1253,8 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, if (req->rq_export && req->rq_ops) { /* Perform request specific check. We should do this check * before the request is added into exp_hp_rpcs list otherwise - * it may hit swab race at LU-1044. */ + * it may hit swab race at LU-1044. 
+ */ if (req->rq_ops->hpreq_check) { rc = req->rq_ops->hpreq_check(req); /** @@ -1257,8 +1274,7 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, } spin_lock_bh(&req->rq_export->exp_rpc_lock); - list_add(&req->rq_exp_list, - &req->rq_export->exp_hp_rpcs); + list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs); spin_unlock_bh(&req->rq_export->exp_rpc_lock); } @@ -1272,7 +1288,8 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req) { if (req->rq_export && req->rq_ops) { /* refresh lock timeout again so that client has more - * room to send lock cancel RPC. */ + * room to send lock cancel RPC. + */ if (req->rq_ops->hpreq_fini) req->rq_ops->hpreq_fini(req); @@ -1316,7 +1333,7 @@ static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt, CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { /* leave just 1 thread for normal RPCs */ running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) + if (svcpt->scp_service->srv_ops.so_hpreq_handler) running += 1; } @@ -1355,7 +1372,7 @@ static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt, CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { /* leave just 1 thread for normal RPCs */ running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) + if (svcpt->scp_service->srv_ops.so_hpreq_handler) running += 1; } @@ -1405,7 +1422,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) if (ptlrpc_server_high_pending(svcpt, force)) { req = ptlrpc_nrs_req_get_nolock(svcpt, true, force); - if (req != NULL) { + if (req) { svcpt->scp_hreq_count++; goto got_request; } @@ -1413,7 +1430,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) if (ptlrpc_server_normal_pending(svcpt, force)) { req = ptlrpc_nrs_req_get_nolock(svcpt, false, force); - if (req != NULL) { + if (req) { svcpt->scp_hreq_count = 0; goto got_request; } @@ -1457,11 +1474,12 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, } req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); + struct ptlrpc_request, rq_list); list_del_init(&req->rq_list); svcpt->scp_nreqs_incoming--; /* Consider this still a "queued" request as far as stats are - * concerned */ + * concerned + */ spin_unlock(&svcpt->scp_lock); /* go through security check/transform */ @@ -1598,7 +1616,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, int fail_opc = 0; request = ptlrpc_server_request_get(svcpt, false); - if (request == NULL) + if (!request) return 0; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT)) @@ -1620,7 +1638,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, timediff = timespec64_sub(work_start, request->rq_arrival_time); timediff_usecs = timediff.tv_sec * USEC_PER_SEC + timediff.tv_nsec / NSEC_PER_USEC; - if (likely(svc->srv_stats != NULL)) { + if (likely(svc->srv_stats)) { lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR, timediff_usecs); lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR, @@ -1652,7 +1670,8 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, } /* Discard requests queued for longer than the deadline. - The deadline is increased if we send an early reply. */ + * The deadline is increased if we send an early reply. 
+ */ if (ktime_get_real_seconds() > request->rq_deadline) { DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n", libcfs_id2str(request->rq_peer), @@ -1718,7 +1737,7 @@ put_conn: request->rq_status, (request->rq_repmsg ? lustre_msg_get_status(request->rq_repmsg) : -999)); - if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) { + if (likely(svc->srv_stats && request->rq_reqmsg)) { __u32 op = lustre_msg_get_opc(request->rq_reqmsg); int opc = opcode_offset(op); @@ -1804,7 +1823,8 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) if (nlocks == 0 && !been_handled) { /* If we see this, we should already have seen the warning - * in mds_steal_ack_locks() */ + * in mds_steal_ack_locks() + */ CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n", rs, rs->rs_xid, rs->rs_transno, rs->rs_opc, @@ -1858,7 +1878,8 @@ ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt) /* CAVEAT EMPTOR: We might be allocating buffers here because we've * allowed the request history to grow out of control. We could put a * sanity check on that here and cull some history if we need the - * space. */ + * space. + */ if (avail <= low_water) ptlrpc_grow_req_bufs(svcpt, 1); @@ -1992,7 +2013,8 @@ static int ptlrpc_main(void *arg) /* NB: we will call cfs_cpt_bind() for all threads, because we * might want to run lustre server only on a subset of system CPUs, - * in that case ->scp_cpt is CFS_CPT_ANY */ + * in that case ->scp_cpt is CFS_CPT_ANY + */ rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt); if (rc != 0) { CWARN("%s: failed to bind %s on CPT %d\n", @@ -2008,7 +2030,7 @@ static int ptlrpc_main(void *arg) set_current_groups(ginfo); put_group_info(ginfo); - if (svc->srv_ops.so_thr_init != NULL) { + if (svc->srv_ops.so_thr_init) { rc = svc->srv_ops.so_thr_init(thread); if (rc) goto out; @@ -2035,7 +2057,7 @@ static int ptlrpc_main(void *arg) continue; CERROR("Failed to post rqbd for %s on CPT %d: %d\n", - svc->srv_name, svcpt->scp_cpt, rc); + svc->srv_name, svcpt->scp_cpt, rc); goto out_srv_fini; } @@ -2057,7 +2079,8 @@ static int ptlrpc_main(void *arg) /* SVC_STOPPING may already be set here if someone else is trying * to stop the service while this new thread has been dynamically * forked. We still set SVC_RUNNING to let our creator know that - * we are now running, however we will exit as soon as possible */ + * we are now running, however we will exit as soon as possible + */ thread_add_flags(thread, SVC_RUNNING); svcpt->scp_nthrs_running++; spin_unlock(&svcpt->scp_lock); @@ -2116,7 +2139,8 @@ static int ptlrpc_main(void *arg) ptlrpc_server_post_idle_rqbds(svcpt) < 0) { /* I just failed to repost request buffers. 
* Wait for a timeout (unless something else - * happens) before I try again */ + * happens) before I try again + */ svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10; CDEBUG(D_RPCTRACE, "Posted buffers: %d\n", svcpt->scp_nrqbds_posted); @@ -2132,10 +2156,10 @@ out_srv_fini: /* * deconstruct service specific state created by ptlrpc_start_thread() */ - if (svc->srv_ops.so_thr_done != NULL) + if (svc->srv_ops.so_thr_done) svc->srv_ops.so_thr_done(thread); - if (env != NULL) { + if (env) { lu_context_fini(&env->le_ctx); kfree(env); } @@ -2183,7 +2207,7 @@ static int ptlrpc_hr_main(void *arg) { struct ptlrpc_hr_thread *hrt = arg; struct ptlrpc_hr_partition *hrp = hrt->hrt_partition; - LIST_HEAD (replies); + LIST_HEAD(replies); char threadname[20]; int rc; @@ -2206,9 +2230,8 @@ static int ptlrpc_hr_main(void *arg) while (!list_empty(&replies)) { struct ptlrpc_reply_state *rs; - rs = list_entry(replies.prev, - struct ptlrpc_reply_state, - rs_list); + rs = list_entry(replies.prev, struct ptlrpc_reply_state, + rs_list); list_del_init(&rs->rs_list); ptlrpc_handle_rs(rs); } @@ -2229,18 +2252,18 @@ static void ptlrpc_stop_hr_threads(void) ptlrpc_hr.hr_stopping = 1; cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (hrp->hrp_thrs == NULL) + if (!hrp->hrp_thrs) continue; /* uninitialized */ for (j = 0; j < hrp->hrp_nthrs; j++) wake_up_all(&hrp->hrp_thrs[j].hrt_waitq); } cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (hrp->hrp_thrs == NULL) + if (!hrp->hrp_thrs) continue; /* uninitialized */ wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstopped) == - atomic_read(&hrp->hrp_nstarted)); + atomic_read(&hrp->hrp_nstopped) == + atomic_read(&hrp->hrp_nstarted)); } } @@ -2255,24 +2278,26 @@ static int ptlrpc_start_hr_threads(void) for (j = 0; j < hrp->hrp_nthrs; j++) { struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j]; - - rc = PTR_ERR(kthread_run(ptlrpc_hr_main, - &hrp->hrp_thrs[j], - "ptlrpc_hr%02d_%03d", - hrp->hrp_cpt, - hrt->hrt_id)); - if (IS_ERR_VALUE(rc)) + struct task_struct *task; + + task = kthread_run(ptlrpc_hr_main, + &hrp->hrp_thrs[j], + "ptlrpc_hr%02d_%03d", + hrp->hrp_cpt, hrt->hrt_id); + if (IS_ERR(task)) { + rc = PTR_ERR(task); break; + } } wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstarted) == j); - if (!IS_ERR_VALUE(rc)) - continue; + atomic_read(&hrp->hrp_nstarted) == j); - CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n", - i, j, rc); - ptlrpc_stop_hr_threads(); - return rc; + if (rc < 0) { + CERROR("cannot start reply handler thread %d:%d: rc = %d\n", + i, j, rc); + ptlrpc_stop_hr_threads(); + return rc; + } } return 0; } @@ -2281,7 +2306,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) { struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; - LIST_HEAD (zombie); + LIST_HEAD(zombie); CDEBUG(D_INFO, "Stopping threads for service %s\n", svcpt->scp_service->srv_name); @@ -2298,7 +2323,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) while (!list_empty(&svcpt->scp_threads)) { thread = list_entry(svcpt->scp_threads.next, - struct ptlrpc_thread, t_link); + struct ptlrpc_thread, t_link); if (thread_is_stopped(thread)) { list_del(&thread->t_link); list_add(&thread->t_link, &zombie); @@ -2333,7 +2358,7 @@ static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service != NULL) + if (svcpt->scp_service) ptlrpc_svcpt_stop_threads(svcpt); } } @@ -2374,10 +2399,9 @@ int ptlrpc_start_thread(struct 
ptlrpc_service_part *svcpt, int wait) struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; struct ptlrpc_service *svc; + struct task_struct *task; int rc; - LASSERT(svcpt != NULL); - svc = svcpt->scp_service; CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n", @@ -2396,7 +2420,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) thread = kzalloc_node(sizeof(*thread), GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, svcpt->scp_cpt)); - if (thread == NULL) + if (!thread) return -ENOMEM; init_waitqueue_head(&thread->t_ctl_waitq); @@ -2409,7 +2433,8 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) if (svcpt->scp_nthrs_starting != 0) { /* serialize starting because some modules (obdfilter) - * might require unique and contiguous t_id */ + * might require unique and contiguous t_id + */ LASSERT(svcpt->scp_nthrs_starting == 1); spin_unlock(&svcpt->scp_lock); kfree(thread); @@ -2442,9 +2467,10 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) } CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); - rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name)); - if (IS_ERR_VALUE(rc)) { - CERROR("cannot start thread '%s': rc %d\n", + task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start thread '%s': rc = %d\n", thread->t_name, rc); spin_lock(&svcpt->scp_lock); --svcpt->scp_nthrs_starting; @@ -2488,7 +2514,7 @@ int ptlrpc_hr_init(void) ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table, sizeof(*hrp)); - if (ptlrpc_hr.hr_partitions == NULL) + if (!ptlrpc_hr.hr_partitions) return -ENOMEM; init_waitqueue_head(&ptlrpc_hr.hr_waitq); @@ -2509,7 +2535,7 @@ int ptlrpc_hr_init(void) kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS, cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table, i)); - if (hrp->hrp_thrs == NULL) { + if (!hrp->hrp_thrs) { rc = -ENOMEM; goto out; } @@ -2537,7 +2563,7 @@ void ptlrpc_hr_fini(void) struct ptlrpc_hr_partition *hrp; int i; - if (ptlrpc_hr.hr_partitions == NULL) + if (!ptlrpc_hr.hr_partitions) return; ptlrpc_stop_hr_threads(); @@ -2577,7 +2603,7 @@ ptlrpc_service_del_atimer(struct ptlrpc_service *svc) /* early disarm AT timer... */ ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service != NULL) + if (svcpt->scp_service) del_timer(&svcpt->scp_at_timer); } } @@ -2592,18 +2618,20 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) int i; /* All history will be culled when the next request buffer is - * freed in ptlrpc_service_purge_all() */ + * freed in ptlrpc_service_purge_all() + */ svc->srv_hist_nrqbds_cpt_max = 0; rc = LNetClearLazyPortal(svc->srv_req_portal); LASSERT(rc == 0); ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* Unlink all the request buffers. 
This forces a 'final' - * event with its 'unlink' flag set for each posted rqbd */ + * event with its 'unlink' flag set for each posted rqbd + */ list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, rqbd_list) { rc = LNetMDUnlink(rqbd->rqbd_md_h); @@ -2612,17 +2640,19 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) } ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* Wait for the network to release any buffers - * it's currently filling */ + * it's currently filling + */ spin_lock(&svcpt->scp_lock); while (svcpt->scp_nrqbds_posted != 0) { spin_unlock(&svcpt->scp_lock); /* Network access will complete in finite time but * the HUGE timeout lets us CWARN for visibility - * of sluggish NALs */ + * of sluggish LNDs + */ lwi = LWI_TIMEOUT_INTERVAL( cfs_time_seconds(LONG_UNLINK), cfs_time_seconds(1), NULL, NULL); @@ -2648,13 +2678,13 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; spin_lock(&svcpt->scp_rep_lock); while (!list_empty(&svcpt->scp_rep_active)) { rs = list_entry(svcpt->scp_rep_active.next, - struct ptlrpc_reply_state, rs_list); + struct ptlrpc_reply_state, rs_list); spin_lock(&rs->rs_lock); ptlrpc_schedule_difficult_reply(rs); spin_unlock(&rs->rs_lock); @@ -2663,10 +2693,11 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) /* purge the request queue. NB No new replies (rqbds * all unlinked) and no service threads, so I'm the only - * thread noodling the request queue now */ + * thread noodling the request queue now + */ while (!list_empty(&svcpt->scp_req_incoming)) { req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); + struct ptlrpc_request, rq_list); list_del(&req->rq_list); svcpt->scp_nreqs_incoming--; @@ -2682,24 +2713,26 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) LASSERT(svcpt->scp_nreqs_incoming == 0); LASSERT(svcpt->scp_nreqs_active == 0); /* history should have been culled by - * ptlrpc_server_finish_request */ + * ptlrpc_server_finish_request + */ LASSERT(svcpt->scp_hist_nrqbds == 0); /* Now free all the request buffers since nothing - * references them any more... */ + * references them any more... 
+ */ while (!list_empty(&svcpt->scp_rqbd_idle)) { rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); ptlrpc_free_rqbd(rqbd); } ptlrpc_wait_replies(svcpt); while (!list_empty(&svcpt->scp_rep_idle)) { rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, - rs_list); + struct ptlrpc_reply_state, + rs_list); list_del(&rs->rs_list); kvfree(rs); } @@ -2714,7 +2747,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* In case somebody rearmed this in the meantime */ @@ -2730,7 +2763,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc) ptlrpc_service_for_each_part(svcpt, i, svc) kfree(svcpt); - if (svc->srv_cpts != NULL) + if (svc->srv_cpts) cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts); kfree(svc); diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c index 61d9ca93c..3ffd2d91f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c +++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c @@ -333,17 +333,9 @@ void lustre_assert_wire_constants(void) CLASSERT(LDLM_MAX_TYPE == 14); CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0); CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1); - LASSERTF(UPDATE_OBJ == 1000, "found %lld\n", - (long long)UPDATE_OBJ); - LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n", - (long long)UPDATE_LAST_OPC); CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2); CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3); CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3); - CLASSERT(LQUOTA_TYPE_USR == 0); - CLASSERT(LQUOTA_TYPE_GRP == 1); - CLASSERT(LQUOTA_RES_MD == 1); - CLASSERT(LQUOTA_RES_DT == 2); LASSERTF(OBD_PING == 400, "found %lld\n", (long long)OBD_PING); LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n", @@ -437,30 +429,6 @@ void lustre_assert_wire_constants(void) (unsigned)LMAC_NOT_IN_OI); LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n", (unsigned)LMAC_FID_ON_OST); - LASSERTF(OBJ_CREATE == 1, "found %lld\n", - (long long)OBJ_CREATE); - LASSERTF(OBJ_DESTROY == 2, "found %lld\n", - (long long)OBJ_DESTROY); - LASSERTF(OBJ_REF_ADD == 3, "found %lld\n", - (long long)OBJ_REF_ADD); - LASSERTF(OBJ_REF_DEL == 4, "found %lld\n", - (long long)OBJ_REF_DEL); - LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n", - (long long)OBJ_ATTR_SET); - LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n", - (long long)OBJ_ATTR_GET); - LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n", - (long long)OBJ_XATTR_SET); - LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n", - (long long)OBJ_XATTR_GET); - LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n", - (long long)OBJ_INDEX_LOOKUP); - LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n", - (long long)OBJ_INDEX_LOOKUP); - LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n", - (long long)OBJ_INDEX_INSERT); - LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n", - (long long)OBJ_INDEX_DELETE); /* Checks for struct ost_id */ LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n", @@ -587,9 +555,6 @@ void lustre_assert_wire_constants(void) (long long)LDF_COLLIDE); LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n", (long long)LU_PAGE_SIZE); - /* Checks for union lu_page */ - LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n", - (long long)(int)sizeof(union lu_page)); /* Checks for struct lustre_handle */ LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n", @@ -1535,11 +1500,6 @@ void lustre_assert_wire_constants(void) 
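[Note: the wiretest.c hunks here and below delete layout assertions for wire structures this client code no longer uses (the quota bodies, lquota records, the idx_info/lu_idxpage index-transfer structures and the update_buf/update_reply records). The checks that remain all follow one pattern: pin every structure's size and every field's offset so that an accidental ABI change is caught immediately rather than corrupting peers on the wire. A compile-time sketch of the same invariant using plain BUILD_BUG_ON(); struct demo_wire is hypothetical:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_wire {		/* hypothetical on-wire record */
	__u64	dw_seq;
	__u32	dw_flags;
	__u32	dw_pad;
};

static inline void demo_assert_wire_constants(void)
{
	/* sizes and offsets are protocol, not implementation detail:
	 * any change must fail the build
	 */
	BUILD_BUG_ON(sizeof(struct demo_wire) != 16);
	BUILD_BUG_ON(offsetof(struct demo_wire, dw_seq) != 0);
	BUILD_BUG_ON(offsetof(struct demo_wire, dw_flags) != 8);
	BUILD_BUG_ON(offsetof(struct demo_wire, dw_pad) != 12);
}

wiretest.c prefers run-time LASSERTF() over BUILD_BUG_ON() so the failure message can report the value actually found, but the invariant being enforced is the same.]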
LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n", (long long)(int)sizeof(union lquota_id)); - LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n", - (long long)QUOTABLOCK_BITS); - LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n", - (long long)QUOTABLOCK_SIZE); - /* Checks for struct obd_quotactl */ LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n", (long long)(int)sizeof(struct obd_quotactl)); @@ -1642,138 +1602,6 @@ void lustre_assert_wire_constants(void) LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n", Q_FINVALIDATE); - /* Checks for struct lquota_acct_rec */ - LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n", - (long long)(int)sizeof(struct lquota_acct_rec)); - LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_acct_rec, bspace)); - LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace)); - LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_acct_rec, ispace)); - LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace)); - - /* Checks for struct lquota_glb_rec */ - LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n", - (long long)(int)sizeof(struct lquota_glb_rec)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_time)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) == 24, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted)); - - /* Checks for struct lquota_slv_rec */ - LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n", - (long long)(int)sizeof(struct lquota_slv_rec)); - LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted)); - LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted)); - - /* Checks for struct idx_info */ - LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n", - (long long)(int)sizeof(struct idx_info)); - LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_magic)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info 
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_magic));
-	LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_flags));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_flags));
-	LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_count));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_count));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad0));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0));
-	LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_attrs));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs));
-	LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_fid));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_fid));
-	LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_version));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_version));
-	LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_hash_start));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start));
-	LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_hash_end));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end));
-	LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_keysize));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize));
-	LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_recsize));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad1));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad2));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2));
-	LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n",
-		 (long long)(int)offsetof(struct idx_info, ii_pad3));
-	LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct idx_info *)0)->ii_pad3));
-	CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37);
-
-	/* Checks for struct lu_idxpage */
-	LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n",
-		 (long long)(int)sizeof(struct lu_idxpage));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_magic));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_flags));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_nr));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr));
-	LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct lu_idxpage, lip_pad0));
-	LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0));
-	CLASSERT(LIP_MAGIC == 0x8A6D6B6C);
-	LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n",
-		 (long long)LIP_HDR_SIZE);
-	LASSERTF(II_FL_NOHASH == 1, "found %lld\n",
-		 (long long)II_FL_NOHASH);
-	LASSERTF(II_FL_VARKEY == 2, "found %lld\n",
-		 (long long)II_FL_VARKEY);
-	LASSERTF(II_FL_VARREC == 4, "found %lld\n",
-		 (long long)II_FL_VARREC);
-	LASSERTF(II_FL_NONUNQ == 8, "found %lld\n",
-		 (long long)II_FL_NONUNQ);
-
 	/* Checks for struct niobuf_remote */
 	LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
 		 (long long)(int)sizeof(struct niobuf_remote));
@@ -3753,50 +3581,6 @@ void lustre_assert_wire_constants(void)
 	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
 		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
 
-	/* Checks for struct quota_body */
-	LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n",
-		 (long long)(int)sizeof(struct quota_body));
-	LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_fid));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_fid));
-	LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_id));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_id));
-	LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_flags));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_flags));
-	LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_padding));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_padding));
-	LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_count));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_count));
-	LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_usage));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_usage));
-	LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_slv_ver));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver));
-	LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_lockh));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh));
-	LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_glb_lockh));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh));
-	LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n",
-		 (long long)(int)offsetof(struct quota_body, qb_padding1[4]));
-	LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4]));
-
 	/* Checks for struct mgs_target_info */
 	LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
 		 (long long)(int)sizeof(struct mgs_target_info));
@@ -4431,60 +4215,4 @@ void lustre_assert_wire_constants(void)
 	LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4, "found %lld\n",
 		 (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
 
-
-	/* Checks for struct update_buf */
-	LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
-		 (long long)(int)sizeof(struct update_buf));
-	LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct update_buf, ub_magic));
-	LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_buf *)0)->ub_magic));
-	LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct update_buf, ub_count));
-	LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_buf *)0)->ub_count));
-	LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct update_buf, ub_bufs));
-	LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs));
-
-	/* Checks for struct update_reply */
-	LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n",
-		 (long long)(int)sizeof(struct update_reply));
-	LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct update_reply, ur_version));
-	LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_reply *)0)->ur_version));
-	LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct update_reply, ur_count));
-	LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_reply *)0)->ur_count));
-	LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct update_reply, ur_lens));
-	LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct update_reply *)0)->ur_lens));
-
-	/* Checks for struct update */
-	LASSERTF((int)sizeof(struct update) == 56, "found %lld\n",
-		 (long long)(int)sizeof(struct update));
-	LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_type));
-	LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_type));
-	LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_batchid));
-	LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_batchid));
-	LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_fid));
-	LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_fid));
-	LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_lens));
-	LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_lens));
-	LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n",
-		 (long long)(int)offsetof(struct update, u_bufs));
-	LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct update *)0)->u_bufs));
 }
-- 
cgit v1.2.3-54-g00ecf
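The checks removed in the hunks above all follow one wire-layout assertion
pattern: for every on-the-wire struct, lustre_assert_wire_constants() compares
the offsetof() and sizeof() of each member against hard-coded protocol
constants, so that any compiler, architecture, or header change that silently
shifts the wire format is reported at runtime with the value actually found
(the "found %lld" in each message). A minimal standalone sketch of that same
pattern, assuming a hypothetical struct wire_example and using C11
_Static_assert in place of Lustre's LASSERTF()/CLASSERT() macros:

/*
 * Hypothetical sketch of the wire-constant check pattern; "wire_example"
 * and its expected layout are illustrative stand-ins, not Lustre types.
 */
#include <stddef.h>	/* offsetof() */
#include <stdint.h>	/* fixed-width on-wire types */

struct wire_example {
	uint32_t we_magic;	/* expected wire offset 0, size 4 */
	uint32_t we_flags;	/* expected wire offset 4, size 4 */
	uint64_t we_count;	/* expected wire offset 8, size 8 */
};

/* Compile-time equivalents of the runtime LASSERTF() calls. */
_Static_assert(sizeof(struct wire_example) == 16, "wire_example resized");
_Static_assert(offsetof(struct wire_example, we_magic) == 0, "we_magic moved");
_Static_assert(sizeof(((struct wire_example *)0)->we_magic) == 4, "we_magic resized");
_Static_assert(offsetof(struct wire_example, we_flags) == 4, "we_flags moved");
_Static_assert(offsetof(struct wire_example, we_count) == 8, "we_count moved");

Lustre's generated checks run at module init rather than at compile time, which
lets a mismatch print the offending value; the static form sketched here fails
the build instead, the usual trade-off when applying this technique.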