author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree       d6d89a12e7eb8017837c057935a2271290907f76 /drivers/staging
parent     8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/android/Kconfig | 17
-rw-r--r--  drivers/staging/android/Makefile | 2
-rw-r--r--  drivers/staging/android/ion/ion.c | 16
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c | 4
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c | 2
-rw-r--r--  drivers/staging/android/ion/ion_test.c | 2
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 9
-rw-r--r--  drivers/staging/android/sync.c | 356
-rw-r--r--  drivers/staging/android/sync.h | 91
-rw-r--r--  drivers/staging/android/sync_debug.c | 8
-rw-r--r--  drivers/staging/android/timed_gpio.c | 166
-rw-r--r--  drivers/staging/android/timed_gpio.h | 33
-rw-r--r--  drivers/staging/android/timed_output.c | 110
-rw-r--r--  drivers/staging/android/timed_output.h | 37
-rw-r--r--  drivers/staging/android/uapi/sync.h | 84
-rw-r--r--  drivers/staging/board/armadillo800eva.c | 8
-rw-r--r--  drivers/staging/comedi/comedi_buf.c | 10
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 54
-rw-r--r--  drivers/staging/comedi/comedidev.h | 4
-rw-r--r--  drivers/staging/comedi/drivers.c | 40
-rw-r--r--  drivers/staging/comedi/drivers/amcc_s5933.h | 24
-rw-r--r--  drivers/staging/comedi/drivers/amplc_dio200_common.c | 12
-rw-r--r--  drivers/staging/comedi/drivers/amplc_pc263.c | 104
-rw-r--r--  drivers/staging/comedi/drivers/amplc_pci224.c | 71
-rw-r--r--  drivers/staging/comedi/drivers/amplc_pci230.c | 189
-rw-r--r--  drivers/staging/comedi/drivers/amplc_pci263.c | 86
-rw-r--r--  drivers/staging/comedi/drivers/c6xdigio.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/comedi_8254.h | 14
-rw-r--r--  drivers/staging/comedi/drivers/daqboard2000.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/das1800.c | 1365
-rw-r--r--  drivers/staging/comedi/drivers/dt282x.c | 119
-rw-r--r--  drivers/staging/comedi/drivers/mite.c | 1113
-rw-r--r--  drivers/staging/comedi/drivers/mite.h | 329
-rw-r--r--  drivers/staging/comedi/drivers/ni_660x.c | 1174
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc.h | 33
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc_common.c | 65
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc_cs.c | 95
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc_pci.c | 4
-rw-r--r--  drivers/staging/comedi/drivers/ni_labpc_regs.h | 82
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_c_common.c | 0
-rw-r--r--  drivers/staging/comedi/drivers/ni_mio_common.c | 981
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcidio.c | 37
-rw-r--r--  drivers/staging/comedi/drivers/ni_pcimio.c | 36
-rw-r--r--  drivers/staging/comedi/drivers/ni_stc.h | 56
-rw-r--r--  drivers/staging/comedi/drivers/ni_tio.c | 807
-rw-r--r--  drivers/staging/comedi/drivers/ni_tio.h | 66
-rw-r--r--  drivers/staging/comedi/drivers/ni_tio_internal.h | 322
-rw-r--r--  drivers/staging/comedi/drivers/ni_tiocmd.c | 127
-rw-r--r--  drivers/staging/comedi/drivers/plx9052.h | 122
-rw-r--r--  drivers/staging/comedi/drivers/plx9080.h | 2
-rw-r--r--  drivers/staging/comedi/drivers/z8536.h | 89
-rw-r--r--  drivers/staging/dgnc/dgnc_cls.c | 2
-rw-r--r--  drivers/staging/dgnc/dgnc_driver.c | 52
-rw-r--r--  drivers/staging/dgnc/dgnc_driver.h | 23
-rw-r--r--  drivers/staging/dgnc/dgnc_mgmt.c | 28
-rw-r--r--  drivers/staging/dgnc/dgnc_neo.c | 131
-rw-r--r--  drivers/staging/dgnc/dgnc_sysfs.c | 22
-rw-r--r--  drivers/staging/dgnc/dgnc_tty.c | 279
-rw-r--r--  drivers/staging/dgnc/digi.h | 4
-rw-r--r--  drivers/staging/emxx_udc/emxx_udc.c | 24
-rw-r--r--  drivers/staging/emxx_udc/emxx_udc.h | 40
-rw-r--r--  drivers/staging/fbtft/fb_agm1264k-fl.c | 2
-rw-r--r--  drivers/staging/fbtft/fbtft-io.c | 8
-rw-r--r--  drivers/staging/fbtft/fbtft_device.c | 6
-rw-r--r--  drivers/staging/fsl-mc/README.txt | 138
-rw-r--r--  drivers/staging/fsl-mc/TODO | 13
-rw-r--r--  drivers/staging/fsl-mc/bus/dpbp.c | 77
-rw-r--r--  drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 7
-rw-r--r--  drivers/staging/fsl-mc/bus/dpmcp.c | 35
-rw-r--r--  drivers/staging/fsl-mc/bus/dpmcp.h | 10
-rw-r--r--  drivers/staging/fsl-mc/bus/dprc-cmd.h | 6
-rw-r--r--  drivers/staging/fsl-mc/bus/dprc-driver.c | 33
-rw-r--r--  drivers/staging/fsl-mc/bus/dprc.c | 26
-rw-r--r--  drivers/staging/fsl-mc/bus/mc-allocator.c | 79
-rw-r--r--  drivers/staging/fsl-mc/bus/mc-bus.c | 90
-rw-r--r--  drivers/staging/fsl-mc/bus/mc-msi.c | 14
-rw-r--r--  drivers/staging/fsl-mc/include/dpbp-cmd.h | 4
-rw-r--r--  drivers/staging/fsl-mc/include/dpbp.h | 51
-rw-r--r--  drivers/staging/fsl-mc/include/dprc.h | 19
-rw-r--r--  drivers/staging/fsl-mc/include/mc-private.h | 2
-rw-r--r--  drivers/staging/fwserial/dma_fifo.c | 8
-rw-r--r--  drivers/staging/fwserial/dma_fifo.h | 16
-rw-r--r--  drivers/staging/fwserial/fwserial.c | 44
-rw-r--r--  drivers/staging/fwserial/fwserial.h | 42
-rw-r--r--  drivers/staging/gdm724x/gdm_mux.c | 5
-rw-r--r--  drivers/staging/gdm724x/gdm_usb.c | 6
-rw-r--r--  drivers/staging/gdm724x/hci_packet.h | 2
-rw-r--r--  drivers/staging/gdm724x/netlink_k.c | 3
-rw-r--r--  drivers/staging/gs_fpgaboot/gs_fpgaboot.c | 8
-rw-r--r--  drivers/staging/gs_fpgaboot/gs_fpgaboot.h | 2
-rw-r--r--  drivers/staging/gs_fpgaboot/io.c | 1
-rw-r--r--  drivers/staging/i4l/act2000/act2000_isa.c | 24
-rw-r--r--  drivers/staging/i4l/pcbit/capi.h | 2
-rw-r--r--  drivers/staging/i4l/pcbit/drv.c | 8
-rw-r--r--  drivers/staging/i4l/pcbit/edss1.c | 2
-rw-r--r--  drivers/staging/i4l/pcbit/layer2.h | 2
-rw-r--r--  drivers/staging/iio/accel/Kconfig | 23
-rw-r--r--  drivers/staging/iio/accel/Makefile | 6
-rw-r--r--  drivers/staging/iio/accel/adis16201.h | 156
-rw-r--r--  drivers/staging/iio/accel/adis16201_core.c | 1
-rw-r--r--  drivers/staging/iio/accel/adis16203.h | 132
-rw-r--r--  drivers/staging/iio/accel/adis16203_core.c | 1
-rw-r--r--  drivers/staging/iio/accel/adis16204.h | 68
-rw-r--r--  drivers/staging/iio/accel/adis16204_core.c | 253
-rw-r--r--  drivers/staging/iio/accel/adis16209.h | 39
-rw-r--r--  drivers/staging/iio/accel/adis16209_core.c | 1
-rw-r--r--  drivers/staging/iio/accel/adis16220.h | 140
-rw-r--r--  drivers/staging/iio/accel/adis16220_core.c | 494
-rw-r--r--  drivers/staging/iio/accel/adis16240.h | 50
-rw-r--r--  drivers/staging/iio/accel/adis16240_core.c | 5
-rw-r--r--  drivers/staging/iio/adc/ad7192.c | 50
-rw-r--r--  drivers/staging/iio/adc/ad7280a.c | 40
-rw-r--r--  drivers/staging/iio/adc/ad7280a.h | 8
-rw-r--r--  drivers/staging/iio/adc/ad7606.h | 28
-rw-r--r--  drivers/staging/iio/adc/ad7606_core.c | 18
-rw-r--r--  drivers/staging/iio/adc/ad7606_spi.c | 3
-rw-r--r--  drivers/staging/iio/adc/ad7780.c | 2
-rw-r--r--  drivers/staging/iio/frequency/ad9832.c | 2
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 51
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.h | 28
-rw-r--r--  drivers/staging/iio/light/isl29028.c | 55
-rw-r--r--  drivers/staging/iio/light/tsl2x7x_core.c | 211
-rw-r--r--  drivers/staging/iio/meter/ade7753.c | 4
-rw-r--r--  drivers/staging/iio/meter/ade7754.c | 4
-rw-r--r--  drivers/staging/iio/meter/ade7758.h | 16
-rw-r--r--  drivers/staging/iio/meter/ade7758_core.c | 77
-rw-r--r--  drivers/staging/iio/meter/ade7758_ring.c | 4
-rw-r--r--  drivers/staging/iio/meter/ade7759.c | 4
-rw-r--r--  drivers/staging/iio/meter/ade7854.c | 3
-rw-r--r--  drivers/staging/iio/resolver/ad2s1210.h | 8
-rw-r--r--  drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 15
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs.h | 51
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h | 79
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h | 136
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h | 18
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h | 15
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h | 4
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h | 161
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h | 31
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | 75
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h | 12
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h | 2
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h | 2
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h | 80
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h | 4
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/lib-dlc.h | 29
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/lib-lnet.h | 9
-rw-r--r--  drivers/staging/lustre/include/linux/lnet/lib-types.h | 2
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 405
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 134
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 101
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 139
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c | 1
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c | 3
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/debug.c | 126
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/fail.c | 3
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/hash.c | 6
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/libcfs_lock.c | 54
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/libcfs_mem.c | 28
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c | 9
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c | 283
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/linux/linux-module.c | 154
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c | 31
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/module.c | 132
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/tracefile.c | 17
-rw-r--r--  drivers/staging/lustre/lnet/libcfs/workitem.c | 12
-rw-r--r--  drivers/staging/lustre/lnet/lnet/api-ni.c | 143
-rw-r--r--  drivers/staging/lustre/lnet/lnet/config.c | 3
-rw-r--r--  drivers/staging/lustre/lnet/lnet/lib-move.c | 10
-rw-r--r--  drivers/staging/lustre/lnet/lnet/module.c | 7
-rw-r--r--  drivers/staging/lustre/lnet/selftest/brw_test.c | 82
-rw-r--r--  drivers/staging/lustre/lnet/selftest/conctl.c | 52
-rw-r--r--  drivers/staging/lustre/lnet/selftest/conrpc.c | 215
-rw-r--r--  drivers/staging/lustre/lnet/selftest/conrpc.h | 40
-rw-r--r--  drivers/staging/lustre/lnet/selftest/console.c | 282
-rw-r--r--  drivers/staging/lustre/lnet/selftest/console.h | 47
-rw-r--r--  drivers/staging/lustre/lnet/selftest/framework.c | 270
-rw-r--r--  drivers/staging/lustre/lnet/selftest/ping_test.c | 44
-rw-r--r--  drivers/staging/lustre/lnet/selftest/rpc.c | 133
-rw-r--r--  drivers/staging/lustre/lnet/selftest/rpc.h | 156
-rw-r--r--  drivers/staging/lustre/lnet/selftest/selftest.h | 204
-rw-r--r--  drivers/staging/lustre/lnet/selftest/timer.c | 12
-rw-r--r--  drivers/staging/lustre/lustre/fid/fid_request.c | 12
-rw-r--r--  drivers/staging/lustre/lustre/fld/fld_cache.c | 3
-rw-r--r--  drivers/staging/lustre/lustre/fld/fld_internal.h | 9
-rw-r--r--  drivers/staging/lustre/lustre/fld/fld_request.c | 94
-rw-r--r--  drivers/staging/lustre/lustre/include/cl_object.h | 978
-rw-r--r--  drivers/staging/lustre/lustre/include/lclient.h | 408
-rw-r--r--  drivers/staging/lustre/lustre/include/linux/obd.h | 125
-rw-r--r--  drivers/staging/lustre/lustre/include/lu_object.h | 75
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre/lustre_idl.h | 112
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre/lustre_user.h | 54
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_cfg.h | 2
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_disk.h | 2
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_dlm.h | 14
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_dlm_flags.h | 120
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_fid.h | 22
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_import.h | 2
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_lib.h | 60
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_mdc.h | 18
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_net.h | 4
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_param.h | 1
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre_req_layout.h | 3
-rw-r--r--  drivers/staging/lustre/lustre/include/obd.h | 77
-rw-r--r--  drivers/staging/lustre/lustre/include/obd_cksum.h | 1
-rw-r--r--  drivers/staging/lustre/lustre/include/obd_class.h | 5
-rw-r--r--  drivers/staging/lustre/lustre/include/obd_support.h | 4
-rw-r--r--  drivers/staging/lustre/lustre/lclient/lcommon_cl.c | 1203
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/l_lock.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_extent.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_flock.c | 30
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_internal.h | 19
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lib.c | 14
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lock.c | 115
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c | 28
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_request.c | 163
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_resource.c | 19
-rw-r--r--  drivers/staging/lustre/lustre/llite/Makefile | 5
-rw-r--r--  drivers/staging/lustre/lustre/llite/dcache.c | 15
-rw-r--r--  drivers/staging/lustre/lustre/llite/dir.c | 99
-rw-r--r--  drivers/staging/lustre/lustre/llite/file.c | 277
-rw-r--r--  drivers/staging/lustre/lustre/llite/glimpse.c (renamed from drivers/staging/lustre/lustre/lclient/glimpse.c) | 87
-rw-r--r--  drivers/staging/lustre/lustre/llite/lcommon_cl.c | 327
-rw-r--r--  drivers/staging/lustre/lustre/llite/lcommon_misc.c (renamed from drivers/staging/lustre/lustre/lclient/lcommon_misc.c) | 45
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_close.c | 71
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_internal.h | 274
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_lib.c | 176
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_mmap.c | 48
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_nfs.c | 29
-rw-r--r--  drivers/staging/lustre/lustre/llite/lloop.c | 3
-rw-r--r--  drivers/staging/lustre/lustre/llite/lproc_llite.c | 33
-rw-r--r--  drivers/staging/lustre/lustre/llite/namei.c | 143
-rw-r--r--  drivers/staging/lustre/lustre/llite/rw.c | 367
-rw-r--r--  drivers/staging/lustre/lustre/llite/rw26.c | 318
-rw-r--r--  drivers/staging/lustre/lustre/llite/statahead.c | 17
-rw-r--r--  drivers/staging/lustre/lustre/llite/super25.c | 14
-rw-r--r--  drivers/staging/lustre/lustre/llite/symlink.c | 10
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_dev.c | 270
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_internal.h | 332
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_io.c | 928
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_lock.c | 53
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_object.c | 141
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_page.c | 211
-rw-r--r--  drivers/staging/lustre/lustre/llite/vvp_req.c | 121
-rw-r--r--  drivers/staging/lustre/lustre/llite/xattr.c | 45
-rw-r--r--  drivers/staging/lustre/lustre/llite/xattr_cache.c | 1
-rw-r--r--  drivers/staging/lustre/lustre/lmv/lmv_internal.h | 3
-rw-r--r--  drivers/staging/lustre/lustre/lmv/lmv_obd.c | 182
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_cl_internal.h | 105
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_dev.c | 15
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_ea.c | 5
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_internal.h | 34
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_io.c | 246
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_lock.c | 996
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_merge.c | 11
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_obd.c | 26
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_object.c | 54
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_offset.c | 12
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_pack.c | 8
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_page.c | 183
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_pool.c | 62
-rw-r--r--  drivers/staging/lustre/lustre/lov/lov_request.c | 11
-rw-r--r--  drivers/staging/lustre/lustre/lov/lovsub_dev.c | 9
-rw-r--r--  drivers/staging/lustre/lustre/lov/lovsub_lock.c | 386
-rw-r--r--  drivers/staging/lustre/lustre/lov/lovsub_object.c | 7
-rw-r--r--  drivers/staging/lustre/lustre/lov/lovsub_page.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/mdc/lproc_mdc.c | 8
-rw-r--r--  drivers/staging/lustre/lustre/mdc/mdc_lib.c | 24
-rw-r--r--  drivers/staging/lustre/lustre/mdc/mdc_locks.c | 5
-rw-r--r--  drivers/staging/lustre/lustre/mdc/mdc_request.c | 26
-rw-r--r--  drivers/staging/lustre/lustre/mgc/mgc_request.c | 12
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/cl_io.c | 430
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/cl_lock.c | 2086
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/cl_object.c | 303
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/cl_page.c | 659
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/class_obd.c | 5
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/debug.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/genops.c | 1
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/linux/linux-module.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/llog.c | 1
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/lprocfs_status.c | 72
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/lu_object.c | 9
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/lustre_peer.c | 3
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/obd_config.c | 26
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/obd_mount.c | 15
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/obdo.c | 3
-rw-r--r--  drivers/staging/lustre/lustre/obdecho/echo_client.c | 173
-rw-r--r--  drivers/staging/lustre/lustre/osc/lproc_osc.c | 68
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cache.c | 531
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_cl_internal.h | 159
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_internal.h | 27
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_io.c | 283
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_lock.c | 1698
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_object.c | 38
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_page.c | 544
-rw-r--r--  drivers/staging/lustre/lustre/osc/osc_request.c | 423
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/client.c | 11
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/events.c | 1
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/import.c | 12
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/layout.c | 31
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c | 11
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/nrs.c | 7
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/pack_generic.c | 3
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c | 21
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 14
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/sec_plain.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/service.c | 52
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/wiretest.c | 12
-rw-r--r--  drivers/staging/media/Kconfig | 2
-rw-r--r--  drivers/staging/media/Makefile | 1
-rw-r--r--  drivers/staging/media/bcm2048/radio-bcm2048.c | 2
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 2
-rw-r--r--  drivers/staging/media/omap1/omap1_camera.c | 68
-rw-r--r--  drivers/staging/media/omap4iss/iss.c | 2
-rw-r--r--  drivers/staging/media/omap4iss/iss_video.c | 2
-rw-r--r--  drivers/staging/media/tw686x-kh/Kconfig | 17
-rw-r--r--  drivers/staging/media/tw686x-kh/Makefile | 3
-rw-r--r--  drivers/staging/media/tw686x-kh/TODO | 6
-rw-r--r--  drivers/staging/media/tw686x-kh/tw686x-kh-core.c | 140
-rw-r--r--  drivers/staging/media/tw686x-kh/tw686x-kh-regs.h | 103
-rw-r--r--  drivers/staging/media/tw686x-kh/tw686x-kh-video.c | 821
-rw-r--r--  drivers/staging/media/tw686x-kh/tw686x-kh.h | 118
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_errors.h | 8
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_hal.h | 14
-rw-r--r--  drivers/staging/most/hdm-dim2/dim2_reg.h | 8
-rw-r--r--  drivers/staging/mt29f_spinand/mt29f_spinand.c | 49
-rw-r--r--  drivers/staging/netlogic/xlr_net.c | 2
-rw-r--r--  drivers/staging/nvec/nvec.c | 11
-rw-r--r--  drivers/staging/nvec/nvec_power.c | 4
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c | 7
-rw-r--r--  drivers/staging/octeon/ethernet-rx.h | 2
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c | 15
-rw-r--r--  drivers/staging/octeon/ethernet.c | 4
-rw-r--r--  drivers/staging/rdma/Kconfig | 27
-rw-r--r--  drivers/staging/rdma/Makefile | 2
-rw-r--r--  drivers/staging/rdma/hfi1/Kconfig | 29
-rw-r--r--  drivers/staging/rdma/hfi1/Makefile | 21
-rw-r--r--  drivers/staging/rdma/hfi1/TODO | 6
-rw-r--r--  drivers/staging/rdma/hfi1/affinity.c | 430
-rw-r--r--  drivers/staging/rdma/hfi1/affinity.h | 91
-rw-r--r--  drivers/staging/rdma/hfi1/aspm.h | 309
-rw-r--r--  drivers/staging/rdma/hfi1/chip.c | 14418
-rw-r--r--  drivers/staging/rdma/hfi1/chip.h | 1363
-rw-r--r--  drivers/staging/rdma/hfi1/chip_registers.h | 1306
-rw-r--r--  drivers/staging/rdma/hfi1/common.h | 408
-rw-r--r--  drivers/staging/rdma/hfi1/debugfs.c | 1145
-rw-r--r--  drivers/staging/rdma/hfi1/debugfs.h | 75
-rw-r--r--  drivers/staging/rdma/hfi1/device.c | 181
-rw-r--r--  drivers/staging/rdma/hfi1/device.h | 59
-rw-r--r--  drivers/staging/rdma/hfi1/diag.c | 1924
-rw-r--r--  drivers/staging/rdma/hfi1/dma.c | 183
-rw-r--r--  drivers/staging/rdma/hfi1/driver.c | 1403
-rw-r--r--  drivers/staging/rdma/hfi1/efivar.c | 164
-rw-r--r--  drivers/staging/rdma/hfi1/efivar.h | 57
-rw-r--r--  drivers/staging/rdma/hfi1/eprom.c | 471
-rw-r--r--  drivers/staging/rdma/hfi1/eprom.h | 52
-rw-r--r--  drivers/staging/rdma/hfi1/file_ops.c | 1773
-rw-r--r--  drivers/staging/rdma/hfi1/firmware.c | 2049
-rw-r--r--  drivers/staging/rdma/hfi1/hfi.h | 1946
-rw-r--r--  drivers/staging/rdma/hfi1/init.c | 1809
-rw-r--r--  drivers/staging/rdma/hfi1/intr.c | 200
-rw-r--r--  drivers/staging/rdma/hfi1/iowait.h | 300
-rw-r--r--  drivers/staging/rdma/hfi1/mad.c | 4402
-rw-r--r--  drivers/staging/rdma/hfi1/mad.h | 437
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.c | 302
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.h | 74
-rw-r--r--  drivers/staging/rdma/hfi1/opa_compat.h | 111
-rw-r--r--  drivers/staging/rdma/hfi1/pcie.c | 1338
-rw-r--r--  drivers/staging/rdma/hfi1/pio.c | 2043
-rw-r--r--  drivers/staging/rdma/hfi1/pio.h | 326
-rw-r--r--  drivers/staging/rdma/hfi1/pio_copy.c | 867
-rw-r--r--  drivers/staging/rdma/hfi1/platform.c | 893
-rw-r--r--  drivers/staging/rdma/hfi1/platform.h | 304
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c | 974
-rw-r--r--  drivers/staging/rdma/hfi1/qp.h | 160
-rw-r--r--  drivers/staging/rdma/hfi1/qsfp.c | 606
-rw-r--r--  drivers/staging/rdma/hfi1/qsfp.h | 241
-rw-r--r--  drivers/staging/rdma/hfi1/rc.c | 2581
-rw-r--r--  drivers/staging/rdma/hfi1/ruc.c | 977
-rw-r--r--  drivers/staging/rdma/hfi1/sdma.c | 3052
-rw-r--r--  drivers/staging/rdma/hfi1/sdma.h | 1082
-rw-r--r--  drivers/staging/rdma/hfi1/sdma_txreq.h | 135
-rw-r--r--  drivers/staging/rdma/hfi1/sysfs.c | 785
-rw-r--r--  drivers/staging/rdma/hfi1/trace.c | 235
-rw-r--r--  drivers/staging/rdma/hfi1/trace.h | 1369
-rw-r--r--  drivers/staging/rdma/hfi1/twsi.c | 489
-rw-r--r--  drivers/staging/rdma/hfi1/twsi.h | 65
-rw-r--r--  drivers/staging/rdma/hfi1/uc.c | 604
-rw-r--r--  drivers/staging/rdma/hfi1/ud.c | 911
-rw-r--r--  drivers/staging/rdma/hfi1/user_exp_rcv.c | 1047
-rw-r--r--  drivers/staging/rdma/hfi1/user_exp_rcv.h | 79
-rw-r--r--  drivers/staging/rdma/hfi1/user_pages.c | 135
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.c | 1590
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.h | 84
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.c | 1740
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.h | 532
-rw-r--r--  drivers/staging/rdma/hfi1/verbs_txreq.c | 149
-rw-r--r--  drivers/staging/rdma/hfi1/verbs_txreq.h | 116
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_ap.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_cmd.c | 49
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_debug.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_efuse.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_ieee80211.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_ioctl_set.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_mlme.c | 13
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_mlme_ext.c | 49
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_pwrctrl.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_recv.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_rf.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_security.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_sreset.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_sta_mgt.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_wlan_util.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_xmit.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/hal/bb_cfg.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/fw.c | 4
-rw-r--r--  drivers/staging/rtl8188eu/hal/hal_com.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/hal_intf.c | 7
-rw-r--r--  drivers/staging/rtl8188eu/hal/mac_cfg.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/odm.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/odm_HWConfig.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/odm_RTL8188E.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/phy.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/pwrseq.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/pwrseqcmd.c | 4
-rw-r--r--  drivers/staging/rtl8188eu/hal/rf.c | 4
-rw-r--r--  drivers/staging/rtl8188eu/hal/rf_cfg.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188e_dm.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c | 9
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188eu_led.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/hal/usb_halinit.c | 73
-rw-r--r--  drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/HalVerDef.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/basic_types.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/drv_types.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/fw.h | 4
-rw-r--r--  drivers/staging/rtl8188eu/include/hal_com.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/hal_intf.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/ieee80211.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/mlme_osdep.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/mp_custom_oid.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_HWConfig.h | 4
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_RTL8188E.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_RegDefine11N.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_debug.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_precomp.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_reg.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/odm_types.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/osdep_intf.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/osdep_service.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/pwrseq.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/pwrseqcmd.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/recv_osdep.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_cmd.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_dm.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_hal.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_led.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_recv.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_spec.h | 4
-rw-r--r--  drivers/staging/rtl8188eu/include/rtl8188e_xmit.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_android.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_ap.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_cmd.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_debug.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_eeprom.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_efuse.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_event.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_ht.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_ioctl.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_ioctl_set.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_iol.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_mlme.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_mlme_ext.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_pwrctrl.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_qos.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_recv.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_rf.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_security.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_sreset.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/rtw_xmit.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/sta_info.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/usb_hal.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/usb_ops_linux.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/wifi.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/wlan_bssdef.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/include/xmit_osdep.h | 5
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 13
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/mlme_linux.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/mon.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/os_intfs.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/osdep_service.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/recv_linux.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/rtw_android.c | 5
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 7
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c | 4
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/xmit_linux.c | 5
-rw-r--r--  drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_softmac.c | 2
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 2
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c | 8
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c | 2
-rw-r--r--  drivers/staging/rtl8192u/r8190_rtl8256.c | 2
-rw-r--r--  drivers/staging/rtl8192u/r8192U_core.c | 77
-rw-r--r--  drivers/staging/rtl8192u/r8192U_wx.c | 22
-rw-r--r--  drivers/staging/rtl8712/basic_types.h | 4
-rw-r--r--  drivers/staging/rtl8712/drv_types.h | 4
-rw-r--r--  drivers/staging/rtl8712/ethernet.h | 4
-rw-r--r--  drivers/staging/rtl8712/hal_init.c | 25
-rw-r--r--  drivers/staging/rtl8712/ieee80211.c | 4
-rw-r--r--  drivers/staging/rtl8712/mlme_linux.c | 2
-rw-r--r--  drivers/staging/rtl8712/os_intfs.c | 4
-rw-r--r--  drivers/staging/rtl8712/osdep_service.h | 3
-rw-r--r--  drivers/staging/rtl8712/rtl8712_cmd.c | 18
-rw-r--r--  drivers/staging/rtl8712/rtl8712_recv.c | 10
-rw-r--r--  drivers/staging/rtl8712/rtl8712_xmit.c | 8
-rw-r--r--  drivers/staging/rtl8712/rtl871x_cmd.c | 80
-rw-r--r--  drivers/staging/rtl8712/rtl871x_ioctl_linux.c | 16
-rw-r--r--  drivers/staging/rtl8712/rtl871x_ioctl_set.c | 6
-rw-r--r--  drivers/staging/rtl8712/rtl871x_mlme.c | 16
-rw-r--r--  drivers/staging/rtl8712/rtl871x_recv.c | 2
-rw-r--r--  drivers/staging/rtl8712/rtl871x_sta_mgt.c | 6
-rw-r--r--  drivers/staging/rtl8712/rtl871x_xmit.c | 2
-rw-r--r--  drivers/staging/rtl8712/usb_ops_linux.c | 2
-rw-r--r--  drivers/staging/rtl8723au/Kconfig | 7
-rw-r--r--  drivers/staging/rtl8723au/core/rtw_ap.c | 3
-rw-r--r--  drivers/staging/rtl8723au/core/rtw_mlme_ext.c | 4
-rw-r--r--  drivers/staging/rtl8723au/core/rtw_recv.c | 25
-rw-r--r--  drivers/staging/rtl8723au/core/rtw_wlan_util.c | 10
-rw-r--r--  drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c | 2
-rw-r--r--  drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c | 2
-rw-r--r--  drivers/staging/rtl8723au/include/ieee80211.h | 2
-rw-r--r--  drivers/staging/rtl8723au/include/rtw_mlme_ext.h | 2
-rw-r--r--  drivers/staging/rtl8723au/include/rtw_recv.h | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c | 54
-rw-r--r--  drivers/staging/rtl8723au/os_dep/usb_intf.c | 5
-rw-r--r--  drivers/staging/rts5208/ms.c | 16
-rw-r--r--  drivers/staging/rts5208/rtsx_card.c | 21
-rw-r--r--  drivers/staging/rts5208/rtsx_card.h | 2
-rw-r--r--  drivers/staging/rts5208/rtsx_chip.c | 35
-rw-r--r--  drivers/staging/rts5208/rtsx_chip.h | 3
-rw-r--r--  drivers/staging/rts5208/sd.c | 16
-rw-r--r--  drivers/staging/skein/skein_api.c | 3
-rw-r--r--  drivers/staging/skein/skein_base.c | 90
-rw-r--r--  drivers/staging/skein/skein_base.h | 45
-rw-r--r--  drivers/staging/skein/skein_block.c | 92
-rw-r--r--  drivers/staging/skein/skein_generic.c | 6
-rw-r--r--  drivers/staging/skein/threefish_api.h | 2
-rw-r--r--  drivers/staging/skein/threefish_block.c | 2144
-rw-r--r--  drivers/staging/slicoss/slicoss.c | 8
-rw-r--r--  drivers/staging/sm750fb/ddk750_chip.c | 2
-rw-r--r--  drivers/staging/speakup/main.c | 6
-rw-r--r--  drivers/staging/speakup/selection.c | 2
-rw-r--r--  drivers/staging/speakup/serialio.h | 3
-rw-r--r--  drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset | 14
-rw-r--r--  drivers/staging/unisys/Documentation/overview.txt | 19
-rw-r--r--  drivers/staging/unisys/Documentation/proc-entries.txt | 93
-rw-r--r--  drivers/staging/unisys/MAINTAINERS | 1
-rw-r--r--  drivers/staging/unisys/include/channel.h | 10
-rw-r--r--  drivers/staging/unisys/include/iochannel.h | 42
-rw-r--r--  drivers/staging/unisys/include/visorbus.h | 127
-rw-r--r--  drivers/staging/unisys/visorbus/visorbus_main.c | 394
-rw-r--r--  drivers/staging/unisys/visorbus/visorchannel.c | 5
-rw-r--r--  drivers/staging/unisys/visorbus/visorchipset.c | 444
-rw-r--r--  drivers/staging/unisys/visorhba/visorhba_main.c | 114
-rw-r--r--  drivers/staging/unisys/visorinput/visorinput.c | 24
-rw-r--r--  drivers/staging/unisys/visornic/visornic_main.c | 223
-rw-r--r--  drivers/staging/vme/devices/vme_pio2_gpio.c | 22
-rw-r--r--  drivers/staging/vt6655/baseband.c | 24
-rw-r--r--  drivers/staging/vt6655/baseband.h | 6
-rw-r--r--  drivers/staging/vt6655/card.c | 95
-rw-r--r--  drivers/staging/vt6655/card.h | 9
-rw-r--r--  drivers/staging/vt6655/channel.c | 4
-rw-r--r--  drivers/staging/vt6655/desc.h | 3
-rw-r--r--  drivers/staging/vt6655/device_main.c | 4
-rw-r--r--  drivers/staging/vt6655/mac.c | 15
-rw-r--r--  drivers/staging/vt6655/rxtx.c | 2
-rw-r--r--  drivers/staging/vt6655/srom.c | 9
-rw-r--r--  drivers/staging/vt6656/baseband.c | 26
-rw-r--r--  drivers/staging/vt6656/channel.c | 4
-rw-r--r--  drivers/staging/vt6656/int.c | 2
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 8
-rw-r--r--  drivers/staging/vt6656/rxtx.c | 2
-rw-r--r--  drivers/staging/vt6656/wcmd.c | 8
-rw-r--r--  drivers/staging/wilc1000/Kconfig | 1
-rw-r--r--  drivers/staging/wilc1000/host_interface.c | 438
-rw-r--r--  drivers/staging/wilc1000/host_interface.h | 8
-rw-r--r--  drivers/staging/wilc1000/linux_mon.c | 24
-rw-r--r--  drivers/staging/wilc1000/linux_wlan.c | 98
-rw-r--r--  drivers/staging/wilc1000/wilc_spi.c | 3
-rw-r--r--  drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 81
-rw-r--r--  drivers/staging/wilc1000/wilc_wfi_netdevice.h | 15
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan.c | 53
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan.h | 6
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan_cfg.c | 7
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan_if.h | 21
-rw-r--r--  drivers/staging/wlan-ng/cfg80211.c | 10
-rw-r--r--  drivers/staging/wlan-ng/hfa384x_usb.c | 8
-rw-r--r--  drivers/staging/wlan-ng/p80211conv.c | 5
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c | 6
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.h | 1
-rw-r--r--  drivers/staging/wlan-ng/prism2fw.c | 28
-rw-r--r--  drivers/staging/wlan-ng/prism2usb.c | 2
-rw-r--r--  drivers/staging/xgifb/XGI_main_26.c | 5
-rw-r--r--  drivers/staging/xgifb/vb_init.c | 16
-rw-r--r--  drivers/staging/xgifb/vb_setmode.c | 22
-rw-r--r--  drivers/staging/xgifb/vb_table.h | 135
-rw-r--r--  drivers/staging/xgifb/vb_util.h | 8
620 files changed, 17794 insertions, 87540 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 45251e8fd..a81bdb894 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
-source "drivers/staging/rdma/Kconfig"
-
source "drivers/staging/android/Kconfig"
source "drivers/staging/board/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 3184844eb..d112e0819 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_VHBA) += vhba/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_STAGING_RDMA) += rdma/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index bd90d2002..6480f60eb 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,27 +14,13 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
-config ANDROID_TIMED_OUTPUT
- bool "Timed output class driver"
- default y
-
-config ANDROID_TIMED_GPIO
- tristate "Android timed gpio driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on ANDROID_TIMED_OUTPUT
- default n
- ---help---
- Unlike generic gpio is to allow programs to access and manipulate gpio
- registers from user space, timed output/gpio is a system to allow changing
- a gpio pin and restore it automatically after a specified timeout.
-
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
---help---
Registers processes to be killed when low memory conditions, this is useful
as there is no particular swap space on android.
- The registered process will kills according to the priorities in android init
+ The registered process will kill according to the priorities in android init
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
@@ -52,6 +38,7 @@ config SW_SYNC
bool "Software synchronization objects"
default n
depends on SYNC
+ depends on SYNC_FILE
---help---
A sync object driver that uses a 32bit counter to coordinate
synchronization. Useful when there is no hardware primitive backing
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c7b6c99cc..980d6dc4b 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,8 +3,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
-obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_SYNC) += sync.o sync_debug.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 85365672c..a2cf93b59 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -184,7 +184,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct scatterlist *sg;
int i, ret;
- buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
@@ -341,7 +341,7 @@ static struct ion_handle *ion_handle_create(struct ion_client *client,
{
struct ion_handle *handle;
- handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
@@ -396,7 +396,7 @@ static int ion_handle_put_nolock(struct ion_handle *handle)
return ret;
}
-int ion_handle_put(struct ion_handle *handle)
+static int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
@@ -438,8 +438,8 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
return handle ? handle : ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -827,7 +827,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
}
task_unlock(current->group_leader);
- client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
@@ -1035,7 +1035,7 @@ static void ion_vm_open(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list;
- vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
if (!vma_list)
return;
vma_list->vma = vma;
@@ -1650,7 +1650,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
struct ion_device *idev;
int ret;
- idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163f9..e0553fee9 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -55,7 +55,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (allocated_size > chunk_heap->size - chunk_heap->allocated)
return -ENOMEM;
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
@@ -154,7 +154,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
if (ret)
return ERR_PTR(ret);
- chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 5678870bf..814a3c92a 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -68,6 +68,8 @@ static int __init ion_dummy_init(void)
int i, err;
idev = ion_device_create(NULL);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
GFP_KERNEL);
if (!heaps)
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 83a3af06d..5a396a1a8 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -208,7 +208,7 @@ static int ion_test_open(struct inode *inode, struct file *file)
struct ion_test_data *data;
struct miscdevice *miscdev = file->private_data;
- data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 2509e5df7..24d2745e9 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -131,7 +131,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (!p)
continue;
- if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+ if (task_lmk_waiting(p) &&
time_before_eq(jiffies, lowmem_deathpending_timeout)) {
task_unlock(p);
rcu_read_unlock();
@@ -162,13 +162,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (selected) {
task_lock(selected);
send_sig(SIGKILL, selected, 0);
- /*
- * FIXME: lowmemorykiller shouldn't abuse global OOM killer
- * infrastructure. There is no real reason why the selected
- * task should have access to the memory reserves.
- */
if (selected->mm)
- mark_oom_victim(selected);
+ task_set_lmk_waiting(selected);
task_unlock(selected);
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
" to free %ldkB on behalf of '%s' (%d) because\n"
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3a8f21031..1d14c83c7 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -16,10 +16,7 @@
#include <linux/debugfs.h>
#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -32,7 +29,6 @@
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_file_fops;
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name)
@@ -136,170 +132,6 @@ struct fence *sync_pt_create(struct sync_timeline *obj, int size)
}
EXPORT_SYMBOL(sync_pt_create);
-static struct sync_file *sync_file_alloc(int size, const char *name)
-{
- struct sync_file *sync_file;
-
- sync_file = kzalloc(size, GFP_KERNEL);
- if (!sync_file)
- return NULL;
-
- sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
- sync_file, 0);
- if (IS_ERR(sync_file->file))
- goto err;
-
- kref_init(&sync_file->kref);
- strlcpy(sync_file->name, name, sizeof(sync_file->name));
-
- init_waitqueue_head(&sync_file->wq);
-
- return sync_file;
-
-err:
- kfree(sync_file);
- return NULL;
-}
-
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
-{
- struct sync_file_cb *check;
- struct sync_file *sync_file;
-
- check = container_of(cb, struct sync_file_cb, cb);
- sync_file = check->sync_file;
-
- if (atomic_dec_and_test(&sync_file->status))
- wake_up_all(&sync_file->wq);
-}
-
-/* TODO: implement a create which takes more that one fence */
-struct sync_file *sync_file_create(const char *name, struct fence *fence)
-{
- struct sync_file *sync_file;
-
- sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]),
- name);
- if (!sync_file)
- return NULL;
-
- sync_file->num_fences = 1;
- atomic_set(&sync_file->status, 1);
-
- sync_file->cbs[0].fence = fence;
- sync_file->cbs[0].sync_file = sync_file;
- if (fence_add_callback(fence, &sync_file->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&sync_file->status);
-
- sync_file_debug_add(sync_file);
-
- return sync_file;
-}
-EXPORT_SYMBOL(sync_file_create);
-
-struct sync_file *sync_file_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
-
- if (file->f_op != &sync_file_fops)
- goto err;
-
- return file->private_data;
-
-err:
- fput(file);
- return NULL;
-}
-EXPORT_SYMBOL(sync_file_fdget);
-
-void sync_file_put(struct sync_file *sync_file)
-{
- fput(sync_file->file);
-}
-EXPORT_SYMBOL(sync_file_put);
-
-void sync_file_install(struct sync_file *sync_file, int fd)
-{
- fd_install(fd, sync_file->file);
-}
-EXPORT_SYMBOL(sync_file_install);
-
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
- struct fence *fence)
-{
- sync_file->cbs[*i].fence = fence;
- sync_file->cbs[*i].sync_file = sync_file;
-
- if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
- fence_check_cb_func)) {
- fence_get(fence);
- (*i)++;
- }
-}
-
-struct sync_file *sync_file_merge(const char *name,
- struct sync_file *a, struct sync_file *b)
-{
- int num_fences = a->num_fences + b->num_fences;
- struct sync_file *sync_file;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
-
- sync_file = sync_file_alloc(size, name);
- if (!sync_file)
- return NULL;
-
- atomic_set(&sync_file->status, num_fences);
-
- /*
- * Assume sync_file a and b are both ordered and have no
- * duplicates with the same context.
- *
- * If a sync_file can only be created with sync_file_merge
- * and sync_file_create, this is a reasonable assumption.
- */
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].fence;
- struct fence *pt_b = b->cbs[i_b].fence;
-
- if (pt_a->context < pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_a);
-
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_b);
-
- i_b++;
- } else {
- if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_file_add_pt(sync_file, &i, pt_a);
- else
- sync_file_add_pt(sync_file, &i, pt_b);
-
- i_a++;
- i_b++;
- }
- }
-
- for (; i_a < a->num_fences; i_a++)
- sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
-
- for (; i_b < b->num_fences; i_b++)
- sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
-
- if (num_fences > i)
- atomic_sub(num_fences - i, &sync_file->status);
- sync_file->num_fences = i;
-
- sync_file_debug_add(sync_file);
- return sync_file;
-}
-EXPORT_SYMBOL(sync_file_merge);
-
static const char *android_fence_get_driver_name(struct fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
@@ -387,191 +219,3 @@ static const struct fence_ops android_fence_ops = {
.fence_value_str = android_fence_value_str,
.timeline_value_str = android_fence_timeline_value_str,
};
-
-static void sync_file_free(struct kref *kref)
-{
- struct sync_file *sync_file = container_of(kref, struct sync_file,
- kref);
- int i;
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- fence_remove_callback(sync_file->cbs[i].fence,
- &sync_file->cbs[i].cb);
- fence_put(sync_file->cbs[i].fence);
- }
-
- kfree(sync_file);
-}
-
-static int sync_file_release(struct inode *inode, struct file *file)
-{
- struct sync_file *sync_file = file->private_data;
-
- sync_file_debug_remove(sync_file);
-
- kref_put(&sync_file->kref, sync_file_free);
- return 0;
-}
-
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
-{
- struct sync_file *sync_file = file->private_data;
- int status;
-
- poll_wait(file, &sync_file->wq, wait);
-
- status = atomic_read(&sync_file->status);
-
- if (!status)
- return POLLIN;
- if (status < 0)
- return POLLERR;
- return 0;
-}
-
-static long sync_file_ioctl_merge(struct sync_file *sync_file,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_file *fence2, *fence3;
- struct sync_merge_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
-
- fence2 = sync_file_fdget(data.fd2);
- if (!fence2) {
- err = -ENOENT;
- goto err_put_fd;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_file_merge(data.name, sync_file, fence2);
- if (!fence3) {
- err = -ENOMEM;
- goto err_put_fence2;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fence3;
- }
-
- sync_file_install(fence3, fd);
- sync_file_put(fence2);
- return 0;
-
-err_put_fence3:
- sync_file_put(fence3);
-
-err_put_fence2:
- sync_file_put(fence2);
-
-err_put_fd:
- put_unused_fd(fd);
- return err;
-}
-
-static int sync_fill_fence_info(struct fence *fence, void *data, int size)
-{
- struct sync_fence_info *info = data;
-
- if (size < sizeof(*info))
- return -ENOMEM;
-
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
- sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
- sizeof(info->driver_name));
- if (fence_is_signaled(fence))
- info->status = fence->status >= 0 ? 1 : fence->status;
- else
- info->status = 0;
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
-
- return sizeof(*info);
-}
-
-static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
- unsigned long arg)
-{
- struct sync_file_info *info;
- __u32 size;
- __u32 len = 0;
- int ret, i;
-
- if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
- return -EFAULT;
-
- if (size < sizeof(struct sync_file_info))
- return -EINVAL;
-
- if (size > 4096)
- size = 4096;
-
- info = kzalloc(size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- strlcpy(info->name, sync_file->name, sizeof(info->name));
- info->status = atomic_read(&sync_file->status);
- if (info->status >= 0)
- info->status = !info->status;
-
- len = sizeof(struct sync_file_info);
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- struct fence *fence = sync_file->cbs[i].fence;
-
- ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len);
-
- if (ret < 0)
- goto out;
-
- len += ret;
- }
-
- info->len = len;
-
- if (copy_to_user((void __user *)arg, info, len))
- ret = -EFAULT;
- else
- ret = 0;
-
-out:
- kfree(info);
-
- return ret;
-}
-
-static long sync_file_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_file *sync_file = file->private_data;
-
- switch (cmd) {
- case SYNC_IOC_MERGE:
- return sync_file_ioctl_merge(sync_file, arg);
-
- case SYNC_IOC_FENCE_INFO:
- return sync_file_ioctl_fence_info(sync_file, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sync_file_fops = {
- .release = sync_file_release,
- .poll = sync_file_poll,
- .unlocked_ioctl = sync_file_ioctl,
- .compat_ioctl = sync_file_ioctl,
-};
-
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index d2a173433..b56885c14 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -20,10 +20,10 @@
#include <linux/spinlock.h>
#include <linux/fence.h>
-#include "uapi/sync.h"
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
struct sync_timeline;
-struct sync_file;
/**
* struct sync_timeline_ops - sync object implementation ops
@@ -86,38 +86,6 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
child_list_lock);
}
-struct sync_file_cb {
- struct fence_cb cb;
- struct fence *fence;
- struct sync_file *sync_file;
-};
-
-/**
- * struct sync_file - sync file to export to the userspace
- * @file: file representing this fence
- * @kref: reference count on fence.
- * @name: name of sync_file. Useful for debugging
- * @sync_file_list: membership in global file list
- * @num_fences number of sync_pts in the fence
- * @wq: wait queue for fence signaling
- * @status: 0: signaled, >0:active, <0: error
- * @cbs: sync_pts callback information
- */
-struct sync_file {
- struct file *file;
- struct kref kref;
- char name[32];
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_file_list;
-#endif
- int num_fences;
-
- wait_queue_head_t wq;
- atomic_t status;
-
- struct sync_file_cb cbs[];
-};
-
/*
* API for sync_timeline implementers
*/
@@ -167,61 +135,6 @@ void sync_timeline_signal(struct sync_timeline *obj);
*/
struct fence *sync_pt_create(struct sync_timeline *parent, int size);
-/**
- * sync_fence_create() - creates a sync fence
- * @name: name of fence to create
- * @fence: fence to add to the sync_fence
- *
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence.
- */
-struct sync_file *sync_file_create(const char *name, struct fence *fence);
-
-/*
- * API for sync_file consumers
- */
-
-/**
- * sync_file_merge() - merge two sync_files
- * @name: name of new fence
- * @a: sync_file a
- * @b: sync_file b
- *
- * Creates a new sync_file which contains copies of all the fences in both
- * @a and @b. @a and @b remain valid, independent sync_file. Returns the
- * new merged sync_file or NULL in case of error.
- */
-struct sync_file *sync_file_merge(const char *name,
- struct sync_file *a, struct sync_file *b);
-
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd: fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
-struct sync_file *sync_file_fdget(int fd);
-
-/**
- * sync_file_put() - puts a reference of a sync_file
- * @sync_file: sync_file to put
- *
- * Puts a reference on @sync_fence. If this is the last reference, the
- * sync_fil and all it's sync_pts will be freed
- */
-void sync_file_put(struct sync_file *sync_file);
-
-/**
- * sync_file_install() - installs a sync_file into a file descriptor
- * @sync_file: sync_file to install
- * @fd: file descriptor in which to install the fence
- *
- * Installs @sync_file into @fd. @fd's should be acquired through
- * get_unused_fd_flags(O_CLOEXEC).
- */
-void sync_file_install(struct sync_file *sync_file, int fd);
-
#ifdef CONFIG_DEBUG_FS
void sync_timeline_debug_add(struct sync_timeline *obj);
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 5a7ec58fb..5f57499c9 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/time64.h>
+#include <linux/sync_file.h>
#include "sw_sync.h"
#ifdef CONFIG_DEBUG_FS
@@ -262,8 +263,7 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
goto err;
}
- data.name[sizeof(data.name) - 1] = '\0';
- sync_file = sync_file_create(data.name, fence);
+ sync_file = sync_file_create(fence);
if (!sync_file) {
fence_put(fence);
err = -ENOMEM;
@@ -272,12 +272,12 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
data.fence = fd;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_file_put(sync_file);
+ fput(sync_file->file);
err = -EFAULT;
goto err;
}
- sync_file_install(sync_file, fd);
+ fd_install(fd, sync_file->file);
return 0;
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
deleted file mode 100644
index 914fd1005..000000000
--- a/drivers/staging/android/timed_gpio.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/* drivers/misc/timed_gpio.c
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/hrtimer.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/ktime.h>
-
-#include "timed_output.h"
-#include "timed_gpio.h"
-
-struct timed_gpio_data {
- struct timed_output_dev dev;
- struct hrtimer timer;
- spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
-{
- struct timed_gpio_data *data =
- container_of(timer, struct timed_gpio_data, timer);
-
- gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
- return HRTIMER_NORESTART;
-}
-
-static int gpio_get_time(struct timed_output_dev *dev)
-{
- struct timed_gpio_data *data;
- ktime_t t;
-
- data = container_of(dev, struct timed_gpio_data, dev);
-
- if (!hrtimer_active(&data->timer))
- return 0;
-
- t = hrtimer_get_remaining(&data->timer);
-
- return ktime_to_ms(t);
-}
-
-static void gpio_enable(struct timed_output_dev *dev, int value)
-{
- struct timed_gpio_data *data =
- container_of(dev, struct timed_gpio_data, dev);
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
-
- /* cancel previous timer and set GPIO according to value */
- hrtimer_cancel(&data->timer);
- gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
-
- if (value > 0) {
- if (value > data->max_timeout)
- value = data->max_timeout;
-
- hrtimer_start(&data->timer,
- ktime_set(value / 1000, (value % 1000) * 1000000),
- HRTIMER_MODE_REL);
- }
-
- spin_unlock_irqrestore(&data->lock, flags);
-}
-
-static int timed_gpio_probe(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio *cur_gpio;
- struct timed_gpio_data *gpio_data, *gpio_dat;
- int i, ret;
-
- if (!pdata)
- return -EBUSY;
-
- gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios,
- sizeof(*gpio_data), GFP_KERNEL);
- if (!gpio_data)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- cur_gpio = &pdata->gpios[i];
- gpio_dat = &gpio_data[i];
-
- hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- gpio_dat->timer.function = gpio_timer_func;
- spin_lock_init(&gpio_dat->lock);
-
- gpio_dat->dev.name = cur_gpio->name;
- gpio_dat->dev.get_time = gpio_get_time;
- gpio_dat->dev.enable = gpio_enable;
- ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
- if (ret < 0)
- goto err_out;
- ret = timed_output_dev_register(&gpio_dat->dev);
- if (ret < 0) {
- gpio_free(cur_gpio->gpio);
- goto err_out;
- }
-
- gpio_dat->gpio = cur_gpio->gpio;
- gpio_dat->max_timeout = cur_gpio->max_timeout;
- gpio_dat->active_low = cur_gpio->active_low;
- gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
- }
-
- platform_set_drvdata(pdev, gpio_data);
-
- return 0;
-
-err_out:
- while (--i >= 0) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return ret;
-}
-
-static int timed_gpio_remove(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return 0;
-}
-
-static struct platform_driver timed_gpio_driver = {
- .probe = timed_gpio_probe,
- .remove = timed_gpio_remove,
- .driver = {
- .name = TIMED_GPIO_NAME,
- },
-};
-
-module_platform_driver(timed_gpio_driver);
-
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_DESCRIPTION("timed gpio driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
deleted file mode 100644
index d29e169d7..000000000
--- a/drivers/staging/android/timed_gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* include/linux/timed_gpio.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_GPIO_H
-#define _LINUX_TIMED_GPIO_H
-
-#define TIMED_GPIO_NAME "timed-gpio"
-
-struct timed_gpio {
- const char *name;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-struct timed_gpio_platform_data {
- int num_gpios;
- struct timed_gpio *gpios;
-};
-
-#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
deleted file mode 100644
index aff9cdb00..000000000
--- a/drivers/staging/android/timed_output.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* drivers/misc/timed_output.c
- *
- * Copyright (C) 2009 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "timed_output: " fmt
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-
-#include "timed_output.h"
-
-static struct class *timed_output_class;
-static atomic_t device_count;
-
-static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int remaining = tdev->get_time(tdev);
-
- return sprintf(buf, "%d\n", remaining);
-}
-
-static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int value;
- int rc;
-
- rc = kstrtoint(buf, 0, &value);
- if (rc != 0)
- return -EINVAL;
-
- tdev->enable(tdev, value);
-
- return size;
-}
-static DEVICE_ATTR_RW(enable);
-
-static struct attribute *timed_output_attrs[] = {
- &dev_attr_enable.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(timed_output);
-
-static int create_timed_output_class(void)
-{
- if (!timed_output_class) {
- timed_output_class = class_create(THIS_MODULE, "timed_output");
- if (IS_ERR(timed_output_class))
- return PTR_ERR(timed_output_class);
- atomic_set(&device_count, 0);
- timed_output_class->dev_groups = timed_output_groups;
- }
-
- return 0;
-}
-
-int timed_output_dev_register(struct timed_output_dev *tdev)
-{
- int ret;
-
- if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
- return -EINVAL;
-
- ret = create_timed_output_class();
- if (ret < 0)
- return ret;
-
- tdev->index = atomic_inc_return(&device_count);
- tdev->dev = device_create(timed_output_class, NULL,
- MKDEV(0, tdev->index), NULL, "%s", tdev->name);
- if (IS_ERR(tdev->dev))
- return PTR_ERR(tdev->dev);
-
- dev_set_drvdata(tdev->dev, tdev);
- tdev->state = 0;
- return 0;
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_register);
-
-void timed_output_dev_unregister(struct timed_output_dev *tdev)
-{
- tdev->enable(tdev, 0);
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
-
-static int __init timed_output_init(void)
-{
- return create_timed_output_class();
-}
-device_initcall(timed_output_init);
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
deleted file mode 100644
index 13d2ca51c..000000000
--- a/drivers/staging/android/timed_output.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* include/linux/timed_output.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_OUTPUT_H
-#define _LINUX_TIMED_OUTPUT_H
-
-struct timed_output_dev {
- const char *name;
-
- /* enable the output and set the timer */
- void (*enable)(struct timed_output_dev *sdev, int timeout);
-
- /* returns the current number of milliseconds remaining on the timer */
- int (*get_time)(struct timed_output_dev *sdev);
-
- /* private data */
- struct device *dev;
- int index;
- int state;
-};
-
-int timed_output_dev_register(struct timed_output_dev *dev);
-void timed_output_dev_unregister(struct timed_output_dev *dev);
-
-#endif
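For context, a driver consumed the class removed above by filling in the three public fields of struct timed_output_dev and registering it. A hedged sketch reconstructed from the header just deleted; the vibe_* names are hypothetical:

static void vibe_enable(struct timed_output_dev *sdev, int timeout)
{
	/* switch the output on; turn it off again after 'timeout' ms */
}

static int vibe_get_time(struct timed_output_dev *sdev)
{
	return 0;	/* milliseconds left on the timer, 0 when idle */
}

static struct timed_output_dev vibe_dev = {
	.name		= "vibrator",
	.enable		= vibe_enable,
	.get_time	= vibe_get_time,
};

/* from probe(): creates /sys/class/timed_output/vibrator/enable */
static int vibe_register(void)
{
	return timed_output_dev_register(&vibe_dev);
}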
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
deleted file mode 100644
index a0cf357e5..000000000
--- a/drivers/staging/android/uapi/sync.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SYNC_H
-#define _UAPI_LINUX_SYNC_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_fence_info - detailed fence information
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the fence 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- */
-struct sync_fence_info {
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-};
-
-/**
- * struct sync_file_info - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_file_info returned to
- * userspace including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @sync_fence_info: array of sync_fence_info for every fence in the sync_file
- */
-struct sync_file_info {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 sync_fence_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_file_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_file_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info)
-
-#endif /* _UAPI_LINUX_SYNC_H */
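A hedged userspace sketch of how the legacy merge ioctl defined above was driven; it assumes the removed UAPI header is still available and that fd1 and fd2 are valid fence fds:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync.h>	/* the (removed) UAPI header above */

int sync_merge_legacy(int fd1, int fd2)
{
	struct sync_merge_data data;

	memset(&data, 0, sizeof(data));
	data.fd2 = fd2;
	strncpy(data.name, "merged", sizeof(data.name) - 1);

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
		return -1;

	return data.fence;	/* fd of the newly created merged fence */
}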
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index bb63ece4d..4de4fd06e 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -87,10 +87,10 @@ static const struct board_staging_clk lcdc0_clocks[] __initconst = {
static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
{
- .pdev = &lcdc0_device,
- .clocks = lcdc0_clocks,
- .nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
+ .pdev = &lcdc0_device,
+ .clocks = lcdc0_clocks,
+ .nclocks = ARRAY_SIZE(lcdc0_clocks),
+ .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
},
};
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 90c28016c..c7d7682b1 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -80,14 +80,14 @@ static void __comedi_buf_free(struct comedi_device *dev,
static void __comedi_buf_alloc(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned n_pages)
+ unsigned int n_pages)
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned long flags;
- unsigned i;
+ unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
@@ -208,7 +208,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
/* allocate new buffer */
if (new_size) {
- unsigned n_pages = new_size >> PAGE_SHIFT;
+ unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
@@ -302,7 +302,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
unsigned int count = 0;
- const unsigned num_sample_bytes = comedi_bytes_per_sample(s);
+ const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_free);
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- unsigned num_bytes;
+ unsigned int num_bytes;
if (!async)
return 0;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 7c7b477b0..629080f39 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -186,7 +186,7 @@ static bool comedi_clear_board_dev(struct comedi_device *dev)
return cleared;
}
-static struct comedi_device *comedi_clear_board_minor(unsigned minor)
+static struct comedi_device *comedi_clear_board_minor(unsigned int minor)
{
struct comedi_device *dev;
@@ -209,8 +209,8 @@ static void comedi_free_board_dev(struct comedi_device *dev)
}
}
-static struct comedi_subdevice
-*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor)
+static struct comedi_subdevice *
+comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor)
{
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
@@ -223,7 +223,7 @@ static struct comedi_subdevice
return s;
}
-static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_board_minor(unsigned int minor)
{
struct comedi_device *dev;
@@ -233,7 +233,8 @@ static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
return dev;
}
-static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
+static struct comedi_device *
+comedi_dev_get_from_subdevice_minor(unsigned int minor)
{
struct comedi_device *dev;
struct comedi_subdevice *s;
@@ -258,7 +259,7 @@ static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
* reference incremented. Return NULL if no COMEDI device exists with the
* specified minor device number.
*/
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor)
{
if (minor < COMEDI_NUM_BOARD_MINORS)
return comedi_dev_get_from_board_minor(minor);
@@ -342,7 +343,8 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
}
static int resize_async_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned new_size)
+ struct comedi_subdevice *s,
+ unsigned int new_size)
{
struct comedi_async *async = s->async;
int retval;
@@ -616,19 +618,20 @@ static struct attribute *comedi_dev_attrs[] = {
ATTRIBUTE_GROUPS(comedi_dev);
static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
+ unsigned int bits)
{
s->runflags &= ~bits;
}
static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
+ unsigned int bits)
{
s->runflags |= bits;
}
static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
- unsigned mask, unsigned bits)
+ unsigned int mask,
+ unsigned int bits)
{
unsigned long flags;
@@ -638,15 +641,15 @@ static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
spin_unlock_irqrestore(&s->spin_lock, flags);
}
-static unsigned __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+static unsigned int __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
return s->runflags;
}
-static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+static unsigned int comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
unsigned long flags;
- unsigned runflags;
+ unsigned int runflags;
spin_lock_irqsave(&s->spin_lock, flags);
runflags = __comedi_get_subdevice_runflags(s);
@@ -654,12 +657,12 @@ static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
return runflags;
}
-static bool comedi_is_runflags_running(unsigned runflags)
+static bool comedi_is_runflags_running(unsigned int runflags)
{
return runflags & COMEDI_SRF_RUNNING;
}
-static bool comedi_is_runflags_in_error(unsigned runflags)
+static bool comedi_is_runflags_in_error(unsigned int runflags)
{
return runflags & COMEDI_SRF_ERROR;
}
@@ -673,7 +676,7 @@ static bool comedi_is_runflags_in_error(unsigned runflags)
*/
bool comedi_is_subdevice_running(struct comedi_subdevice *s)
{
- unsigned runflags = comedi_get_subdevice_runflags(s);
+ unsigned int runflags = comedi_get_subdevice_runflags(s);
return comedi_is_runflags_running(runflags);
}
@@ -681,14 +684,14 @@ EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
+ unsigned int runflags = __comedi_get_subdevice_runflags(s);
return comedi_is_runflags_running(runflags);
}
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
+ unsigned int runflags = __comedi_get_subdevice_runflags(s);
return runflags & COMEDI_SRF_FREE_SPRIV;
}
@@ -2038,7 +2041,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- unsigned minor = iminor(file_inode(file));
+ unsigned int minor = iminor(file_inode(file));
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
int rc;
@@ -2342,7 +2345,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
- unsigned runflags;
+ unsigned int runflags;
unsigned int wp, n1, n2;
set_current_state(TASK_INTERRUPTIBLE);
@@ -2485,7 +2488,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
n = min_t(size_t, m, nbytes);
if (n == 0) {
- unsigned runflags = comedi_get_subdevice_runflags(s);
+ unsigned int runflags =
+ comedi_get_subdevice_runflags(s);
if (!comedi_is_runflags_running(runflags)) {
if (comedi_is_runflags_in_error(runflags))
@@ -2573,7 +2577,7 @@ out:
static int comedi_open(struct inode *inode, struct file *file)
{
- const unsigned minor = iminor(inode);
+ const unsigned int minor = iminor(inode);
struct comedi_file *cfp;
struct comedi_device *dev = comedi_dev_get_from_minor(minor);
int rc;
@@ -2733,7 +2737,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
{
struct comedi_device *dev;
struct device *csdev;
- unsigned i;
+ unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -2791,7 +2795,7 @@ int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
{
struct comedi_device *dev = s->device;
struct device *csdev;
- unsigned i;
+ unsigned int i;
mutex_lock(&comedi_subdevice_minor_table_lock);
for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) {
@@ -2841,7 +2845,7 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
static void comedi_cleanup_board_minors(void)
{
struct comedi_device *dev;
- unsigned i;
+ unsigned int i;
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
dev = comedi_clear_board_minor(i);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 115807215..dcb637665 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -173,7 +173,7 @@ struct comedi_subdevice {
void *lock;
void *busy;
- unsigned runflags;
+ unsigned int runflags;
spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
unsigned int io_bits;
@@ -566,7 +566,7 @@ struct comedi_device {
void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
int comedi_dev_put(struct comedi_device *dev);
bool comedi_is_subdevice_running(struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index fc153c705..f092e5037 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -564,7 +564,7 @@ unsigned int comedi_handle_events(struct comedi_device *dev,
if (events == 0)
return events;
- if (events & COMEDI_CB_CANCEL_MASK)
+ if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
s->cancel(dev, s);
comedi_event(dev, s);
@@ -575,38 +575,35 @@ EXPORT_SYMBOL_GPL(comedi_handle_events);
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct comedi_insn new_insn;
+ struct comedi_insn _insn;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int base_chan = (chan < 32) ? 0 : chan;
+ unsigned int _data[2];
int ret;
- static const unsigned channels_per_bitfield = 32;
-
- unsigned chan = CR_CHAN(insn->chanspec);
- const unsigned base_bitfield_channel =
- (chan < channels_per_bitfield) ? 0 : chan;
- unsigned int new_data[2];
- memset(new_data, 0, sizeof(new_data));
- memset(&new_insn, 0, sizeof(new_insn));
- new_insn.insn = INSN_BITS;
- new_insn.chanspec = base_bitfield_channel;
- new_insn.n = 2;
- new_insn.subdev = insn->subdev;
+ memset(_data, 0, sizeof(_data));
+ memset(&_insn, 0, sizeof(_insn));
+ _insn.insn = INSN_BITS;
+ _insn.chanspec = base_chan;
+ _insn.n = 2;
+ _insn.subdev = insn->subdev;
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
- new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
- : 0; /* bits */
+ _data[0] = 1 << (chan - base_chan); /* mask */
+ _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
}
- ret = s->insn_bits(dev, s, &new_insn, new_data);
+ ret = s->insn_bits(dev, s, &_insn, _data);
if (ret < 0)
return ret;
if (insn->insn == INSN_READ)
- data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
+ data[0] = (_data[1] >> (chan - base_chan)) & 1;
return 1;
}
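Illustrative values for the emulation above: channels below 32 map into the first 32-bit field, anything higher becomes its own base channel, so the insn_bits handler always sees a single-bit field:

/*
 * chan = 5  -> base_chan = 0,  _data[0] (mask) = 1 << 5
 * chan = 35 -> base_chan = 35, _data[0] (mask) = 1 << 0
 */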
@@ -628,6 +625,9 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
"async subdevices must have a do_cmdtest() function\n");
return -EINVAL;
}
+ if (!s->cancel)
+ dev_warn(dev->class_dev,
+ "async subdevices should have a cancel() function\n");
async = kzalloc(sizeof(*async), GFP_KERNEL);
if (!async)
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index d4b8c0195..f03e4c8c2 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -1,16 +1,14 @@
/*
- comedi/drivers/amcc_s5933.h
-
- Stuff for AMCC S5933 PCI Controller
-
- Author: Michal Dobes <dobes@tesnet.cz>
-
- Inspirated from general-purpose AMCC S5933 PCI Matchmaker driver
- made by Andrea Cisternino <acister@pcape1.pi.infn.it>
- and as result of espionage from MITE code made by David A. Schleef.
- Thanks to AMCC for their on-line documentation and bus master DMA
- example.
-*/
+ * Stuff for AMCC S5933 PCI Controller
+ *
+ * Author: Michal Dobes <dobes@tesnet.cz>
+ *
+ * Inspired by the general-purpose AMCC S5933 PCI Matchmaker driver
+ * made by Andrea Cisternino <acister@pcape1.pi.infn.it>
+ * and as a result of espionage from MITE code made by David A. Schleef.
+ * Thanks to AMCC for their on-line documentation and bus master DMA
+ * example.
+ */
#ifndef _AMCC_S5933_H_
#define _AMCC_S5933_H_
@@ -58,7 +56,7 @@
#define INTCSR_INTR_ASSERTED 0x800000
/****************************************************************************/
-/* AMCC - PCI non-volatile ram command register (byte 3 of master control/status register) */
+/* AMCC - PCI non-volatile ram command register (byte 3 of AMCC_OP_REG_MCSR) */
/****************************************************************************/
#define MCSR_NV_LOAD_LOW_ADDR 0x0
#define MCSR_NV_LOAD_HIGH_ADDR 0x20
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index d1539e798..f6e4e9842 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -101,7 +101,7 @@ struct dio200_subdev_8255 {
};
struct dio200_subdev_intr {
- spinlock_t spinlock;
+ spinlock_t spinlock; /* protects the 'active' flag */
unsigned int ofs;
unsigned int valid_isns;
unsigned int enabled_isns;
@@ -221,7 +221,7 @@ static void dio200_start_intr(struct comedi_device *dev,
struct dio200_subdev_intr *subpriv = s->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int n;
- unsigned isn_bits;
+ unsigned int isn_bits;
/* Determine interrupt sources to enable. */
isn_bits = 0;
@@ -284,9 +284,9 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
{
const struct dio200_board *board = dev->board_ptr;
struct dio200_subdev_intr *subpriv = s->private;
- unsigned triggered;
- unsigned intstat;
- unsigned cur_enabled;
+ unsigned int triggered;
+ unsigned int intstat;
+ unsigned int cur_enabled;
unsigned long flags;
triggered = 0;
@@ -439,7 +439,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
static int dio200_subdev_intr_init(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int offset,
- unsigned valid_isns)
+ unsigned int valid_isns)
{
const struct dio200_board *board = dev->board_ptr;
struct dio200_subdev_intr *subpriv;
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index b1946ce6e..58b0b6b1a 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -1,46 +1,44 @@
/*
- comedi/drivers/amplc_pc263.c
- Driver for Amplicon PC263 and PCI263 relay boards.
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * Driver for Amplicon PC263 relay board.
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: amplc_pc263
-Description: Amplicon PC263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PC263 (pc263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options:
- [0] - I/O port base address
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
+ * Driver: amplc_pc263
+ * Description: Amplicon PC263
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Devices: [Amplicon] PC263 (pc263)
+ * Updated: Fri, 12 Apr 2013 15:19:36 +0100
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ *
+ * The board appears as one subdevice, with 16 digital outputs, each
+ * connected to a reed-relay. Relay contacts are closed when output is 1.
+ * The state of the outputs can be read.
+ */
#include <linux/module.h>
#include "../comedidev.h"
/* PC263 registers */
-
-/*
- * Board descriptions for Amplicon PC263.
- */
+#define PC263_DO_0_7_REG 0x00
+#define PC263_DO_8_15_REG 0x01
struct pc263_board {
const char *name;
@@ -58,8 +56,8 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
+ outb(s->state & 0xff, dev->iobase + PC263_DO_0_7_REG);
+ outb((s->state >> 8) & 0xff, dev->iobase + PC263_DO_8_15_REG);
}
data[1] = s->state;
@@ -80,28 +78,30 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pc263_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pc263_do_insn_bits;
+
/* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
+ s->state = inb(dev->iobase + PC263_DO_0_7_REG) |
+ (inb(dev->iobase + PC263_DO_8_15_REG) << 8);
return 0;
}
static struct comedi_driver amplc_pc263_driver = {
- .driver_name = "amplc_pc263",
- .module = THIS_MODULE,
- .attach = pc263_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pc263_boards[0].name,
- .offset = sizeof(struct pc263_board),
- .num_names = ARRAY_SIZE(pc263_boards),
+ .driver_name = "amplc_pc263",
+ .module = THIS_MODULE,
+ .attach = pc263_attach,
+ .detach = comedi_legacy_detach,
+ .board_name = &pc263_boards[0].name,
+ .offset = sizeof(struct pc263_board),
+ .num_names = ARRAY_SIZE(pc263_boards),
};
module_comedi_driver(amplc_pc263_driver);
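A worked value for the named registers introduced above:

/*
 * Worked value (illustrative): s->state == 0xa5f0 writes 0xf0 to
 * PC263_DO_0_7_REG and 0xa5 to PC263_DO_8_15_REG, closing exactly
 * the relays whose state bits are set.
 */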
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index cac011fdd..2e6decf1b 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -132,48 +132,53 @@
* DACCON values.
*/
/* (r/w) Scan trigger. */
-#define PCI224_DACCON_TRIG_MASK (7 << 0)
-#define PCI224_DACCON_TRIG_NONE (0 << 0) /* none */
-#define PCI224_DACCON_TRIG_SW (1 << 0) /* software trig */
-#define PCI224_DACCON_TRIG_EXTP (2 << 0) /* ext +ve edge */
-#define PCI224_DACCON_TRIG_EXTN (3 << 0) /* ext -ve edge */
-#define PCI224_DACCON_TRIG_Z2CT0 (4 << 0) /* Z2 CT0 out */
-#define PCI224_DACCON_TRIG_Z2CT1 (5 << 0) /* Z2 CT1 out */
-#define PCI224_DACCON_TRIG_Z2CT2 (6 << 0) /* Z2 CT2 out */
+#define PCI224_DACCON_TRIG(x) (((x) & 0x7) << 0)
+#define PCI224_DACCON_TRIG_MASK PCI224_DACCON_TRIG(7)
+#define PCI224_DACCON_TRIG_NONE PCI224_DACCON_TRIG(0) /* none */
+#define PCI224_DACCON_TRIG_SW PCI224_DACCON_TRIG(1) /* soft trig */
+#define PCI224_DACCON_TRIG_EXTP PCI224_DACCON_TRIG(2) /* ext + edge */
+#define PCI224_DACCON_TRIG_EXTN PCI224_DACCON_TRIG(3) /* ext - edge */
+#define PCI224_DACCON_TRIG_Z2CT0 PCI224_DACCON_TRIG(4) /* Z2 CT0 out */
+#define PCI224_DACCON_TRIG_Z2CT1 PCI224_DACCON_TRIG(5) /* Z2 CT1 out */
+#define PCI224_DACCON_TRIG_Z2CT2 PCI224_DACCON_TRIG(6) /* Z2 CT2 out */
/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */
-#define PCI224_DACCON_POLAR_MASK (1 << 3)
-#define PCI224_DACCON_POLAR_UNI (0 << 3) /* range [0,Vref] */
-#define PCI224_DACCON_POLAR_BI (1 << 3) /* range [-Vref,Vref] */
+#define PCI224_DACCON_POLAR(x) (((x) & 0x1) << 3)
+#define PCI224_DACCON_POLAR_MASK PCI224_DACCON_POLAR(1)
+#define PCI224_DACCON_POLAR_UNI PCI224_DACCON_POLAR(0) /* [0,+V] */
+#define PCI224_DACCON_POLAR_BI PCI224_DACCON_POLAR(1) /* [-V,+V] */
/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */
-#define PCI224_DACCON_VREF_MASK (3 << 4)
-#define PCI224_DACCON_VREF_1_25 (0 << 4) /* Vref = 1.25V */
-#define PCI224_DACCON_VREF_2_5 (1 << 4) /* Vref = 2.5V */
-#define PCI224_DACCON_VREF_5 (2 << 4) /* Vref = 5V */
-#define PCI224_DACCON_VREF_10 (3 << 4) /* Vref = 10V */
+#define PCI224_DACCON_VREF(x) (((x) & 0x3) << 4)
+#define PCI224_DACCON_VREF_MASK PCI224_DACCON_VREF(3)
+#define PCI224_DACCON_VREF_1_25 PCI224_DACCON_VREF(0) /* 1.25V */
+#define PCI224_DACCON_VREF_2_5 PCI224_DACCON_VREF(1) /* 2.5V */
+#define PCI224_DACCON_VREF_5 PCI224_DACCON_VREF(2) /* 5V */
+#define PCI224_DACCON_VREF_10 PCI224_DACCON_VREF(3) /* 10V */
/* (r/w) Wraparound mode enable (to play back stored waveform). */
-#define PCI224_DACCON_FIFOWRAP (1 << 7)
+#define PCI224_DACCON_FIFOWRAP BIT(7)
/* (r/w) FIFO enable. It MUST be set! */
-#define PCI224_DACCON_FIFOENAB (1 << 8)
+#define PCI224_DACCON_FIFOENAB BIT(8)
/* (r/w) FIFO interrupt trigger level (most values are not very useful). */
-#define PCI224_DACCON_FIFOINTR_MASK (7 << 9)
-#define PCI224_DACCON_FIFOINTR_EMPTY (0 << 9) /* when empty */
-#define PCI224_DACCON_FIFOINTR_NEMPTY (1 << 9) /* when not empty */
-#define PCI224_DACCON_FIFOINTR_NHALF (2 << 9) /* when not half full */
-#define PCI224_DACCON_FIFOINTR_HALF (3 << 9) /* when half full */
-#define PCI224_DACCON_FIFOINTR_NFULL (4 << 9) /* when not full */
-#define PCI224_DACCON_FIFOINTR_FULL (5 << 9) /* when full */
+#define PCI224_DACCON_FIFOINTR(x) (((x) & 0x7) << 9)
+#define PCI224_DACCON_FIFOINTR_MASK PCI224_DACCON_FIFOINTR(7)
+#define PCI224_DACCON_FIFOINTR_EMPTY PCI224_DACCON_FIFOINTR(0) /* empty */
+#define PCI224_DACCON_FIFOINTR_NEMPTY PCI224_DACCON_FIFOINTR(1) /* !empty */
+#define PCI224_DACCON_FIFOINTR_NHALF PCI224_DACCON_FIFOINTR(2) /* !half */
+#define PCI224_DACCON_FIFOINTR_HALF PCI224_DACCON_FIFOINTR(3) /* half */
+#define PCI224_DACCON_FIFOINTR_NFULL PCI224_DACCON_FIFOINTR(4) /* !full */
+#define PCI224_DACCON_FIFOINTR_FULL PCI224_DACCON_FIFOINTR(5) /* full */
/* (r-o) FIFO fill level. */
-#define PCI224_DACCON_FIFOFL_MASK (7 << 12)
-#define PCI224_DACCON_FIFOFL_EMPTY (1 << 12) /* 0 */
-#define PCI224_DACCON_FIFOFL_ONETOHALF (0 << 12) /* [1,2048] */
-#define PCI224_DACCON_FIFOFL_HALFTOFULL (4 << 12) /* [2049,4095] */
-#define PCI224_DACCON_FIFOFL_FULL (6 << 12) /* 4096 */
+#define PCI224_DACCON_FIFOFL(x) (((x) & 0x7) << 12)
+#define PCI224_DACCON_FIFOFL_MASK PCI224_DACCON_FIFOFL(7)
+#define PCI224_DACCON_FIFOFL_EMPTY PCI224_DACCON_FIFOFL(1) /* 0 */
+#define PCI224_DACCON_FIFOFL_ONETOHALF PCI224_DACCON_FIFOFL(0) /* 1-2048 */
+#define PCI224_DACCON_FIFOFL_HALFTOFULL PCI224_DACCON_FIFOFL(4) /* 2049-4095 */
+#define PCI224_DACCON_FIFOFL_FULL PCI224_DACCON_FIFOFL(6) /* 4096 */
/* (r-o) DAC busy flag. */
-#define PCI224_DACCON_BUSY (1 << 15)
+#define PCI224_DACCON_BUSY BIT(15)
/* (w-o) FIFO reset. */
-#define PCI224_DACCON_FIFORESET (1 << 12)
+#define PCI224_DACCON_FIFORESET BIT(12)
/* (w-o) Global reset (not sure what it does). */
-#define PCI224_DACCON_GLOBALRESET (1 << 13)
+#define PCI224_DACCON_GLOBALRESET BIT(13)
/*
* DAC FIFO size.
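The parameterized forms above make the field layout of DACCON explicit; a minimal sketch of composing a register value from them, with an arbitrary combination chosen purely for illustration:

static unsigned short pci224_example_daccon(void)
{
	/* arbitrary combination, for illustration only */
	return PCI224_DACCON_TRIG_Z2CT0 |
	       PCI224_DACCON_POLAR_BI |
	       PCI224_DACCON_VREF_5 |
	       PCI224_DACCON_FIFOENAB;
}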
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 907c39cc8..42945de31 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -237,47 +237,50 @@
/*
* DACCON read-write values.
*/
-#define PCI230_DAC_OR_UNI (0 << 0) /* Output range unipolar */
-#define PCI230_DAC_OR_BIP (1 << 0) /* Output range bipolar */
-#define PCI230_DAC_OR_MASK (1 << 0)
+#define PCI230_DAC_OR(x) (((x) & 0x1) << 0)
+#define PCI230_DAC_OR_UNI PCI230_DAC_OR(0) /* Output unipolar */
+#define PCI230_DAC_OR_BIP PCI230_DAC_OR(1) /* Output bipolar */
+#define PCI230_DAC_OR_MASK PCI230_DAC_OR(1)
/*
* The following applies only if DAC FIFO support is enabled in the EXTFUNC
* register (and only for PCI230+ hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_EN (1 << 8) /* FIFO enable */
+#define PCI230P2_DAC_FIFO_EN BIT(8) /* FIFO enable */
/*
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_TRIG_NONE (0 << 2) /* No trigger */
-#define PCI230P2_DAC_TRIG_SW (1 << 2) /* Software trigger trigger */
-#define PCI230P2_DAC_TRIG_EXTP (2 << 2) /* EXTTRIG +ve edge trigger */
-#define PCI230P2_DAC_TRIG_EXTN (3 << 2) /* EXTTRIG -ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT0 (4 << 2) /* CT0-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT1 (5 << 2) /* CT1-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT2 (6 << 2) /* CT2-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_MASK (7 << 2)
-#define PCI230P2_DAC_FIFO_WRAP (1 << 7) /* FIFO wraparound mode */
-#define PCI230P2_DAC_INT_FIFO_EMPTY (0 << 9) /* FIFO interrupt empty */
-#define PCI230P2_DAC_INT_FIFO_NEMPTY (1 << 9)
-#define PCI230P2_DAC_INT_FIFO_NHALF (2 << 9) /* FIFO intr not half full */
-#define PCI230P2_DAC_INT_FIFO_HALF (3 << 9)
-#define PCI230P2_DAC_INT_FIFO_NFULL (4 << 9) /* FIFO interrupt not full */
-#define PCI230P2_DAC_INT_FIFO_FULL (5 << 9)
-#define PCI230P2_DAC_INT_FIFO_MASK (7 << 9)
+#define PCI230P2_DAC_TRIG(x) (((x) & 0x7) << 2)
+#define PCI230P2_DAC_TRIG_NONE PCI230P2_DAC_TRIG(0) /* none */
+#define PCI230P2_DAC_TRIG_SW PCI230P2_DAC_TRIG(1) /* soft trig */
+#define PCI230P2_DAC_TRIG_EXTP PCI230P2_DAC_TRIG(2) /* ext + edge */
+#define PCI230P2_DAC_TRIG_EXTN PCI230P2_DAC_TRIG(3) /* ext - edge */
+#define PCI230P2_DAC_TRIG_Z2CT0 PCI230P2_DAC_TRIG(4) /* Z2 CT0 out */
+#define PCI230P2_DAC_TRIG_Z2CT1 PCI230P2_DAC_TRIG(5) /* Z2 CT1 out */
+#define PCI230P2_DAC_TRIG_Z2CT2 PCI230P2_DAC_TRIG(6) /* Z2 CT2 out */
+#define PCI230P2_DAC_TRIG_MASK PCI230P2_DAC_TRIG(7)
+#define PCI230P2_DAC_FIFO_WRAP BIT(7) /* FIFO wraparound mode */
+#define PCI230P2_DAC_INT_FIFO(x) (((x) & 0x7) << 9)
+#define PCI230P2_DAC_INT_FIFO_EMPTY PCI230P2_DAC_INT_FIFO(0) /* empty */
+#define PCI230P2_DAC_INT_FIFO_NEMPTY PCI230P2_DAC_INT_FIFO(1) /* !empty */
+#define PCI230P2_DAC_INT_FIFO_NHALF PCI230P2_DAC_INT_FIFO(2) /* !half */
+#define PCI230P2_DAC_INT_FIFO_HALF PCI230P2_DAC_INT_FIFO(3) /* half */
+#define PCI230P2_DAC_INT_FIFO_NFULL PCI230P2_DAC_INT_FIFO(4) /* !full */
+#define PCI230P2_DAC_INT_FIFO_FULL PCI230P2_DAC_INT_FIFO(5) /* full */
+#define PCI230P2_DAC_INT_FIFO_MASK PCI230P2_DAC_INT_FIFO(7)
/*
* DACCON read-only values.
*/
-#define PCI230_DAC_BUSY (1 << 1) /* DAC busy. */
+#define PCI230_DAC_BUSY BIT(1) /* DAC busy. */
/*
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED (1 << 5) /* Underrun error */
-#define PCI230P2_DAC_FIFO_EMPTY (1 << 13) /* FIFO empty */
-#define PCI230P2_DAC_FIFO_FULL (1 << 14) /* FIFO full */
-#define PCI230P2_DAC_FIFO_HALF (1 << 15) /* FIFO half full */
+#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED BIT(5) /* Underrun error */
+#define PCI230P2_DAC_FIFO_EMPTY BIT(13) /* FIFO empty */
+#define PCI230P2_DAC_FIFO_FULL BIT(14) /* FIFO full */
+#define PCI230P2_DAC_FIFO_HALF BIT(15) /* FIFO half full */
/*
* DACCON write-only, transient values.
@@ -286,8 +289,8 @@
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR (1 << 5) /* Clear underrun */
-#define PCI230P2_DAC_FIFO_RESET (1 << 12) /* FIFO reset */
+#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR BIT(5) /* Clear underrun */
+#define PCI230P2_DAC_FIFO_RESET BIT(12) /* FIFO reset */
/*
* PCI230+ hardware version 2 DAC FIFO levels.
@@ -304,44 +307,48 @@
/*
* ADCCON read/write values.
*/
-#define PCI230_ADC_TRIG_NONE (0 << 0) /* No trigger */
-#define PCI230_ADC_TRIG_SW (1 << 0) /* Software trigger trigger */
-#define PCI230_ADC_TRIG_EXTP (2 << 0) /* EXTTRIG +ve edge trigger */
-#define PCI230_ADC_TRIG_EXTN (3 << 0) /* EXTTRIG -ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT0 (4 << 0) /* CT0-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT1 (5 << 0) /* CT1-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT2 (6 << 0) /* CT2-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_MASK (7 << 0)
-#define PCI230_ADC_IR_UNI (0 << 3) /* Input range unipolar */
-#define PCI230_ADC_IR_BIP (1 << 3) /* Input range bipolar */
-#define PCI230_ADC_IR_MASK (1 << 3)
-#define PCI230_ADC_IM_SE (0 << 4) /* Input mode single ended */
-#define PCI230_ADC_IM_DIF (1 << 4) /* Input mode differential */
-#define PCI230_ADC_IM_MASK (1 << 4)
-#define PCI230_ADC_FIFO_EN (1 << 8) /* FIFO enable */
-#define PCI230_ADC_INT_FIFO_EMPTY (0 << 9)
-#define PCI230_ADC_INT_FIFO_NEMPTY (1 << 9) /* FIFO interrupt not empty */
-#define PCI230_ADC_INT_FIFO_NHALF (2 << 9)
-#define PCI230_ADC_INT_FIFO_HALF (3 << 9) /* FIFO interrupt half full */
-#define PCI230_ADC_INT_FIFO_NFULL (4 << 9)
-#define PCI230_ADC_INT_FIFO_FULL (5 << 9) /* FIFO interrupt full */
-#define PCI230P_ADC_INT_FIFO_THRESH (7 << 9) /* FIFO interrupt threshold */
-#define PCI230_ADC_INT_FIFO_MASK (7 << 9)
+#define PCI230_ADC_TRIG(x) (((x) & 0x7) << 0)
+#define PCI230_ADC_TRIG_NONE PCI230_ADC_TRIG(0) /* none */
+#define PCI230_ADC_TRIG_SW PCI230_ADC_TRIG(1) /* soft trig */
+#define PCI230_ADC_TRIG_EXTP PCI230_ADC_TRIG(2) /* ext + edge */
+#define PCI230_ADC_TRIG_EXTN PCI230_ADC_TRIG(3) /* ext - edge */
+#define PCI230_ADC_TRIG_Z2CT0 PCI230_ADC_TRIG(4) /* Z2 CT0 out */
+#define PCI230_ADC_TRIG_Z2CT1 PCI230_ADC_TRIG(5) /* Z2 CT1 out */
+#define PCI230_ADC_TRIG_Z2CT2 PCI230_ADC_TRIG(6) /* Z2 CT2 out */
+#define PCI230_ADC_TRIG_MASK PCI230_ADC_TRIG(7)
+#define PCI230_ADC_IR(x) (((x) & 0x1) << 3)
+#define PCI230_ADC_IR_UNI PCI230_ADC_IR(0) /* Input unipolar */
+#define PCI230_ADC_IR_BIP PCI230_ADC_IR(1) /* Input bipolar */
+#define PCI230_ADC_IR_MASK PCI230_ADC_IR(1)
+#define PCI230_ADC_IM(x) (((x) & 0x1) << 4)
+#define PCI230_ADC_IM_SE PCI230_ADC_IM(0) /* single ended */
+#define PCI230_ADC_IM_DIF PCI230_ADC_IM(1) /* differential */
+#define PCI230_ADC_IM_MASK PCI230_ADC_IM(1)
+#define PCI230_ADC_FIFO_EN BIT(8) /* FIFO enable */
+#define PCI230_ADC_INT_FIFO(x) (((x) & 0x7) << 9)
+#define PCI230_ADC_INT_FIFO_EMPTY PCI230_ADC_INT_FIFO(0) /* empty */
+#define PCI230_ADC_INT_FIFO_NEMPTY PCI230_ADC_INT_FIFO(1) /* !empty */
+#define PCI230_ADC_INT_FIFO_NHALF PCI230_ADC_INT_FIFO(2) /* !half */
+#define PCI230_ADC_INT_FIFO_HALF PCI230_ADC_INT_FIFO(3) /* half */
+#define PCI230_ADC_INT_FIFO_NFULL PCI230_ADC_INT_FIFO(4) /* !full */
+#define PCI230_ADC_INT_FIFO_FULL PCI230_ADC_INT_FIFO(5) /* full */
+#define PCI230P_ADC_INT_FIFO_THRESH PCI230_ADC_INT_FIFO(7) /* threshold */
+#define PCI230_ADC_INT_FIFO_MASK PCI230_ADC_INT_FIFO(7)
/*
* ADCCON write-only, transient values.
*/
-#define PCI230_ADC_FIFO_RESET (1 << 12) /* FIFO reset */
-#define PCI230_ADC_GLOB_RESET (1 << 13) /* Global reset */
+#define PCI230_ADC_FIFO_RESET BIT(12) /* FIFO reset */
+#define PCI230_ADC_GLOB_RESET BIT(13) /* Global reset */
/*
* ADCCON read-only values.
*/
-#define PCI230_ADC_BUSY (1 << 15) /* ADC busy */
-#define PCI230_ADC_FIFO_EMPTY (1 << 12) /* FIFO empty */
-#define PCI230_ADC_FIFO_FULL (1 << 13) /* FIFO full */
-#define PCI230_ADC_FIFO_HALF (1 << 14) /* FIFO half full */
-#define PCI230_ADC_FIFO_FULL_LATCHED (1 << 5) /* FIFO overrun occurred */
+#define PCI230_ADC_BUSY BIT(15) /* ADC busy */
+#define PCI230_ADC_FIFO_EMPTY BIT(12) /* FIFO empty */
+#define PCI230_ADC_FIFO_FULL BIT(13) /* FIFO full */
+#define PCI230_ADC_FIFO_HALF BIT(14) /* FIFO half full */
+#define PCI230_ADC_FIFO_FULL_LATCHED BIT(5) /* FIFO overrun occurred */
/*
* PCI230 ADC FIFO levels.
@@ -353,10 +360,10 @@
* PCI230+ EXTFUNC values.
*/
/* Route EXTTRIG pin to external gate inputs. */
-#define PCI230P_EXTFUNC_GAT_EXTTRIG (1 << 0)
+#define PCI230P_EXTFUNC_GAT_EXTTRIG BIT(0)
/* PCI230+ hardware version 2 values. */
/* Allow DAC FIFO to be enabled. */
-#define PCI230P2_EXTFUNC_DACFIFO (1 << 1)
+#define PCI230P2_EXTFUNC_DACFIFO BIT(1)
/*
* Counter/timer clock input configuration sources.
@@ -379,8 +386,12 @@
#define GAT_GND 1 /* GND (i.e. disabled) */
#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-/* Macro to construct gate input configuration register value. */
-#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
+
+static inline unsigned int pci230_gat_config(unsigned int chan,
+ unsigned int src)
+{
+ return ((chan & 3) << 3) | (src & 7);
+}
/*
* Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260:
@@ -398,20 +409,20 @@
* Interrupt enables/status register values.
*/
#define PCI230_INT_DISABLE 0
-#define PCI230_INT_PPI_C0 (1 << 0)
-#define PCI230_INT_PPI_C3 (1 << 1)
-#define PCI230_INT_ADC (1 << 2)
-#define PCI230_INT_ZCLK_CT1 (1 << 5)
+#define PCI230_INT_PPI_C0 BIT(0)
+#define PCI230_INT_PPI_C3 BIT(1)
+#define PCI230_INT_ADC BIT(2)
+#define PCI230_INT_ZCLK_CT1 BIT(5)
/* For PCI230+ hardware version 2 when DAC FIFO enabled. */
-#define PCI230P2_INT_DAC (1 << 4)
+#define PCI230P2_INT_DAC BIT(4)
/*
* (Potentially) shared resources and their owners
*/
enum {
- RES_Z2CT0 = (1U << 0), /* Z2-CT0 */
- RES_Z2CT1 = (1U << 1), /* Z2-CT1 */
- RES_Z2CT2 = (1U << 2) /* Z2-CT2 */
+ RES_Z2CT0 = BIT(0), /* Z2-CT0 */
+ RES_Z2CT1 = BIT(1), /* Z2-CT1 */
+ RES_Z2CT2 = BIT(2) /* Z2-CT2 */
};
enum {
@@ -626,10 +637,10 @@ static void pci230_release_all_resources(struct comedi_device *dev,
pci230_release_shared(dev, (unsigned char)~0, owner);
}
-static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
+static unsigned int pci230_divide_ns(u64 ns, unsigned int timebase,
unsigned int flags)
{
- uint64_t div;
+ u64 div;
unsigned int rem;
div = ns;
@@ -652,7 +663,7 @@ static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
* Given desired period in ns, returns the required internal clock source
* and gets the initial count.
*/
-static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count,
+static unsigned int pci230_choose_clk_count(u64 ns, unsigned int *count,
unsigned int flags)
{
unsigned int clk_src, cnt;
@@ -676,7 +687,7 @@ static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int flags)
}
static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
- unsigned int mode, uint64_t ns,
+ unsigned int mode, u64 ns,
unsigned int flags)
{
unsigned int clk_src;
@@ -1263,7 +1274,8 @@ static void pci230_ao_start(struct comedi_device *dev,
irqflags);
}
/* Set CT1 gate high to start counting. */
- outb(GAT_CONFIG(1, GAT_VCC), dev->iobase + PCI230_ZGAT_SCE);
+ outb(pci230_gat_config(1, GAT_VCC),
+ dev->iobase + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
async->inttrig = pci230_ao_inttrig_scan_begin;
@@ -1351,7 +1363,8 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* cmd->scan_begin_arg is sampling period in ns.
* Gate it off for now.
*/
- outb(GAT_CONFIG(1, GAT_GND), dev->iobase + PCI230_ZGAT_SCE);
+ outb(pci230_gat_config(1, GAT_GND),
+ dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
cmd->flags);
@@ -1792,9 +1805,9 @@ static int pci230_ai_inttrig_scan_begin(struct comedi_device *dev,
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
if (devpriv->ai_cmd_started) {
/* Trigger scan by waggling CT0 gate source. */
- zgat = GAT_CONFIG(0, GAT_GND);
+ zgat = pci230_gat_config(0, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
}
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
@@ -1926,20 +1939,20 @@ static void pci230_ai_start(struct comedi_device *dev,
* Conversion timer CT2 needs to be gated by
* inverted output of monostable CT2.
*/
- zgat = GAT_CONFIG(2, GAT_NOUTNM2);
+ zgat = pci230_gat_config(2, GAT_NOUTNM2);
} else {
/*
* Conversion timer CT2 needs to be gated on
* continuously.
*/
- zgat = GAT_CONFIG(2, GAT_VCC);
+ zgat = pci230_gat_config(2, GAT_VCC);
}
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Set monostable CT0 trigger source. */
switch (cmd->scan_begin_src) {
default:
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
break;
case TRIG_EXT:
/*
@@ -1950,21 +1963,21 @@ static void pci230_ai_start(struct comedi_device *dev,
* input in order to use it as an external scan
* trigger.
*/
- zgat = GAT_CONFIG(0, GAT_EXT);
+ zgat = pci230_gat_config(0, GAT_EXT);
break;
case TRIG_TIMER:
/*
* Monostable CT0 triggered by rising edge on
* inverted output of CT1 (falling edge on CT1).
*/
- zgat = GAT_CONFIG(0, GAT_NOUTNM2);
+ zgat = pci230_gat_config(0, GAT_NOUTNM2);
break;
case TRIG_INT:
/*
* Monostable CT0 is triggered by inttrig
* function waggling the CT0 gate source.
*/
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
break;
}
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
@@ -1974,7 +1987,7 @@ static void pci230_ai_start(struct comedi_device *dev,
* Scan period timer CT1 needs to be
* gated on to start counting.
*/
- zgat = GAT_CONFIG(1, GAT_VCC);
+ zgat = pci230_gat_config(1, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
@@ -2216,7 +2229,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* Note, counter/timer output 2 can be monitored on the
* connector: PCI230 pin 21, PCI260 pin 18.
*/
- zgat = GAT_CONFIG(2, GAT_GND);
+ zgat = pci230_gat_config(2, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
/* Set counter/timer 2 to the specified conversion period. */
pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg,
@@ -2234,10 +2247,10 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* monostable to stop it triggering. The trigger
* source will be changed later.
*/
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1,
- ((uint64_t)cmd->convert_arg *
+ ((u64)cmd->convert_arg *
cmd->scan_end_arg),
CMDF_ROUND_UP);
if (cmd->scan_begin_src == TRIG_TIMER) {
@@ -2247,7 +2260,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
*
* Set up CT1 but gate it off for now.
*/
- zgat = GAT_CONFIG(1, GAT_GND);
+ zgat = pci230_gat_config(1, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index b6768aa90..8d4069bc5 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -1,49 +1,53 @@
/*
- comedi/drivers/amplc_pci263.c
- Driver for Amplicon PCI263 relay board.
+ * Driver for Amplicon PCI263 relay board.
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: amplc_pci263
-Description: Amplicon PCI263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PCI263 (amplc_pci263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options: not applicable, uses PCI auto config
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
+ * Driver: amplc_pci263
+ * Description: Amplicon PCI263
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Devices: [Amplicon] PCI263 (amplc_pci263)
+ * Updated: Fri, 12 Apr 2013 15:19:36 +0100
+ * Status: works
+ *
+ * Configuration options: not applicable, uses PCI auto config
+ *
+ * The board appears as one subdevice, with 16 digital outputs, each
+ * connected to a reed-relay. Relay contacts are closed when output is 1.
+ * The state of the outputs can be read.
+ */
#include <linux/module.h>
#include "../comedi_pci.h"
+/* PCI263 registers */
+#define PCI263_DO_0_7_REG 0x00
+#define PCI263_DO_8_15_REG 0x01
+
static int pci263_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
+ outb(s->state & 0xff, dev->iobase + PCI263_DO_0_7_REG);
+ outb((s->state >> 8) & 0xff, dev->iobase + PCI263_DO_8_15_REG);
}
data[1] = s->state;
@@ -67,16 +71,18 @@ static int pci263_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci263_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pci263_do_insn_bits;
+
/* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
+ s->state = inb(dev->iobase + PCI263_DO_0_7_REG) |
+ (inb(dev->iobase + PCI263_DO_8_15_REG) << 8);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 1a109e30d..8ee732571 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -47,8 +47,8 @@
*/
#define C6XDIGIO_DATA_REG 0x00
#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
-#define C6XDIGIO_DATA_PWM (1 << 5)
-#define C6XDIGIO_DATA_ENCODER (1 << 6)
+#define C6XDIGIO_DATA_PWM BIT(5)
+#define C6XDIGIO_DATA_ENCODER BIT(6)
#define C6XDIGIO_STATUS_REG 0x01
#define C6XDIGIO_CTRL_REG 0x02
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/drivers/staging/comedi/drivers/comedi_8254.h
index f4610ead6..a12c29455 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ b/drivers/staging/comedi/drivers/comedi_8254.h
@@ -53,13 +53,15 @@ struct comedi_subdevice;
#define I8254_COUNTER2_REG 0x02
#define I8254_CTRL_REG 0x03
#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
-#define I8254_CTRL_READBACK_COUNT ((3 << 6) | (1 << 4))
-#define I8254_CTRL_READBACK_STATUS ((3 << 6) | (1 << 5))
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
-#define I8254_CTRL_LATCH (0 << 4)
-#define I8254_CTRL_LSB_ONLY (1 << 4)
-#define I8254_CTRL_MSB_ONLY (2 << 4)
-#define I8254_CTRL_LSB_MSB (3 << 4)
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
/* counter maps zero to 0x10000 */
#define I8254_MAX_COUNT 0x10000
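
For reference, the new I8254_CTRL_READBACK(x) helper composes the 8254
read-back command byte: I8254_CTRL_SEL_CTR(3) supplies the two function-select
bits and BIT(4)/BIT(5) choose whether count or status is read back, so the
values are identical to the old open-coded defines. A quick stand-alone check
(macros copied from the hunk, BIT() expanded by hand):

    #include <assert.h>

    #define BIT(n)                  (1UL << (n))
    #define I8254_CTRL_SEL_CTR(x)   ((x) << 6)
    #define I8254_CTRL_READBACK(x)  (I8254_CTRL_SEL_CTR(3) | BIT(x))

    int main(void)
    {
            assert(I8254_CTRL_READBACK(4) == 0xd0); /* old (3 << 6) | (1 << 4) */
            assert(I8254_CTRL_READBACK(5) == 0xe0); /* old (3 << 6) | (1 << 5) */
            return 0;
    }
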
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 87d86130d..2cd5aa687 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -26,7 +26,7 @@
* Much of the functionality of this driver was determined from reading
* the source code for the Windows driver.
*
- * The FPGA on the board requires fimware, which is available from
+ * The FPGA on the board requires firmware, which is available from
* http://www.comedi.org in the comedi_nonfree_firmware tarball.
*
* Configuration options: not applicable, uses PCI auto config
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 3be10963f..e0a34c268 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1,98 +1,82 @@
/*
- comedi/drivers/das1800.c
- Driver for Keitley das1700/das1800 series boards
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das1800
-Description: Keithley Metrabyte DAS1800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
- DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
- DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
- DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
- DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
- DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
- DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
- DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
- DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
- DAS-1802AO (das-1802ao)
-Status: works
-
-The waveform analog output on the 'ao' cards is not supported.
-If you need it, send me (Frank Hess) an email.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
- [2] - DMA0 (optional, requires irq)
- [3] - DMA1 (optional, requires irq and dma0)
-*/
-/*
+ * Comedi driver for Keithley DAS-1700/DAS-1800 series boards
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-This driver supports the following Keithley boards:
-
-das-1701st
-das-1701st-da
-das-1701ao
-das-1702st
-das-1702st-da
-das-1702hr
-das-1702hr-da
-das-1702ao
-das-1801st
-das-1801st-da
-das-1801hc
-das-1801ao
-das-1802st
-das-1802st-da
-das-1802hr
-das-1802hr-da
-das-1802hc
-das-1802ao
-
-Options:
- [0] - base io address
- [1] - irq (optional, required for timed or externally triggered conversions)
- [2] - dma0 (optional, requires irq)
- [3] - dma1 (optional, requires irq and dma0)
-
-irq can be omitted, although the cmd interface will not work without it.
-
-analog input cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW)
- stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE
-
-scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's
-'burst mode' which limits the valid conversion time to 64 microseconds
-(convert_arg <= 64000). This limitation does not apply if scan_begin_src
-is TRIG_FOLLOW.
-
-NOTES:
-Only the DAS-1801ST has been tested by me.
-Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
-
-TODO:
- Make it automatically allocate irq and dma channels if they are not specified
- Add support for analog out on 'ao' cards
- read insn for analog out
-*/
+/*
+ * Driver: das1800
+ * Description: Keithley Metrabyte DAS1800 (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
+ * DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
+ * DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
+ * DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
+ * DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
+ * DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
+ * DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
+ * DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
+ * DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
+ * DAS-1802AO (das-1802ao)
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, required for analog input cmd support)
+ * [2] - DMA0 (optional, requires irq)
+ * [3] - DMA1 (optional, requires irq and dma0)
+ *
+ * analog input cmd triggers supported:
+ *
+ * start_src TRIG_NOW command starts immediately
+ * TRIG_EXT command starts on external pin TGIN
+ *
+ * scan_begin_src TRIG_FOLLOW paced/external scans start immediately
+ * TRIG_TIMER burst scans start periodically
+ * TRIG_EXT burst scans start on external pin XPCLK
+ *
+ * scan_end_src TRIG_COUNT scan ends after last channel
+ *
+ * convert_src TRIG_TIMER paced/burst conversions are timed
+ * TRIG_EXT conversions on external pin XPCLK
+ * (requires scan_begin_src == TRIG_FOLLOW)
+ *
+ * stop_src TRIG_COUNT command stops after stop_arg scans
+ * TRIG_EXT command stops on external pin TGIN
+ * TRIG_NONE command runs until canceled
+ *
+ * If TRIG_EXT is used for both the start_src and stop_src, the first TGIN
+ * trigger starts the command and the second trigger stops it. If only one
+ * of them is TRIG_EXT, the first trigger starts or stops the command
+ * accordingly.
+ * The external pin TGIN is normally set for negative edge triggering. It
+ * can be set to positive edge with the CR_INVERT flag. If TRIG_EXT is used
+ * for both the start_src and stop_src they must have the same polarity.
+ *
+ * The slowest conversion rate for 'burst' scans is one conversion per
+ * 64 microseconds (convert_arg <= 64000). This limitation does not apply
+ * to 'paced' scans. The fastest conversion rate is limited by the board
+ * (convert_arg >= ai_speed) and is not always achievable depending on
+ * the board setup (see user manual).
+ *
+ * NOTES:
+ * Only the DAS-1801ST has been tested by me.
+ * Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
+ *
+ * The waveform analog output on the 'ao' cards is not supported.
+ * If you need it, send me (Frank Hess) an email.
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -107,7 +91,6 @@ TODO:
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
#define FIFO_SIZE 1024 /* 1024 sample fifo */
-#define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */
#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */
/* Registers for the das1800 */
@@ -125,6 +108,7 @@ TODO:
#define CGSL 0x8
#define TGEN 0x10
#define TGSL 0x20
+#define TGPL 0x40
#define ATEN 0x80
#define DAS1800_CONTROL_B 0x5
#define DMA_CH5 0x1
@@ -133,7 +117,7 @@ TODO:
#define DMA_CH5_CH6 0x5
#define DMA_CH6_CH7 0x6
#define DMA_CH7_CH5 0x7
-#define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */
+#define DMA_ENABLED 0x3
#define DMA_DUAL 0x4
#define IRQ3 0x8
#define IRQ5 0x10
@@ -151,319 +135,214 @@ TODO:
#define SD 0x40
#define UB 0x80
#define DAS1800_STATUS 0x7
-/* bits that prevent interrupt status bits (and CVEN) from being cleared on write */
-#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
#define INT 0x1
#define DMATC 0x2
#define CT0TC 0x8
#define OVF 0x10
#define FHF 0x20
#define FNE 0x40
-#define CVEN_MASK 0x40 /* masks CVEN on write */
#define CVEN 0x80
+#define CVEN_MASK 0x40
+#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
#define DAS1800_BURST_LENGTH 0x8
#define DAS1800_BURST_RATE 0x9
#define DAS1800_QRAM_ADDRESS 0xa
#define DAS1800_COUNTER 0xc
-#define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */
+#define IOBASE2 0x400
-enum {
- das1701st, das1701st_da, das1702st, das1702st_da, das1702hr,
- das1702hr_da,
- das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da,
- das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao
-};
-
-/* analog input ranges */
-static const struct comedi_lrange range_ai_das1801 = {
+static const struct comedi_lrange das1801_ai_range = {
8, {
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02),
- UNI_RANGE(5),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
+ BIP_RANGE(5), /* bipolar gain = 1 */
+ BIP_RANGE(1), /* bipolar gain = 10 */
+ BIP_RANGE(0.1), /* bipolar gain = 50 */
+ BIP_RANGE(0.02), /* bipolar gain = 250 */
+ UNI_RANGE(5), /* unipolar gain = 1 */
+ UNI_RANGE(1), /* unipolar gain = 10 */
+ UNI_RANGE(0.1), /* unipolar gain = 50 */
+ UNI_RANGE(0.02) /* unipolar gain = 250 */
}
};
-static const struct comedi_lrange range_ai_das1802 = {
+static const struct comedi_lrange das1802_ai_range = {
8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
+ BIP_RANGE(10), /* bipolar gain = 1 */
+ BIP_RANGE(5), /* bipolar gain = 2 */
+ BIP_RANGE(2.5), /* bipolar gain = 4 */
+ BIP_RANGE(1.25), /* bipolar gain = 8 */
+ UNI_RANGE(10), /* unipolar gain = 1 */
+ UNI_RANGE(5), /* unipolar gain = 2 */
+ UNI_RANGE(2.5), /* unipolar gain = 4 */
+ UNI_RANGE(1.25) /* unipolar gain = 8 */
}
};
+/*
+ * The waveform analog outputs on the 'ao' boards are not currently
+ * supported. They have a comedi_lrange of:
+ * { 2, { BIP_RANGE(10), BIP_RANGE(5) } }
+ */
+
+enum das1800_boardid {
+ BOARD_DAS1701ST,
+ BOARD_DAS1701ST_DA,
+ BOARD_DAS1702ST,
+ BOARD_DAS1702ST_DA,
+ BOARD_DAS1702HR,
+ BOARD_DAS1702HR_DA,
+ BOARD_DAS1701AO,
+ BOARD_DAS1702AO,
+ BOARD_DAS1801ST,
+ BOARD_DAS1801ST_DA,
+ BOARD_DAS1802ST,
+ BOARD_DAS1802ST_DA,
+ BOARD_DAS1802HR,
+ BOARD_DAS1802HR_DA,
+ BOARD_DAS1801HC,
+ BOARD_DAS1802HC,
+ BOARD_DAS1801AO,
+ BOARD_DAS1802AO
+};
+
+/* board probe id values (hi byte of the digital input register) */
+#define DAS1800_ID_ST_DA 0x3
+#define DAS1800_ID_HR_DA 0x4
+#define DAS1800_ID_AO 0x5
+#define DAS1800_ID_HR 0x6
+#define DAS1800_ID_ST 0x7
+#define DAS1800_ID_HC 0x8
+
struct das1800_board {
const char *name;
- int ai_speed; /* max conversion period in nanoseconds */
- int resolution; /* bits of ai resolution */
- int qram_len; /* length of card's channel / gain queue */
- int common; /* supports AREF_COMMON flag */
- int do_n_chan; /* number of digital output channels */
- int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */
- int ao_n_chan; /* number of analog out channels */
- const struct comedi_lrange *range_ai; /* available input ranges */
+ unsigned char id;
+ unsigned int ai_speed;
+ unsigned int is_01_series:1;
};
-/* Warning: the maximum conversion speeds listed below are
- * not always achievable depending on board setup (see
- * user manual.)
- */
static const struct das1800_board das1800_boards[] = {
- {
- .name = "das-1701st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1701st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr-da",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1701ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1801st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr-da",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
+ [BOARD_DAS1701ST] = {
+ .name = "das-1701st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1701ST_DA] = {
+ .name = "das-1701st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1702ST] = {
+ .name = "das-1702st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1702ST_DA] = {
+ .name = "das-1702st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1702HR] = {
+ .name = "das-1702hr",
+ .id = DAS1800_ID_HR,
+ .ai_speed = 20000,
+ },
+ [BOARD_DAS1702HR_DA] = {
+ .name = "das-1702hr-da",
+ .id = DAS1800_ID_HR_DA,
+ .ai_speed = 20000,
+ },
+ [BOARD_DAS1701AO] = {
+ .name = "das-1701ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1702AO] = {
+ .name = "das-1702ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1801ST] = {
+ .name = "das-1801st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1801ST_DA] = {
+ .name = "das-1801st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802ST] = {
+ .name = "das-1802st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1802ST_DA] = {
+ .name = "das-1802st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1802HR] = {
+ .name = "das-1802hr",
+ .id = DAS1800_ID_HR,
+ .ai_speed = 10000,
+ },
+ [BOARD_DAS1802HR_DA] = {
+ .name = "das-1802hr-da",
+ .id = DAS1800_ID_HR_DA,
+ .ai_speed = 10000,
+ },
+ [BOARD_DAS1801HC] = {
+ .name = "das-1801hc",
+ .id = DAS1800_ID_HC,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802HC] = {
+ .name = "das-1802hc",
+ .id = DAS1800_ID_HC,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1801AO] = {
+ .name = "das-1801ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802AO] = {
+ .name = "das-1802ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 3000,
+ },
};
struct das1800_private {
struct comedi_isadma *dma;
- int irq_dma_bits; /* bits for control register b */
- /* dma bits for control register b, stored so that dma can be
- * turned on and off */
+ int irq_dma_bits;
int dma_bits;
- uint16_t *fifo_buf; /* bounce buffer for analog input FIFO */
- unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */
- unsigned short ao_update_bits; /* remembers the last write to the
- * 'update' dac */
-};
-
-/* analog out range for 'ao' boards */
-/*
-static const struct comedi_lrange range_ao_2 = {
- 2, {
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
+ unsigned short *fifo_buf;
+ unsigned long iobase2;
+ bool ai_is_unipolar;
};
-*/
-
-static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
- uint16_t sample)
-{
- const struct das1800_board *board = dev->board_ptr;
-
- sample += 1 << (board->resolution - 1);
- return sample;
-}
-static void munge_data(struct comedi_device *dev, uint16_t *array,
- unsigned int num_elements)
+static void das1800_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index)
{
+ struct das1800_private *devpriv = dev->private;
+ unsigned short *array = data;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
- int unipolar;
- /* see if card is using a unipolar or bipolar range so we can munge data correctly */
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
+ if (devpriv->ai_is_unipolar)
+ return;
- /* convert to unsigned type if we are in a bipolar mode */
- if (!unipolar) {
- for (i = 0; i < num_elements; i++)
- array[i] = munge_bipolar_sample(dev, array[i]);
- }
+ for (i = 0; i < num_samples; i++)
+ array[i] = comedi_offset_munge(s, array[i]);
}
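
The munge hook above replaces the old per-sample helper with the core
comedi_offset_munge(), which converts a two's complement sample to offset
binary by toggling its sign bit. A stand-alone sketch of that transform for a
12-bit board (the XOR formula is assumed to be equivalent to the core helper):

    #include <stdio.h>

    /* Toggle the sign bit: two's complement <-> offset binary. */
    static unsigned int offset_munge(unsigned int val, unsigned int maxdata)
    {
            return val ^ (maxdata ^ (maxdata >> 1));
    }

    int main(void)
    {
            unsigned int maxdata = 0x0fff; /* 12-bit board */

            /* -2048 (0x800) -> 0x000, 0 -> 0x800, +2047 (0x7ff) -> 0xfff */
            printf("0x%03x 0x%03x 0x%03x\n",
                   offset_munge(0x800, maxdata),
                   offset_munge(0x000, maxdata),
                   offset_munge(0x7ff, maxdata));
            return 0;
    }
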
static void das1800_handle_fifo_half_full(struct comedi_device *dev,
@@ -473,7 +352,6 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples);
- munge_data(dev, devpriv->fifo_buf, nsamples);
comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples);
}
@@ -482,14 +360,9 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned short dpnt;
- int unipolar;
-
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* convert to unsigned type */
- dpnt = munge_bipolar_sample(dev, dpnt);
comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT &&
@@ -498,7 +371,6 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
}
}
-/* Utility function used by das1800_flush_dma() and das1800_handle_dma() */
static void das1800_flush_dma_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_isadma_desc *desc)
@@ -511,12 +383,9 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
nsamples = comedi_bytes_to_samples(s, nbytes);
nsamples = comedi_nsamples_left(s, nsamples);
- munge_data(dev, desc->virt_addr, nsamples);
comedi_buf_write_samples(s, desc->virt_addr, nsamples);
}
-/* flushes remaining data from board when external trigger has stopped acquisition
- * and we are using dma transfers */
static void das1800_flush_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
@@ -560,7 +429,8 @@ static void das1800_handle_dma(struct comedi_device *dev,
}
}
-static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das1800_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
@@ -583,7 +453,6 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-/* the guts of the interrupt handler, that is shared with das1800_ai_poll */
static void das1800_ai_handler(struct comedi_device *dev)
{
struct das1800_private *devpriv = dev->private;
@@ -592,17 +461,16 @@ static void das1800_ai_handler(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
- /* select adc for base address + 0 */
+ /* select adc register (spinlock is already held) */
outb(ADC, dev->iobase + DAS1800_SELECT);
- /* dma buffer full */
- if (devpriv->irq_dma_bits & DMA_ENABLED) {
- /* look for data from dma transfer even if dma terminal count hasn't happened yet */
+
+ /* get samples with dma, fifo, or polled as necessary */
+ if (devpriv->irq_dma_bits & DMA_ENABLED)
das1800_handle_dma(dev, s, status);
- } else if (status & FHF) { /* if fifo half full */
+ else if (status & FHF)
das1800_handle_fifo_half_full(dev, s);
- } else if (status & FNE) { /* if fifo not empty */
+ else if (status & FNE)
das1800_handle_fifo_not_empty(dev, s);
- }
/* if the card's fifo has overflowed */
if (status & OVF) {
@@ -618,7 +486,7 @@ static void das1800_ai_handler(struct comedi_device *dev)
if (status & CT0TC) {
/* clear CT0TC interrupt bit */
outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS);
- /* make sure we get all remaining data from board before quitting */
+ /* get all remaining samples before quitting */
if (devpriv->irq_dma_bits & DMA_ENABLED)
das1800_flush_dma(dev, s);
else
@@ -637,9 +505,14 @@ static int das1800_ai_poll(struct comedi_device *dev,
{
unsigned long flags;
- /* prevent race with interrupt handler */
+ /*
+ * Protects the indirect addressing selected by DAS1800_SELECT
+ * in das1800_ai_handler() and also prevents a race with
+ * das1800_interrupt().
+ */
spin_lock_irqsave(&dev->spinlock, flags);
+
das1800_ai_handler(dev);
+
spin_unlock_irqrestore(&dev->spinlock, flags);
return comedi_buf_n_bytes_ready(s);
@@ -655,9 +528,12 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- /* Prevent race with das1800_ai_poll() on multi processor systems.
- * Also protects indirect addressing in das1800_ai_handler */
+ /*
+ * Protects the indirect addressing selected by DAS1800_SELECT
+ * in das1800_ai_handler() and also prevents a race with
+ * das1800_ai_poll().
+ */
spin_lock(&dev->spinlock);
+
status = inb(dev->iobase + DAS1800_STATUS);
/* if interrupt was not caused by das-1800 */
@@ -674,46 +550,87 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
-/* converts requested conversion timing to timing compatible with
- * hardware, used only when card is in 'burst mode'
- */
-static unsigned int burst_convert_arg(unsigned int convert_arg, int flags)
+static int das1800_ai_fixup_paced_timing(struct comedi_device *dev,
+ struct comedi_cmd *cmd)
{
- unsigned int micro_sec;
+ unsigned int arg = cmd->convert_arg;
+
+ /*
+ * Paced mode:
+ * scan_begin_src is TRIG_FOLLOW
+ * convert_src is TRIG_TIMER
+ *
+ * The convert_arg sets the pacer sample acquisition time.
+ * The max acquisition speed is limited to the board's
+ * 'ai_speed' (this was already verified). The min speed is
+ * limited by the cascaded 8254 timer.
+ */
+ comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
+ return comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+}
- /* in burst mode, the maximum conversion time is 64 microseconds */
- if (convert_arg > 64000)
- convert_arg = 64000;
+static int das1800_ai_fixup_burst_timing(struct comedi_device *dev,
+ struct comedi_cmd *cmd)
+{
+ unsigned int arg = cmd->convert_arg;
+ int err = 0;
- /* the conversion time must be an integral number of microseconds */
- switch (flags & CMDF_ROUND_MASK) {
+ /*
+ * Burst mode:
+ * scan_begin_src is TRIG_TIMER or TRIG_EXT
+ * convert_src is TRIG_TIMER
+ *
+ * The convert_arg sets the burst sample acquisition time.
+ * The max acquisition speed is limited to the board's
+ * 'ai_speed' (this was already verified). The min speed is
+ * limited to 64 microseconds.
+ */
+ err |= comedi_check_trigger_arg_max(&arg, 64000);
+
+ /* round to microseconds then verify */
+ switch (cmd->flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- micro_sec = (convert_arg + 500) / 1000;
+ arg = DIV_ROUND_CLOSEST(arg, 1000);
break;
case CMDF_ROUND_DOWN:
- micro_sec = convert_arg / 1000;
+ arg = arg / 1000;
break;
case CMDF_ROUND_UP:
- micro_sec = (convert_arg - 1) / 1000 + 1;
+ arg = DIV_ROUND_UP(arg, 1000);
break;
}
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg * 1000);
+
+ /*
+ * The pacer can be used to set the scan sample rate. The max scan
+ * speed is limited by the conversion speed and the number of channels
+ * to convert. The min speed is limited by the cascaded 8254 timer.
+ */
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ arg = cmd->convert_arg * cmd->chanlist_len;
+ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
+
+ arg = cmd->scan_begin_arg;
+ comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
+ }
- /* return number of nanoseconds */
- return micro_sec * 1000;
+ return err;
}
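
The switch above rounds the burst conversion period to whole microseconds
according to the CMDF_ROUND flags. A stand-alone illustration of the three
rounding modes (the DIV_ROUND_* macros are expanded by hand from their kernel
definitions):

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int ns = 12500; /* requested conversion period */

            printf("nearest: %u ns\n", DIV_ROUND_CLOSEST(ns, 1000) * 1000);
            printf("down:    %u ns\n", (ns / 1000) * 1000);
            printf("up:      %u ns\n", DIV_ROUND_UP(ns, 1000) * 1000);
            return 0;
    }
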
static int das1800_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- unsigned int unipolar0 = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR;
+ unsigned int range = CR_RANGE(cmd->chanlist[0]);
+ bool unipolar0 = comedi_range_is_unipolar(s, range);
int i;
for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int unipolar = CR_RANGE(cmd->chanlist[i]) & UNIPOLAR;
+ range = CR_RANGE(cmd->chanlist[i]);
- if (unipolar != unipolar0) {
+ if (unipolar0 != comedi_range_is_unipolar(s, range)) {
dev_dbg(dev->class_dev,
"unipolar and bipolar ranges cannot be mixed in the chanlist\n");
return -EINVAL;
@@ -723,14 +640,12 @@ static int das1800_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-/* test analog input cmd */
-static int das1800_ai_do_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int das1800_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
const struct das1800_board *board = dev->board_ptr;
int err = 0;
- unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
@@ -755,16 +670,23 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
/* Step 2b : and mutually compatible */
+ /* burst scans must use timed conversions */
if (cmd->scan_begin_src != TRIG_FOLLOW &&
cmd->convert_src != TRIG_TIMER)
err |= -EINVAL;
+ /* the external pin TGIN must use the same polarity */
+ if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT)
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg,
+ cmd->stop_arg);
+
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ if (cmd->start_src == TRIG_NOW)
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
@@ -789,31 +711,13 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
if (err)
return 3;
- /* step 4: fix up any arguments */
+ /* Step 4: fix up any arguments */
- if (cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- /* we are not in burst mode */
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- } else if (cmd->convert_src == TRIG_TIMER) {
- /* we are in burst mode */
- arg = burst_convert_arg(cmd->convert_arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->chanlist_len;
- err |= comedi_check_trigger_arg_max(&cmd->
- scan_begin_arg,
- arg);
-
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg,
- cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg,
- arg);
- }
+ if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->scan_begin_src == TRIG_FOLLOW)
+ err |= das1800_ai_fixup_paced_timing(dev, cmd);
+ else /* TRIG_TIMER or TRIG_EXT */
+ err |= das1800_ai_fixup_burst_timing(dev, cmd);
}
if (err)
@@ -829,74 +733,22 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
return 0;
}
-/* returns appropriate bits for control register a, depending on command */
-static int control_a_bits(const struct comedi_cmd *cmd)
+static unsigned char das1800_ai_chanspec_bits(struct comedi_subdevice *s,
+ unsigned int chanspec)
{
- int control_a;
-
- control_a = FFEN; /* enable fifo */
- if (cmd->stop_src == TRIG_EXT)
- control_a |= ATEN;
- switch (cmd->start_src) {
- case TRIG_EXT:
- control_a |= TGEN | CGSL;
- break;
- case TRIG_NOW:
- control_a |= CGEN;
- break;
- default:
- break;
- }
-
- return control_a;
-}
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+ unsigned char bits;
-/* returns appropriate bits for control register c, depending on command */
-static int control_c_bits(const struct comedi_cmd *cmd)
-{
- int control_c;
- int aref;
-
- /* set clock source to internal or external, select analog reference,
- * select unipolar / bipolar
- */
- aref = CR_AREF(cmd->chanlist[0]);
- control_c = UQEN; /* enable upper qram addresses */
+ bits = UQEN;
if (aref != AREF_DIFF)
- control_c |= SD;
+ bits |= SD;
if (aref == AREF_COMMON)
- control_c |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(cmd->chanlist[0]) & UNIPOLAR)
- control_c |= UB;
- switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW: /* not in burst mode */
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- /* trig on cascaded counters */
- control_c |= IPCLK;
- break;
- case TRIG_EXT:
- /* trig on falling edge of external trigger */
- control_c |= XPCLK;
- break;
- default:
- break;
- }
- break;
- case TRIG_TIMER:
- /* burst mode with internal pacer clock */
- control_c |= BMDE | IPCLK;
- break;
- case TRIG_EXT:
- /* burst mode with external trigger */
- control_c |= BMDE | XPCLK;
- break;
- default:
- break;
- }
+ bits |= CMEN;
+ if (comedi_range_is_unipolar(s, range))
+ bits |= UB;
- return control_c;
+ return bits;
}
static unsigned int das1800_ai_transfer_size(struct comedi_device *dev,
@@ -960,43 +812,48 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
}
}
-/* programs channel/gain list into card */
-static void program_chanlist(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
+static void das1800_ai_set_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist, unsigned int len)
{
- int i, n, chan_range;
- unsigned long irq_flags;
- const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */
- const int range_bitshift = 8;
-
- n = cmd->chanlist_len;
- /* spinlock protects indirect addressing */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */
+ unsigned long flags;
+ unsigned int i;
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ /* select QRAM register and set start address */
+ outb(QRAM, dev->iobase + DAS1800_SELECT);
+ outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
+
/* make channel / gain list */
- for (i = 0; i < n; i++) {
- chan_range =
- CR_CHAN(cmd->chanlist[i]) |
- ((CR_RANGE(cmd->chanlist[i]) & range_mask) <<
- range_bitshift);
- outw(chan_range, dev->iobase + DAS1800_QRAM);
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(chanlist[i]);
+ unsigned int range = CR_RANGE(chanlist[i]);
+ unsigned short val;
+
+ val = chan | ((range & 0x3) << 8);
+ outw(val, dev->iobase + DAS1800_QRAM);
}
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+
+ /* finish write to QRAM */
+ outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
+
+ spin_unlock_irqrestore(&dev->spinlock, flags);
}
-/* analog input do_cmd */
-static int das1800_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int das1800_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
int control_a, control_c;
struct comedi_async *async = s->async;
const struct comedi_cmd *cmd = &async->cmd;
+ unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- /* disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY
- * (because dma in handler is unsafe at hard real-time priority) */
+ /*
+ * Disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY (because dma in
+ * handler is unsafe at hard real-time priority).
+ */
if (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY))
devpriv->irq_dma_bits &= ~DMA_ENABLED;
else
@@ -1010,14 +867,42 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
devpriv->irq_dma_bits |= FIMD;
}
- das1800_cancel(dev, s);
+ das1800_ai_cancel(dev, s);
- /* determine proper bits for control registers */
- control_a = control_a_bits(cmd);
- control_c = control_c_bits(cmd);
+ devpriv->ai_is_unipolar = comedi_range_is_unipolar(s, range0);
+
+ control_a = FFEN;
+ if (cmd->stop_src == TRIG_EXT)
+ control_a |= ATEN;
+ if (cmd->start_src == TRIG_EXT)
+ control_a |= TGEN | CGSL;
+ else /* TRIG_NOW */
+ control_a |= CGEN;
+ if (control_a & (ATEN | TGEN)) {
+ if ((cmd->start_arg & CR_INVERT) || (cmd->stop_arg & CR_INVERT))
+ control_a |= TGPL;
+ }
+
+ control_c = das1800_ai_chanspec_bits(s, cmd->chanlist[0]);
+ /* set clock source to internal or external */
+ if (cmd->scan_begin_src == TRIG_FOLLOW) {
+ /* not in burst mode */
+ if (cmd->convert_src == TRIG_TIMER) {
+ /* trig on cascaded counters */
+ control_c |= IPCLK;
+ } else { /* TRIG_EXT */
+ /* trig on falling edge of external trigger */
+ control_c |= XPCLK;
+ }
+ } else if (cmd->scan_begin_src == TRIG_TIMER) {
+ /* burst mode with internal pacer clock */
+ control_c |= BMDE | IPCLK;
+ } else { /* TRIG_EXT */
+ /* burst mode with external trigger */
+ control_c |= BMDE | XPCLK;
+ }
- /* setup card and start */
- program_chanlist(dev, cmd);
+ das1800_ai_set_chanlist(dev, cmd->chanlist, cmd->chanlist_len);
/* setup cascaded counters for conversion/scan frequency */
if ((cmd->scan_begin_src == TRIG_FOLLOW ||
@@ -1035,118 +920,117 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
outb(control_c, dev->iobase + DAS1800_CONTROL_C);
/* set conversion rate and length for burst mode */
if (control_c & BMDE) {
- /* program conversion period with number of microseconds minus 1 */
- outb(cmd->convert_arg / 1000 - 1,
+ outb(cmd->convert_arg / 1000 - 1, /* microseconds - 1 */
dev->iobase + DAS1800_BURST_RATE);
outb(cmd->chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH);
}
- outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */
- outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and triggering */
- outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
+
+ /* enable and start conversions */
+ outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B);
+ outb(control_a, dev->iobase + DAS1800_CONTROL_A);
+ outb(CVEN, dev->iobase + DAS1800_STATUS);
return 0;
}
-/* read analog input */
-static int das1800_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- const struct das1800_board *board = dev->board_ptr;
- int i, n;
- int chan, range, aref, chan_range;
- int timeout = 1000;
- unsigned short dpnt;
- int conv_flags = 0;
- unsigned long irq_flags;
+ unsigned char status;
- /* set up analog reference and unipolar / bipolar mode */
- aref = CR_AREF(insn->chanspec);
- conv_flags |= UQEN;
- if (aref != AREF_DIFF)
- conv_flags |= SD;
- if (aref == AREF_COMMON)
- conv_flags |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(insn->chanspec) & UNIPOLAR)
- conv_flags |= UB;
+ status = inb(dev->iobase + DAS1800_STATUS);
+ if (status & FNE)
+ return 0;
+ return -EBUSY;
+}
+
+static int das1800_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int range = CR_RANGE(insn->chanspec);
+ bool is_unipolar = comedi_range_is_unipolar(s, range);
+ int ret = 0;
+ int n;
+ unsigned short dpnt;
+ unsigned long flags;
- outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */
+ outb(das1800_ai_chanspec_bits(s, insn->chanspec),
+ dev->iobase + DAS1800_CONTROL_C); /* software pacer */
outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */
outb(FFEN, dev->iobase + DAS1800_CONTROL_A);
- chan = CR_CHAN(insn->chanspec);
- /* mask of unipolar/bipolar bit from range */
- range = CR_RANGE(insn->chanspec) & 0x3;
- chan_range = chan | (range << 8);
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */
- outw(chan_range, dev->iobase + DAS1800_QRAM);
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */
+ das1800_ai_set_chanlist(dev, &insn->chanspec, 1);
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ /* select ai fifo register */
+ outb(ADC, dev->iobase + DAS1800_SELECT);
for (n = 0; n < insn->n; n++) {
/* trigger conversion */
outb(0, dev->iobase + DAS1800_FIFO);
- for (i = 0; i < timeout; i++) {
- if (inb(dev->iobase + DAS1800_STATUS) & FNE)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timeout\n");
- n = -ETIME;
- goto exit;
- }
+
+ ret = comedi_timeout(dev, s, insn, das1800_ai_eoc, 0);
+ if (ret)
+ break;
+
dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* shift data to offset binary for bipolar ranges */
- if ((conv_flags & UB) == 0)
- dpnt += 1 << (board->resolution - 1);
+ if (!is_unipolar)
+ dpnt = comedi_offset_munge(s, dpnt);
data[n] = dpnt;
}
-exit:
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
- return n;
+ return ret ? ret : insn->n;
}
-/* writes to an analog output channel */
-static int das1800_ao_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das1800_board *board = dev->board_ptr;
- struct das1800_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
-/* int range = CR_RANGE(insn->chanspec); */
- int update_chan = board->ao_n_chan - 1;
- unsigned short output;
- unsigned long irq_flags;
-
- /* card expects two's complement data */
- output = data[0] - (1 << (board->resolution - 1));
- /* if the write is to the 'update' channel, we need to remember its value */
- if (chan == update_chan)
- devpriv->ao_update_bits = output;
- /* write to channel */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */
- outw(output, dev->iobase + DAS1800_DAC);
- /* now we need to write to 'update' channel to update all dac channels */
- if (chan != update_chan) {
- outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int update_chan = s->n_chan - 1;
+ unsigned long flags;
+ int i;
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
+
+ s->readback[chan] = val;
+
+ val = comedi_offset_munge(s, val);
+
+ /* load this channel (and update if it's the last channel) */
+ outb(DAC(chan), dev->iobase + DAS1800_SELECT);
+ outw(val, dev->iobase + DAS1800_DAC);
+
+ /* update all channels */
+ if (chan != update_chan) {
+ val = comedi_offset_munge(s, s->readback[update_chan]);
+
+ outb(DAC(update_chan), dev->iobase + DAS1800_SELECT);
+ outw(val, dev->iobase + DAS1800_DAC);
+ }
}
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
- return 1;
+ return insn->n;
}
-/* reads from digital input channels */
-static int das1800_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf;
data[0] = 0;
@@ -1154,10 +1038,10 @@ static int das1800_di_rbits(struct comedi_device *dev,
return insn->n;
}
-static int das1800_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int das1800_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DAS1800_DIGITAL);
@@ -1220,68 +1104,68 @@ static void das1800_free_dma(struct comedi_device *dev)
comedi_isadma_free(devpriv->dma);
}
-static const struct das1800_board *das1800_probe(struct comedi_device *dev)
+static int das1800_probe(struct comedi_device *dev)
{
const struct das1800_board *board = dev->board_ptr;
- int index = board ? board - das1800_boards : -EINVAL;
- int id;
+ unsigned char id;
+
+ id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
/*
* The dev->board_ptr will be set by comedi_device_attach() if the
* board name provided by the user matches a board->name in this
* driver. If so, this function sanity checks the id to verify that
* the board is correct.
- *
- * If the dev->board_ptr is not set, the user is trying to attach
- * an unspecified board to this driver. In this case the id is used
- * to 'probe' for the correct dev->board_ptr.
*/
- id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
+ if (board) {
+ if (board->id == id)
+ return 0;
+ dev_err(dev->class_dev,
+ "probed id does not match board id (0x%x != 0x%x)\n",
+ id, board->id);
+ return -ENODEV;
+ }
+
+ /*
+ * If the dev->board_ptr is not set, the user is trying to attach
+ * an unspecified board to this driver. In this case the id is used
+ * to 'probe' for the dev->board_ptr.
+ */
switch (id) {
- case 0x3:
- if (index == das1801st_da || index == das1802st_da ||
- index == das1701st_da || index == das1702st_da)
- return board;
- index = das1801st;
+ case DAS1800_ID_ST_DA:
+ /* das-1701st-da, das-1702st-da, das-1801st-da, das-1802st-da */
+ board = &das1800_boards[BOARD_DAS1801ST_DA];
break;
- case 0x4:
- if (index == das1802hr_da || index == das1702hr_da)
- return board;
- index = das1802hr;
+ case DAS1800_ID_HR_DA:
+ /* das-1702hr-da, das-1802hr-da */
+ board = &das1800_boards[BOARD_DAS1802HR_DA];
break;
- case 0x5:
- if (index == das1801ao || index == das1802ao ||
- index == das1701ao || index == das1702ao)
- return board;
- index = das1801ao;
+ case DAS1800_ID_AO:
+ /* das-1701ao, das-1702ao, das-1801ao, das-1802ao */
+ board = &das1800_boards[BOARD_DAS1801AO];
break;
- case 0x6:
- if (index == das1802hr || index == das1702hr)
- return board;
- index = das1802hr;
+ case DAS1800_ID_HR:
+ /* das-1702hr, das-1802hr */
+ board = &das1800_boards[BOARD_DAS1802HR];
break;
- case 0x7:
- if (index == das1801st || index == das1802st ||
- index == das1701st || index == das1702st)
- return board;
- index = das1801st;
+ case DAS1800_ID_ST:
+ /* das-1701st, das-1702st, das-1801st, das-1802st */
+ board = &das1800_boards[BOARD_DAS1801ST];
break;
- case 0x8:
- if (index == das1801hc || index == das1802hc)
- return board;
- index = das1801hc;
+ case DAS1800_ID_HC:
+ /* das-1801hc, das-1802hc */
+ board = &das1800_boards[BOARD_DAS1801HC];
break;
default:
- dev_err(dev->class_dev,
- "Board model: probe returned 0x%x (unknown, please report)\n",
- id);
- return NULL;
+ dev_err(dev->class_dev, "invalid probe id 0x%x\n", id);
+ return -ENODEV;
}
- dev_err(dev->class_dev,
- "Board model (probed, not recommended): %s series\n",
- das1800_boards[index].name);
-
- return &das1800_boards[index];
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+ dev_warn(dev->class_dev,
+ "probed id 0x%0x: %s series (not recommended)\n",
+ id, board->name);
+ return 0;
}
static int das1800_attach(struct comedi_device *dev,
@@ -1291,7 +1175,9 @@ static int das1800_attach(struct comedi_device *dev,
struct das1800_private *devpriv;
struct comedi_subdevice *s;
unsigned int irq = it->options[1];
+ bool is_16bit;
int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -1301,16 +1187,15 @@ static int das1800_attach(struct comedi_device *dev,
if (ret)
return ret;
- board = das1800_probe(dev);
- if (!board) {
- dev_err(dev->class_dev, "unable to determine board type\n");
- return -ENODEV;
- }
- dev->board_ptr = board;
- dev->board_name = board->name;
+ ret = das1800_probe(dev);
+ if (ret)
+ return ret;
+ board = dev->board_ptr;
- /* if it is an 'ao' board with fancy analog out then we need extra io ports */
- if (board->ao_ability == 2) {
+ is_16bit = board->id == DAS1800_ID_HR || board->id == DAS1800_ID_HR_DA;
+
+ /* waveform 'ao' boards have additional io ports */
+ if (board->id == DAS1800_ID_AO) {
unsigned long iobase2 = dev->iobase + IOBASE2;
ret = __comedi_request_region(dev, iobase2, DAS1800_SIZE);
@@ -1353,7 +1238,9 @@ static int das1800_attach(struct comedi_device *dev,
if (dev->irq & it->options[2])
das1800_init_dma(dev, it);
- devpriv->fifo_buf = kmalloc_array(FIFO_SIZE, sizeof(uint16_t), GFP_KERNEL);
+ devpriv->fifo_buf = kmalloc_array(FIFO_SIZE,
+ sizeof(*devpriv->fifo_buf),
+ GFP_KERNEL);
if (!devpriv->fifo_buf)
return -ENOMEM;
@@ -1366,70 +1253,94 @@ static int das1800_attach(struct comedi_device *dev,
if (ret)
return ret;
- /* analog input subdevice */
+ /*
+ * Analog Input subdevice
+ *
+ * The "hc" type boards have 64 analog input channels and a 64
+ * entry QRAM fifo.
+ *
+ * All the other board types have 16 on-board channels. Each channel
+ * can be expanded to 16 channels with the addition of an EXP-1800
+ * expansion board for a total of 256 channels. The QRAM fifo on
+ * these boards has 256 entries.
+ *
+ * From the datasheets it's not clear how the comedi channels map to
+ * the actual physical channels when EXP-1800 boards are used.
+ */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
- if (board->common)
- s->subdev_flags |= SDF_COMMON;
- s->n_chan = board->qram_len;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = board->range_ai;
- s->insn_read = das1800_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
+ if (board->id != DAS1800_ID_HC)
+ s->subdev_flags |= SDF_COMMON;
+ s->n_chan = (board->id == DAS1800_ID_HC) ? 64 : 256;
+ s->maxdata = is_16bit ? 0xffff : 0x0fff;
+ s->range_table = board->is_01_series ? &das1801_ai_range
+ : &das1802_ai_range;
+ s->insn_read = das1800_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmd = das1800_ai_do_cmd;
- s->do_cmdtest = das1800_ai_do_cmdtest;
- s->poll = das1800_ai_poll;
- s->cancel = das1800_cancel;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = das1800_ai_cmd;
+ s->do_cmdtest = das1800_ai_cmdtest;
+ s->poll = das1800_ai_poll;
+ s->cancel = das1800_ai_cancel;
+ s->munge = das1800_ai_munge;
}
- /* analog out */
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (board->ao_ability == 1) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->ao_n_chan;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = &range_bipolar10;
- s->insn_write = das1800_ao_winsn;
+ if (board->id == DAS1800_ID_ST_DA || board->id == DAS1800_ID_HR_DA) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (board->id == DAS1800_ID_ST_DA) ? 4 : 2;
+ s->maxdata = is_16bit ? 0xffff : 0x0fff;
+ s->range_table = &range_bipolar10;
+ s->insn_write = das1800_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ /* initialize all channels to 0V */
+ for (i = 0; i < s->n_chan; i++) {
+ /* spinlock is not necessary during the attach */
+ outb(DAC(i), dev->iobase + DAS1800_SELECT);
+ outw(0, dev->iobase + DAS1800_DAC);
+ }
+ } else if (board->id == DAS1800_ID_AO) {
+ /*
+ * 'ao' boards have waveform analog outputs that are not
+ * currently supported.
+ */
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- /* di */
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_di_rbits;
-
- /* do */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das1800_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->do_n_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_do_wbits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (board->id == DAS1800_ID_HC) ? 8 : 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das1800_do_insn_bits;
- das1800_cancel(dev, dev->read_subdev);
+ das1800_ai_cancel(dev, dev->read_subdev);
/* initialize digital out channels */
outb(0, dev->iobase + DAS1800_DIGITAL);
- /* initialize analog out channels */
- if (board->ao_ability == 1) {
- /* select 'update' dac channel for baseAddress + 0x0 */
- outb(DAC(board->ao_n_chan - 1),
- dev->iobase + DAS1800_SELECT);
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
- }
-
return 0;
};
@@ -1458,5 +1369,5 @@ static struct comedi_driver das1800_driver = {
module_comedi_driver(das1800_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for DAS1800 compatible ISA boards");
MODULE_LICENSE("GPL");
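
To make the trigger documentation above concrete, a burst-mode analog input
command for this driver could be set up from user space roughly as follows
(a hypothetical comedilib sketch; the device path, subdevice number, channels
and timing values are assumptions, not taken from the driver):

    #include <stdio.h>
    #include <comedilib.h>

    int main(void)
    {
            comedi_t *dev = comedi_open("/dev/comedi0"); /* assumed path */
            unsigned int chanlist[2];
            comedi_cmd cmd = { 0 };

            if (!dev)
                    return 1;

            chanlist[0] = CR_PACK(0, 0, AREF_GROUND);
            chanlist[1] = CR_PACK(1, 0, AREF_GROUND);

            cmd.subdev         = 0;          /* analog input subdevice */
            cmd.start_src      = TRIG_NOW;   /* start immediately */
            cmd.scan_begin_src = TRIG_TIMER; /* 'burst mode' scans */
            cmd.scan_begin_arg = 1000000;    /* one scan per ms (in ns) */
            cmd.convert_src    = TRIG_TIMER;
            cmd.convert_arg    = 10000;      /* 10 us/conversion, <= 64000 */
            cmd.scan_end_src   = TRIG_COUNT;
            cmd.scan_end_arg   = 2;          /* == chanlist_len */
            cmd.stop_src       = TRIG_NONE;  /* run until canceled */
            cmd.chanlist       = chanlist;
            cmd.chanlist_len   = 2;

            if (comedi_command(dev, &cmd) < 0)
                    comedi_perror("comedi_command");
            comedi_close(dev);
            return 0;
    }
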
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 40bf00984..d5295bbdd 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -69,49 +69,61 @@
* Register map
*/
#define DT2821_ADCSR_REG 0x00
-#define DT2821_ADCSR_ADERR (1 << 15)
-#define DT2821_ADCSR_ADCLK (1 << 9)
-#define DT2821_ADCSR_MUXBUSY (1 << 8)
-#define DT2821_ADCSR_ADDONE (1 << 7)
-#define DT2821_ADCSR_IADDONE (1 << 6)
+#define DT2821_ADCSR_ADERR BIT(15)
+#define DT2821_ADCSR_ADCLK BIT(9)
+#define DT2821_ADCSR_MUXBUSY BIT(8)
+#define DT2821_ADCSR_ADDONE BIT(7)
+#define DT2821_ADCSR_IADDONE BIT(6)
#define DT2821_ADCSR_GS(x) (((x) & 0x3) << 4)
#define DT2821_ADCSR_CHAN(x) (((x) & 0xf) << 0)
#define DT2821_CHANCSR_REG 0x02
-#define DT2821_CHANCSR_LLE (1 << 15)
-#define DT2821_CHANCSR_PRESLA(x) (((x) & 0xf) >> 8)
+#define DT2821_CHANCSR_LLE BIT(15)
+#define DT2821_CHANCSR_TO_PRESLA(x) (((x) >> 8) & 0xf)
#define DT2821_CHANCSR_NUMB(x) ((((x) - 1) & 0xf) << 0)
#define DT2821_ADDAT_REG 0x04
#define DT2821_DACSR_REG 0x06
-#define DT2821_DACSR_DAERR (1 << 15)
+#define DT2821_DACSR_DAERR BIT(15)
#define DT2821_DACSR_YSEL(x) ((x) << 9)
-#define DT2821_DACSR_SSEL (1 << 8)
-#define DT2821_DACSR_DACRDY (1 << 7)
-#define DT2821_DACSR_IDARDY (1 << 6)
-#define DT2821_DACSR_DACLK (1 << 5)
-#define DT2821_DACSR_HBOE (1 << 1)
-#define DT2821_DACSR_LBOE (1 << 0)
+#define DT2821_DACSR_SSEL BIT(8)
+#define DT2821_DACSR_DACRDY BIT(7)
+#define DT2821_DACSR_IDARDY BIT(6)
+#define DT2821_DACSR_DACLK BIT(5)
+#define DT2821_DACSR_HBOE BIT(1)
+#define DT2821_DACSR_LBOE BIT(0)
#define DT2821_DADAT_REG 0x08
#define DT2821_DIODAT_REG 0x0a
#define DT2821_SUPCSR_REG 0x0c
-#define DT2821_SUPCSR_DMAD (1 << 15)
-#define DT2821_SUPCSR_ERRINTEN (1 << 14)
-#define DT2821_SUPCSR_CLRDMADNE (1 << 13)
-#define DT2821_SUPCSR_DDMA (1 << 12)
-#define DT2821_SUPCSR_DS_PIO (0 << 10)
-#define DT2821_SUPCSR_DS_AD_CLK (1 << 10)
-#define DT2821_SUPCSR_DS_DA_CLK (2 << 10)
-#define DT2821_SUPCSR_DS_AD_TRIG (3 << 10)
-#define DT2821_SUPCSR_BUFFB (1 << 9)
-#define DT2821_SUPCSR_SCDN (1 << 8)
-#define DT2821_SUPCSR_DACON (1 << 7)
-#define DT2821_SUPCSR_ADCINIT (1 << 6)
-#define DT2821_SUPCSR_DACINIT (1 << 5)
-#define DT2821_SUPCSR_PRLD (1 << 4)
-#define DT2821_SUPCSR_STRIG (1 << 3)
-#define DT2821_SUPCSR_XTRIG (1 << 2)
-#define DT2821_SUPCSR_XCLK (1 << 1)
-#define DT2821_SUPCSR_BDINIT (1 << 0)
+#define DT2821_SUPCSR_DMAD BIT(15)
+#define DT2821_SUPCSR_ERRINTEN BIT(14)
+#define DT2821_SUPCSR_CLRDMADNE BIT(13)
+#define DT2821_SUPCSR_DDMA BIT(12)
+#define DT2821_SUPCSR_DS(x) (((x) & 0x3) << 10)
+#define DT2821_SUPCSR_DS_PIO DT2821_SUPCSR_DS(0)
+#define DT2821_SUPCSR_DS_AD_CLK DT2821_SUPCSR_DS(1)
+#define DT2821_SUPCSR_DS_DA_CLK DT2821_SUPCSR_DS(2)
+#define DT2821_SUPCSR_DS_AD_TRIG DT2821_SUPCSR_DS(3)
+#define DT2821_SUPCSR_BUFFB BIT(9)
+#define DT2821_SUPCSR_SCDN BIT(8)
+#define DT2821_SUPCSR_DACON BIT(7)
+#define DT2821_SUPCSR_ADCINIT BIT(6)
+#define DT2821_SUPCSR_DACINIT BIT(5)
+#define DT2821_SUPCSR_PRLD BIT(4)
+#define DT2821_SUPCSR_STRIG BIT(3)
+#define DT2821_SUPCSR_XTRIG BIT(2)
+#define DT2821_SUPCSR_XCLK BIT(1)
+#define DT2821_SUPCSR_BDINIT BIT(0)
#define DT2821_TMRCTR_REG 0x0e
+#define DT2821_TMRCTR_PRESCALE(x) (((x) & 0xf) << 8)
+#define DT2821_TMRCTR_DIVIDER(x) ((255 - ((x) & 0xff)) << 0)
+
+/* Pacer Clock */
+#define DT2821_OSC_BASE 250 /* 4 MHz clock period (ns) */
+#define DT2821_PRESCALE(x) BIT(x)
+#define DT2821_PRESCALE_MAX 15
+#define DT2821_DIVIDER_MAX 255
+#define DT2821_OSC_MAX (DT2821_OSC_BASE * \
+ DT2821_PRESCALE(DT2821_PRESCALE_MAX) * \
+ DT2821_DIVIDER_MAX)
static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
4, {
@@ -364,10 +376,10 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
{
unsigned int prescale, base, divider;
- for (prescale = 0; prescale < 16; prescale++) {
- if (prescale == 1)
+ for (prescale = 0; prescale <= DT2821_PRESCALE_MAX; prescale++) {
+ if (prescale == 1) /* 0 and 1 both divide by 1 */
continue;
- base = 250 * (1 << prescale);
+ base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
@@ -380,15 +392,17 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
divider = DIV_ROUND_UP(*ns, base);
break;
}
- if (divider < 256) {
- *ns = divider * base;
- return (prescale << 8) | (255 - divider);
- }
+ if (divider <= DT2821_DIVIDER_MAX)
+ break;
+ }
+ if (divider > DT2821_DIVIDER_MAX) {
+ prescale = DT2821_PRESCALE_MAX;
+ divider = DT2821_DIVIDER_MAX;
+ base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
}
- base = 250 * (1 << 15);
- divider = 255;
*ns = divider * base;
- return (15 << 8) | (255 - divider);
+ return DT2821_TMRCTR_PRESCALE(prescale) |
+ DT2821_TMRCTR_DIVIDER(divider);
}
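
The rewritten search can be modelled in isolation; a minimal user-space version (round-to-nearest only; the helper and harness names are illustrative, not part of the driver):

    #include <stdio.h>

    /* model of dt282x_ns_to_timer(): find the smallest prescale whose
     * rounded divider fits in 8 bits, then encode the TMRCTR value */
    static unsigned int ns_to_timer(unsigned int *ns)
    {
            unsigned int prescale, base = 250, divider = 0;

            for (prescale = 0; prescale <= 15; prescale++) {
                    if (prescale == 1)      /* 0 and 1 both divide by 1 */
                            continue;
                    base = 250 * (1u << prescale);
                    divider = *ns / base;
                    if (*ns % base >= base / 2)
                            divider++;      /* round to nearest */
                    if (divider <= 255)
                            break;
            }
            if (divider > 255) {            /* clamp to the slowest rate */
                    prescale = 15;
                    divider = 255;
                    base = 250 * (1u << prescale);
            }
            *ns = divider * base;
            return (prescale << 8) | (255 - divider);
    }

    int main(void)
    {
            unsigned int ns = 100000;       /* request 100 us */
            unsigned int bits = ns_to_timer(&ns);

            printf("TMRCTR = 0x%04x, actual = %u ns\n", bits, ns);
            return 0;
    }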
static void dt282x_munge(struct comedi_device *dev,
@@ -683,13 +697,8 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 4000);
-
-#define SLOWEST_TIMER (250*(1<<15)*255)
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
+ err |= comedi_check_trigger_arg_max(&cmd->convert_arg, DT2821_OSC_MAX);
err |= comedi_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
@@ -1084,20 +1093,6 @@ static int dt282x_initialize(struct comedi_device *dev)
return 0;
}
-/*
- options:
- 0 i/o base
- 1 irq
- 2 dma1
- 3 dma2
- 4 0=single ended, 1=differential
- 5 ai 0=straight binary, 1=2's comp
- 6 ao0 0=straight binary, 1=2's comp
- 7 ao1 0=straight binary, 1=2's comp
- 8 ai 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V
- 9 ao0 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- 10 ao1 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- */
static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt282x_board *board = dev->board_ptr;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 8f24702c3..b1c086013 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -46,355 +46,451 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/log2.h>
#include "../comedi_pci.h"
#include "mite.h"
-#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
+/*
+ * Mite registers
+ */
+#define MITE_UNKNOWN_DMA_BURST_REG 0x28
+#define UNKNOWN_DMA_BURST_ENABLE_BITS 0x600
+
+#define MITE_PCI_CONFIG_OFFSET 0x300
+#define MITE_CSIGR 0x460 /* chip signature */
+#define CSIGR_TO_IOWINS(x) (((x) >> 29) & 0x7)
+#define CSIGR_TO_WINS(x) (((x) >> 24) & 0x1f)
+#define CSIGR_TO_WPDEP(x) (((x) >> 20) & 0x7)
+#define CSIGR_TO_DMAC(x) (((x) >> 16) & 0xf)
+#define CSIGR_TO_IMODE(x) (((x) >> 12) & 0x3) /* pci=0x3 */
+#define CSIGR_TO_MMODE(x) (((x) >> 8) & 0x3) /* minimite=1 */
+#define CSIGR_TO_TYPE(x) (((x) >> 4) & 0xf) /* mite=0, minimite=1 */
+#define CSIGR_TO_VER(x) (((x) >> 0) & 0xf)
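
The field extractors are easy to sanity-check against a signature word; a small user-space sketch (the sample value is invented for illustration):

    #include <stdio.h>

    #define CSIGR_TO_DMAC(x)        (((x) >> 16) & 0xf)
    #define CSIGR_TO_TYPE(x)        (((x) >> 4) & 0xf)
    #define CSIGR_TO_VER(x)         (((x) >> 0) & 0xf)

    int main(void)
    {
            unsigned int csigr = 0x00132021;        /* hypothetical value */

            printf("version %u, type %u, %u dma channels\n",
                   CSIGR_TO_VER(csigr), CSIGR_TO_TYPE(csigr),
                   CSIGR_TO_DMAC(csigr));
            return 0;
    }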
+
+#define MITE_CHAN(x) (0x500 + 0x100 * (x))
+#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
+#define CHOR_DMARESET BIT(31)
+#define CHOR_SET_SEND_TC BIT(11)
+#define CHOR_CLR_SEND_TC BIT(10)
+#define CHOR_SET_LPAUSE BIT(9)
+#define CHOR_CLR_LPAUSE BIT(8)
+#define CHOR_CLRDONE BIT(7)
+#define CHOR_CLRRB BIT(6)
+#define CHOR_CLRLC BIT(5)
+#define CHOR_FRESET BIT(4)
+#define CHOR_ABORT BIT(3) /* stop without emptying fifo */
+#define CHOR_STOP BIT(2) /* stop after emptying fifo */
+#define CHOR_CONT BIT(1)
+#define CHOR_START BIT(0)
+#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
+#define CHCR_SET_DMA_IE BIT(31)
+#define CHCR_CLR_DMA_IE BIT(30)
+#define CHCR_SET_LINKP_IE BIT(29)
+#define CHCR_CLR_LINKP_IE BIT(28)
+#define CHCR_SET_SAR_IE BIT(27)
+#define CHCR_CLR_SAR_IE BIT(26)
+#define CHCR_SET_DONE_IE BIT(25)
+#define CHCR_CLR_DONE_IE BIT(24)
+#define CHCR_SET_MRDY_IE BIT(23)
+#define CHCR_CLR_MRDY_IE BIT(22)
+#define CHCR_SET_DRDY_IE BIT(21)
+#define CHCR_CLR_DRDY_IE BIT(20)
+#define CHCR_SET_LC_IE BIT(19)
+#define CHCR_CLR_LC_IE BIT(18)
+#define CHCR_SET_CONT_RB_IE BIT(17)
+#define CHCR_CLR_CONT_RB_IE BIT(16)
+#define CHCR_FIFO(x) (((x) & 0x1) << 15)
+#define CHCR_FIFODIS CHCR_FIFO(1)
+#define CHCR_FIFO_ON CHCR_FIFO(0)
+#define CHCR_BURST(x) (((x) & 0x1) << 14)
+#define CHCR_BURSTEN CHCR_BURST(1)
+#define CHCR_NO_BURSTEN CHCR_BURST(0)
+#define CHCR_BYTE_SWAP_DEVICE BIT(6)
+#define CHCR_BYTE_SWAP_MEMORY BIT(4)
+#define CHCR_DIR(x) (((x) & 0x1) << 3)
+#define CHCR_DEV_TO_MEM CHCR_DIR(1)
+#define CHCR_MEM_TO_DEV CHCR_DIR(0)
+#define CHCR_MODE(x) (((x) & 0x7) << 0)
+#define CHCR_NORMAL CHCR_MODE(0)
+#define CHCR_CONTINUE CHCR_MODE(1)
+#define CHCR_RINGBUFF CHCR_MODE(2)
+#define CHCR_LINKSHORT CHCR_MODE(4)
+#define CHCR_LINKLONG CHCR_MODE(5)
+#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
+#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory config */
+#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
+#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device config */
+#define DCR_NORMAL BIT(29)
+#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
+#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link config */
+#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
+#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
+#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
+#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
+#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
+#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
+#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
+#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
+#define CHSR_INT BIT(31)
+#define CHSR_LPAUSES BIT(29)
+#define CHSR_SARS BIT(27)
+#define CHSR_DONE BIT(25)
+#define CHSR_MRDY BIT(23)
+#define CHSR_DRDY BIT(21)
+#define CHSR_LINKC BIT(19)
+#define CHSR_CONTS_RB BIT(17)
+#define CHSR_ERROR BIT(15)
+#define CHSR_SABORT BIT(14)
+#define CHSR_HABORT BIT(13)
+#define CHSR_STOPS BIT(12)
+#define CHSR_OPERR(x) (((x) & 0x3) << 10)
+#define CHSR_OPERR_MASK CHSR_OPERR(3)
+#define CHSR_OPERR_NOERROR CHSR_OPERR(0)
+#define CHSR_OPERR_FIFOERROR CHSR_OPERR(1)
+#define CHSR_OPERR_LINKERROR CHSR_OPERR(1) /* ??? */
+#define CHSR_XFERR BIT(9)
+#define CHSR_END BIT(8)
+#define CHSR_DRQ1 BIT(7)
+#define CHSR_DRQ0 BIT(6)
+#define CHSR_LERR(x) (((x) & 0x3) << 4)
+#define CHSR_LERR_MASK CHSR_LERR(3)
+#define CHSR_LBERR CHSR_LERR(1)
+#define CHSR_LRERR CHSR_LERR(2)
+#define CHSR_LOERR CHSR_LERR(3)
+#define CHSR_MERR(x) (((x) & 0x3) << 2)
+#define CHSR_MERR_MASK CHSR_MERR(3)
+#define CHSR_MBERR CHSR_MERR(1)
+#define CHSR_MRERR CHSR_MERR(2)
+#define CHSR_MOERR CHSR_MERR(3)
+#define CHSR_DERR(x) (((x) & 0x3) << 0)
+#define CHSR_DERR_MASK CHSR_DERR(3)
+#define CHSR_DBERR CHSR_DERR(1)
+#define CHSR_DRERR CHSR_DERR(2)
+#define CHSR_DOERR CHSR_DERR(3)
+#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
+
+/* common bits for the memory/device/link config registers */
+#define CR_RL(x) (((x) & 0x7) << 21)
+#define CR_REQS(x) (((x) & 0x7) << 16)
+#define CR_REQS_MASK CR_REQS(7)
+#define CR_ASEQ(x) (((x) & 0x3) << 10)
+#define CR_ASEQDONT CR_ASEQ(0)
+#define CR_ASEQUP CR_ASEQ(1)
+#define CR_ASEQDOWN CR_ASEQ(2)
+#define CR_ASEQ_MASK CR_ASEQ(3)
+#define CR_PSIZE(x) (((x) & 0x3) << 8)
+#define CR_PSIZE8 CR_PSIZE(1)
+#define CR_PSIZE16 CR_PSIZE(2)
+#define CR_PSIZE32 CR_PSIZE(3)
+#define CR_PORT(x) (((x) & 0x3) << 6)
+#define CR_PORTCPU CR_PORT(0)
+#define CR_PORTIO CR_PORT(1)
+#define CR_PORTVXI CR_PORT(2)
+#define CR_PORTMXI CR_PORT(3)
+#define CR_AMDEVICE BIT(0)
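
The memory/device/link config values written by mite_prep_dma() further down are ORed combinations of these fields; for example, a 16-bit, address-incrementing memory-side config with the maximum retry limit comes together like this (user-space sketch of the bit math only):

    #include <stdio.h>

    #define CR_RL(x)        (((x) & 0x7) << 21)
    #define CR_ASEQ(x)      (((x) & 0x3) << 10)
    #define CR_ASEQUP       CR_ASEQ(1)
    #define CR_PSIZE(x)     (((x) & 0x3) << 8)
    #define CR_PSIZE16      CR_PSIZE(2)

    int main(void)
    {
            unsigned int mcr = CR_RL(7) | CR_ASEQUP | CR_PSIZE16;

            printf("MCR = 0x%08x\n", mcr);  /* prints 0x00e00600 */
            return 0;
    }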
+
+static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
+{
+ return (ilog2(size) - 1) & 0x1f;
+}
-struct mite_struct *mite_alloc(struct pci_dev *pcidev)
+static unsigned int mite_retry_limit(unsigned int retry_limit)
{
- struct mite_struct *mite;
- unsigned int i;
+ unsigned int value = 0;
- mite = kzalloc(sizeof(*mite), GFP_KERNEL);
- if (mite) {
- spin_lock_init(&mite->lock);
- mite->pcidev = pcidev;
- for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
- mite->channels[i].mite = mite;
- mite->channels[i].channel = i;
- mite->channels[i].done = 1;
- }
- }
- return mite;
+ if (retry_limit)
+ value = 1 + ilog2(retry_limit);
+ if (value > 0x7)
+ value = 0x7;
+ return CR_RL(value);
}
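
The retry-limit helper maps a power-of-two limit to the 3-bit CR_RL() field as 1 + log2(limit), saturating at 7; a user-space model with a portable stand-in for ilog2():

    #include <stdio.h>

    static unsigned int ilog2u(unsigned int x)      /* stands in for ilog2() */
    {
            unsigned int r = 0;

            while (x >>= 1)
                    r++;
            return r;
    }

    static unsigned int retry_limit_field(unsigned int retry_limit)
    {
            unsigned int value = 0;

            if (retry_limit)
                    value = 1 + ilog2u(retry_limit);
            if (value > 0x7)
                    value = 0x7;
            return value;           /* the driver shifts this with CR_RL() */
    }

    int main(void)
    {
            printf("limit 4 -> %u, limit 1024 -> %u\n",
                   retry_limit_field(4), retry_limit_field(1024));
            return 0;
    }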
-EXPORT_SYMBOL_GPL(mite_alloc);
-static void dump_chip_signature(u32 csigr_bits)
+static unsigned int mite_drq_reqs(unsigned int drq_line)
{
- pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
- mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
- mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
- pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
- mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
- mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
+ /* This also works on m-series when using channels (drq_line) 4 or 5. */
+ return CR_REQS((drq_line & 0x3) | 0x4);
}
-static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
+static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
{
- unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
- unsigned empty_count = (fcr_bits >> 16) & 0xff;
- unsigned full_count = fcr_bits & 0xff;
+ unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
+ unsigned int empty_count = (fcr_bits >> 16) & 0xff;
+ unsigned int full_count = fcr_bits & 0xff;
return empty_count + full_count;
}
-int mite_setup2(struct comedi_device *dev,
- struct mite_struct *mite, bool use_win1)
+static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
- unsigned long length;
- int i;
- u32 csigr_bits;
- unsigned unknown_dma_burst_bits;
+ struct mite *mite = mite_chan->mite;
- pci_set_master(mite->pcidev);
+ return readl(mite->mmio + MITE_DAR(mite_chan->channel));
+}
- mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
- if (!mite->mite_io_addr) {
- dev_err(dev->class_dev,
- "Failed to remap mite io memory address\n");
- return -ENOMEM;
- }
- mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);
+/**
+ * mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
+ * @mite_chan: MITE dma channel.
+ */
+u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
- dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
- if (!dev->mmio) {
- dev_err(dev->class_dev,
- "Failed to remap daq io memory address\n");
- return -ENOMEM;
- }
- mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
- length = pci_resource_len(mite->pcidev, 1);
+ return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
+}
+EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
- if (use_win1) {
- writel(0, mite->mite_io_addr + MITE_IODWBSR);
- dev_info(dev->class_dev,
- "using I/O Window Base Size register 1\n");
- writel(mite->daq_phys_addr | WENAB |
- MITE_IODWBSR_1_WSIZE_bits(length),
- mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0, mite->mite_io_addr + MITE_IODWCR_1);
- } else {
- writel(mite->daq_phys_addr | WENAB,
- mite->mite_io_addr + MITE_IODWBSR);
- }
- /*
- * Make sure dma bursts work. I got this from running a bus analyzer
- * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
- * of 0x61f and bursts worked. 6281 powered up with register value of
- * 0x1f and bursts didn't work. The NI windows driver reads the
- * register, then does a bitwise-or of 0x600 with it and writes it back.
- */
- unknown_dma_burst_bits =
- readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
- unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
- writel(unknown_dma_burst_bits,
- mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+/* returns lower bound for number of bytes transferred from device to memory */
+static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+{
+ u32 device_byte_count;
- csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
- mite->num_channels = mite_csigr_dmac(csigr_bits);
- if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
- dev_warn(dev->class_dev,
- "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
- mite->num_channels, MAX_MITE_DMA_CHANNELS);
- mite->num_channels = MAX_MITE_DMA_CHANNELS;
- }
- dump_chip_signature(csigr_bits);
- for (i = 0; i < mite->num_channels; i++) {
- writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
- /* disable interrupts */
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(i));
- }
- mite->fifo_size = mite_fifo_size(mite, 0);
- dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
- return 0;
+ device_byte_count = mite_device_bytes_transferred(mite_chan);
+ return device_byte_count - mite_bytes_in_transit(mite_chan);
}
-EXPORT_SYMBOL_GPL(mite_setup2);
-void mite_detach(struct mite_struct *mite)
+/* returns upper bound for number of bytes transferred from device to memory */
+static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
- if (!mite)
- return;
-
- if (mite->mite_io_addr)
- iounmap(mite->mite_io_addr);
+ u32 in_transit_count;
- kfree(mite);
+ in_transit_count = mite_bytes_in_transit(mite_chan);
+ return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
-EXPORT_SYMBOL_GPL(mite_detach);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
+/* returns lower bound for number of bytes read from memory to device */
+static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
- struct mite_dma_descriptor_ring *ring =
- kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
+ u32 device_byte_count;
- if (!ring)
- return NULL;
- ring->hw_dev = get_device(&mite->pcidev->dev);
- if (!ring->hw_dev) {
- kfree(ring);
- return NULL;
- }
- ring->n_links = 0;
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- return ring;
-};
-EXPORT_SYMBOL_GPL(mite_alloc_ring);
+ device_byte_count = mite_device_bytes_transferred(mite_chan);
+ return device_byte_count + mite_bytes_in_transit(mite_chan);
+}
-void mite_free_ring(struct mite_dma_descriptor_ring *ring)
+/* returns upper bound for number of bytes read from memory to device */
+static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
- if (ring) {
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
- }
- put_device(ring->hw_dev);
- kfree(ring);
- }
-};
-EXPORT_SYMBOL_GPL(mite_free_ring);
+ u32 in_transit_count;
+
+ in_transit_count = mite_bytes_in_transit(mite_chan);
+ return mite_device_bytes_transferred(mite_chan) + in_transit_count;
+}
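
The _lb/_ub pairs exist because the DAR and FCR registers cannot be read atomically together; bytes may move through the fifo between the two reads, so one read order under-estimates and the other over-estimates the true count. Worked numbers (plain arithmetic, values invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t device_count = 4096;   /* bytes moved on the device side */
            uint32_t in_transit = 24;       /* bytes still sitting in the fifo */

            /* device -> memory: fifo bytes have not reached memory yet */
            printf("written to memory: %u\n", device_count - in_transit);
            /* memory -> device: fifo bytes were already read from memory */
            printf("read from memory: %u\n", device_count + in_transit);
            return 0;
    }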
-struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite,
- struct
- mite_dma_descriptor_ring
- *ring, unsigned min_channel,
- unsigned max_channel)
+static void mite_sync_input_dma(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s)
{
- int i;
- unsigned long flags;
- struct mite_channel *channel = NULL;
+ struct comedi_async *async = s->async;
+ int count;
+ unsigned int nbytes, old_alloc_count;
+
+ old_alloc_count = async->buf_write_alloc_count;
+ /* write alloc as much as we can */
+ comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ nbytes = mite_bytes_written_to_memory_lb(mite_chan);
+ if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
+ old_alloc_count) > 0) {
+ dev_warn(s->device->class_dev,
+ "mite: DMA overwrite of free area\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
+ }
+
+ count = nbytes - async->buf_write_count;
/*
- * spin lock so mite_release_channel can be called safely
- * from interrupts
+ * it's possible count will be negative due to conservative value
+ * returned by mite_bytes_written_to_memory_lb
*/
- spin_lock_irqsave(&mite->lock, flags);
- for (i = min_channel; i <= max_channel; ++i) {
- if (mite->channel_allocated[i] == 0) {
- mite->channel_allocated[i] = 1;
- channel = &mite->channels[i];
- channel->ring = ring;
- break;
- }
+ if (count > 0) {
+ comedi_buf_write_free(s, count);
+ comedi_inc_scan_progress(s, count);
+ async->events |= COMEDI_CB_BLOCK;
}
- spin_unlock_irqrestore(&mite->lock, flags);
- return channel;
}
-EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-void mite_release_channel(struct mite_channel *mite_chan)
+static void mite_sync_output_dma(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
+ unsigned int old_alloc_count = async->buf_read_alloc_count;
+ u32 nbytes_ub, nbytes_lb;
+ int count;
+ bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
- /* spin lock to prevent races with mite_request_channel */
- spin_lock_irqsave(&mite->lock, flags);
- if (mite->channel_allocated[mite_chan->channel]) {
- mite_dma_disarm(mite_chan);
- mite_dma_reset(mite_chan);
+ /* read alloc as much as we can */
+ comedi_buf_read_alloc(s, async->prealloc_bufsz);
+ nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
+ if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
+ nbytes_lb = stop_count;
+ nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
+ if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
+ nbytes_ub = stop_count;
+
+ if ((!finite_regen || stop_count > old_alloc_count) &&
+ ((int)(nbytes_ub - old_alloc_count) > 0)) {
+ dev_warn(s->device->class_dev, "mite: DMA underrun\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
+ }
+
+ if (finite_regen) {
/*
- * disable all channel's interrupts (do it after disarm/reset so
- * MITE_CHCR reg isn't changed while dma is still active!)
+ * This is a special case where we continuously output a finite
+ * buffer. In this case, we do not free any of the memory,
+ * hence we expect that old_alloc_count will reach a maximum of
+ * stop_count bytes.
*/
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
- CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
- CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
- mite->channel_allocated[mite_chan->channel] = 0;
- mite_chan->ring = NULL;
- mmiowb();
+ return;
+ }
+
+ count = nbytes_lb - async->buf_read_count;
+ if (count > 0) {
+ comedi_buf_read_free(s, count);
+ async->events |= COMEDI_CB_BLOCK;
}
- spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_release_channel);
-void mite_dma_arm(struct mite_channel *mite_chan)
+/**
+ * mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
+ * @mite_chan: MITE dma channel.
+ * @s: COMEDI subdevice.
+ */
+void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
+{
+ if (mite_chan->dir == COMEDI_INPUT)
+ mite_sync_input_dma(mite_chan, s);
+ else
+ mite_sync_output_dma(mite_chan, s);
+}
+EXPORT_SYMBOL_GPL(mite_sync_dma);
+
+static unsigned int mite_get_status(struct mite_channel *mite_chan)
{
- struct mite_struct *mite = mite_chan->mite;
- int chor;
+ struct mite *mite = mite_chan->mite;
+ unsigned int status;
unsigned long flags;
- /*
- * memory barrier is intended to insure any twiddling with the buffer
- * is done before writing to the mite to arm dma transfer
- */
- smp_mb();
- /* arm */
- chor = CHOR_START;
spin_lock_irqsave(&mite->lock, flags);
- mite_chan->done = 0;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
+ if (status & CHSR_DONE) {
+ mite_chan->done = 1;
+ writel(CHOR_CLRDONE,
+ mite->mmio + MITE_CHOR(mite_chan->channel));
+ }
mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
- /* mite_dma_tcr(mite, channel); */
+ return status;
}
-EXPORT_SYMBOL_GPL(mite_dma_arm);
-/**************************************/
-
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s)
+/**
+ * mite_ack_linkc() - Check and ack the LINKC interrupt.
+ * @mite_chan: MITE dma channel.
+ * @s: COMEDI subdevice.
+ * @sync: flag to force a mite_sync_dma().
+ *
+ * This will also ack the DONE interrupt if active.
+ */
+void mite_ack_linkc(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s,
+ bool sync)
{
- struct comedi_async *async = s->async;
- unsigned int n_links;
+ struct mite *mite = mite_chan->mite;
+ unsigned int status;
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
+ status = mite_get_status(mite_chan);
+ if (status & CHSR_LINKC) {
+ writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
+ sync = true;
}
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- ring->n_links = 0;
+ if (sync)
+ mite_sync_dma(mite_chan, s);
- if (async->prealloc_bufsz == 0)
- return 0;
-
- n_links = async->prealloc_bufsz >> PAGE_SHIFT;
-
- ring->descriptors =
- dma_alloc_coherent(ring->hw_dev,
- n_links * sizeof(struct mite_dma_descriptor),
- &ring->descriptors_dma_addr, GFP_KERNEL);
- if (!ring->descriptors) {
+ if (status & CHSR_XFERR) {
dev_err(s->device->class_dev,
- "mite: ring buffer allocation failed\n");
- return -ENOMEM;
+ "mite: transfer error %08x\n", status);
+ s->async->events |= COMEDI_CB_ERROR;
}
- ring->n_links = n_links;
-
- return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
}
-EXPORT_SYMBOL_GPL(mite_buf_change);
+EXPORT_SYMBOL_GPL(mite_ack_linkc);
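
In an interrupt handler the usual pattern is to ack via this helper and then report events; a hedged sketch under assumed names (my_isr(), struct my_private and the devpriv layout are hypothetical; comedi_handle_events() is the standard COMEDI helper):

    static irqreturn_t my_isr(int irq, void *d)
    {
            struct comedi_device *dev = d;
            struct my_private *devpriv = dev->private;      /* hypothetical */
            struct comedi_subdevice *s = dev->read_subdev;

            if (!devpriv->mite_chan)
                    return IRQ_NONE;
            /* ack LINKC (and DONE) and sync the async buffer if needed */
            mite_ack_linkc(devpriv->mite_chan, s, false);
            comedi_handle_events(dev, s);
            return IRQ_HANDLED;
    }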
-/*
- * initializes the ring buffer descriptors to provide correct DMA transfer links
- * to the exact amount of memory required. When the ring buffer is allocated in
- * mite_buf_change, the default is to initialize the ring to refer to the entire
- * DMA data buffer. A command may call this function later to re-initialize and
- * shorten the amount of memory that will be transferred.
+/**
+ * mite_done() - Check if a MITE dma transfer is complete.
+ * @mite_chan: MITE dma channel.
+ *
+ * This will also ack the DONE interrupt if active.
*/
-int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s,
- unsigned int nbytes)
+int mite_done(struct mite_channel *mite_chan)
{
- struct comedi_async *async = s->async;
- unsigned int n_full_links = nbytes >> PAGE_SHIFT;
- unsigned int remainder = nbytes % PAGE_SIZE;
- int i;
-
- dev_dbg(s->device->class_dev,
- "mite: init ring buffer to %u bytes\n", nbytes);
-
- if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
- dev_err(s->device->class_dev,
- "mite: ring buffer too small for requested init\n");
- return -ENOMEM;
- }
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
+ int done;
- /* We set the descriptors for all full links. */
- for (i = 0; i < n_full_links; ++i) {
- ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- ring->descriptors[i].next =
- cpu_to_le32(ring->descriptors_dma_addr +
- (i + 1) * sizeof(struct mite_dma_descriptor));
- }
+ mite_get_status(mite_chan);
+ spin_lock_irqsave(&mite->lock, flags);
+ done = mite_chan->done;
+ spin_unlock_irqrestore(&mite->lock, flags);
+ return done;
+}
+EXPORT_SYMBOL_GPL(mite_done);
- /* the last link is either a remainder or was a full link. */
- if (remainder > 0) {
- /* set the lesser count for the remainder link */
- ring->descriptors[i].count = cpu_to_le32(remainder);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- /* increment i so that assignment below refs last link */
- ++i;
- }
+static void mite_dma_reset(struct mite_channel *mite_chan)
+{
+ writel(CHOR_DMARESET | CHOR_FRESET,
+ mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
+}
- /* Assign the last link->next to point back to the head of the list. */
- ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr);
+/**
+ * mite_dma_arm() - Start a MITE dma transfer.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_dma_arm(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
/*
- * barrier is meant to insure that all the writes to the dma descriptors
- * have completed before the dma controller is commanded to read them
+	 * memory barrier is intended to ensure any twiddling with the buffer
+	 * is done before writing to the mite to arm the dma transfer
*/
- smp_wmb();
- return 0;
+ smp_mb();
+ spin_lock_irqsave(&mite->lock, flags);
+ mite_chan->done = 0;
+ /* arm */
+ writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
+ mmiowb();
+ spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
+EXPORT_SYMBOL_GPL(mite_dma_arm);
+
+/**
+ * mite_dma_disarm() - Stop a MITE dma transfer.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_dma_disarm(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
+
+ /* disarm */
+ writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
+}
+EXPORT_SYMBOL_GPL(mite_dma_disarm);
+/**
+ * mite_prep_dma() - Prepare a MITE dma channel for transfers.
+ * @mite_chan: MITE dma channel.
+ * @num_device_bits: device transfer size (8, 16, or 32-bits).
+ * @num_memory_bits: memory transfer size (8, 16, or 32-bits).
+ */
void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int num_device_bits, unsigned int num_memory_bits)
{
- unsigned int chor, chcr, mcr, dcr, lkcr;
- struct mite_struct *mite = mite_chan->mite;
+ struct mite *mite = mite_chan->mite;
+ unsigned int chcr, mcr, dcr, lkcr;
- /* reset DMA and FIFO */
- chor = CHOR_DMARESET | CHOR_FRESET;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ mite_dma_reset(mite_chan);
/* short link chaining mode */
chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
@@ -421,10 +517,10 @@ void mite_prep_dma(struct mite_channel *mite_chan,
if (mite_chan->dir == COMEDI_INPUT)
chcr |= CHCR_DEV_TO_MEM;
- writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+ writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));
/* to/from memory */
- mcr = CR_RL(64) | CR_ASEQUP;
+ mcr = mite_retry_limit(64) | CR_ASEQUP;
switch (num_memory_bits) {
case 8:
mcr |= CR_PSIZE8;
@@ -439,11 +535,11 @@ void mite_prep_dma(struct mite_channel *mite_chan,
pr_warn("bug! invalid mem bit width for dma transfer\n");
break;
}
- writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
+ writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));
/* from/to device */
- dcr = CR_RL(64) | CR_ASEQUP;
- dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
+ dcr = mite_retry_limit(64) | CR_ASEQUP;
+ dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
switch (num_device_bits) {
case 8:
dcr |= CR_PSIZE8;
@@ -458,223 +554,402 @@ void mite_prep_dma(struct mite_channel *mite_chan,
pr_warn("bug! invalid dev bit width for dma transfer\n");
break;
}
- writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
+ writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));
/* reset the DAR */
- writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+ writel(0, mite->mmio + MITE_DAR(mite_chan->channel));
/* the link is 32bits */
- lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
- writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
+ lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
+ writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));
/* starting address for link chaining */
- writel(mite_chan->ring->descriptors_dma_addr,
- mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
+ writel(mite_chan->ring->dma_addr,
+ mite->mmio + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
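
A typical caller pairs this with arming; roughly (a sketch mirroring how the NI drivers drive the API, here with a 16-bit device side and 32-bit memory side):

    /* configure for device-to-memory transfers, then start */
    mite_chan->dir = COMEDI_INPUT;
    mite_prep_dma(mite_chan, 16, 32);
    mite_dma_arm(mite_chan);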
-static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
+static struct mite_channel *__mite_request_channel(struct mite *mite,
+ struct mite_ring *ring,
+ unsigned int min_channel,
+ unsigned int max_channel)
{
- struct mite_struct *mite = mite_chan->mite;
+ struct mite_channel *mite_chan = NULL;
+ unsigned long flags;
+ int i;
- return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+ /*
+ * spin lock so mite_release_channel can be called safely
+ * from interrupts
+ */
+ spin_lock_irqsave(&mite->lock, flags);
+ for (i = min_channel; i <= max_channel; ++i) {
+ mite_chan = &mite->channels[i];
+ if (!mite_chan->ring) {
+ mite_chan->ring = ring;
+ break;
+ }
+ mite_chan = NULL;
+ }
+ spin_unlock_irqrestore(&mite->lock, flags);
+ return mite_chan;
}
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
+/**
+ * mite_request_channel_in_range() - Request a MITE dma channel.
+ * @mite: MITE device.
+ * @ring: MITE dma ring.
+ * @min_channel: minimum channel index to use.
+ * @max_channel: maximum channel index to use.
+ */
+struct mite_channel *mite_request_channel_in_range(struct mite *mite,
+ struct mite_ring *ring,
+ unsigned int min_channel,
+ unsigned int max_channel)
{
- struct mite_struct *mite = mite_chan->mite;
-
- return readl(mite->mite_io_addr +
- MITE_FCR(mite_chan->channel)) & 0x000000FF;
+ return __mite_request_channel(mite, ring, min_channel, max_channel);
}
-EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
+EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-/* returns lower bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+/**
+ * mite_request_channel() - Request a MITE dma channel.
+ * @mite: MITE device.
+ * @ring: MITE dma ring.
+ */
+struct mite_channel *mite_request_channel(struct mite *mite,
+ struct mite_ring *ring)
{
- u32 device_byte_count;
-
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count - mite_bytes_in_transit(mite_chan);
+ return __mite_request_channel(mite, ring, 0, mite->num_channels - 1);
}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL_GPL(mite_request_channel);
-/* returns upper bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
+/**
+ * mite_release_channel() - Release a MITE dma channel.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_release_channel(struct mite_channel *mite_chan)
{
- u32 in_transit_count;
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) - in_transit_count;
+ /* spin lock to prevent races with mite_request_channel */
+ spin_lock_irqsave(&mite->lock, flags);
+ if (mite_chan->ring) {
+ mite_dma_disarm(mite_chan);
+ mite_dma_reset(mite_chan);
+ /*
+ * disable all channel's interrupts (do it after disarm/reset so
+ * MITE_CHCR reg isn't changed while dma is still active!)
+ */
+ writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
+ CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
+ CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+ CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+ mite->mmio + MITE_CHCR(mite_chan->channel));
+ mite_chan->ring = NULL;
+ mmiowb();
+ }
+ spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL_GPL(mite_release_channel);
-/* returns lower bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
+/**
+ * mite_init_ring_descriptors() - Initialize a MITE dma ring's descriptors.
+ * @ring: MITE dma ring.
+ * @s: COMEDI subdevice.
+ * @nbytes: the size of the dma ring (in bytes).
+ *
+ * Initializes the ring buffer descriptors to provide correct DMA transfer
+ * links to the exact amount of memory required. When the ring buffer is
+ * allocated by mite_buf_change(), the default is to initialize the ring
+ * to refer to the entire DMA data buffer. A command may call this function
+ * later to re-initialize and shorten the amount of memory that will be
+ * transferred.
+ */
+int mite_init_ring_descriptors(struct mite_ring *ring,
+ struct comedi_subdevice *s,
+ unsigned int nbytes)
{
- u32 device_byte_count;
+ struct comedi_async *async = s->async;
+ struct mite_dma_desc *desc = NULL;
+ unsigned int n_full_links = nbytes >> PAGE_SHIFT;
+ unsigned int remainder = nbytes % PAGE_SIZE;
+ int i;
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count + mite_bytes_in_transit(mite_chan);
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
+ dev_dbg(s->device->class_dev,
+ "mite: init ring buffer to %u bytes\n", nbytes);
-/* returns upper bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
-{
- u32 in_transit_count;
+ if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer too small for requested init\n");
+ return -ENOMEM;
+ }
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) + in_transit_count;
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
+ /* We set the descriptors for all full links. */
+ for (i = 0; i < n_full_links; ++i) {
+ desc = &ring->descs[i];
+ desc->count = cpu_to_le32(PAGE_SIZE);
+ desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ desc->next = cpu_to_le32(ring->dma_addr +
+ (i + 1) * sizeof(*desc));
+ }
-unsigned mite_dma_tcr(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
+ /* the last link is either a remainder or was a full link. */
+ if (remainder > 0) {
+ desc = &ring->descs[i];
+ /* set the lesser count for the remainder link */
+ desc->count = cpu_to_le32(remainder);
+ desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ }
+
+ /* Assign the last link->next to point back to the head of the list. */
+ desc->next = cpu_to_le32(ring->dma_addr);
- return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
+ /*
+	 * barrier is meant to ensure that all the writes to the dma descriptors
+ * have completed before the dma controller is commanded to read them
+ */
+ smp_wmb();
+ return 0;
}
-EXPORT_SYMBOL_GPL(mite_dma_tcr);
+EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
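
The link accounting is plain page math: nbytes >> PAGE_SHIFT full-page links plus one shorter tail link for any remainder. A user-space check (4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1u << PAGE_SHIFT)

    int main(void)
    {
            unsigned int nbytes = 10000;
            unsigned int n_full_links = nbytes >> PAGE_SHIFT;
            unsigned int remainder = nbytes % PAGE_SIZE;

            /* prints "10000 bytes -> 2 full links + 1808-byte tail" */
            printf("%u bytes -> %u full links + %u-byte tail\n",
                   nbytes, n_full_links, remainder);
            return 0;
    }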
-void mite_dma_disarm(struct mite_channel *mite_chan)
+static void mite_free_dma_descs(struct mite_ring *ring)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned chor;
+ struct mite_dma_desc *descs = ring->descs;
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ if (descs) {
+ dma_free_coherent(ring->hw_dev,
+ ring->n_links * sizeof(*descs),
+ descs, ring->dma_addr);
+ ring->descs = NULL;
+ ring->dma_addr = 0;
+ ring->n_links = 0;
+ }
}
-EXPORT_SYMBOL_GPL(mite_dma_disarm);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
+/**
+ * mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
+ * @ring: MITE dma ring.
+ * @s: COMEDI subdevice.
+ */
+int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- int count;
- unsigned int nbytes, old_alloc_count;
+ struct mite_dma_desc *descs;
+ unsigned int n_links;
- old_alloc_count = async->buf_write_alloc_count;
- /* write alloc as much as we can */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ mite_free_dma_descs(ring);
- nbytes = mite_bytes_written_to_memory_lb(mite_chan);
- if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
- old_alloc_count) > 0) {
- dev_warn(s->device->class_dev,
- "mite: DMA overwrite of free area\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
+ if (async->prealloc_bufsz == 0)
+ return 0;
+
+ n_links = async->prealloc_bufsz >> PAGE_SHIFT;
+
+ descs = dma_alloc_coherent(ring->hw_dev,
+ n_links * sizeof(*descs),
+ &ring->dma_addr, GFP_KERNEL);
+ if (!descs) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer allocation failed\n");
+ return -ENOMEM;
}
+ ring->descs = descs;
+ ring->n_links = n_links;
- count = nbytes - async->buf_write_count;
- /*
- * it's possible count will be negative due to conservative value
- * returned by mite_bytes_written_to_memory_lb
- */
- if (count <= 0)
- return 0;
+ return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mite_buf_change);
- comedi_buf_write_free(s, count);
- comedi_inc_scan_progress(s, count);
- async->events |= COMEDI_CB_BLOCK;
- return 0;
+/**
+ * mite_alloc_ring() - Allocate a MITE dma ring.
+ * @mite: MITE device.
+ */
+struct mite_ring *mite_alloc_ring(struct mite *mite)
+{
+ struct mite_ring *ring;
+
+ ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+ ring->hw_dev = get_device(&mite->pcidev->dev);
+ if (!ring->hw_dev) {
+ kfree(ring);
+ return NULL;
+ }
+ ring->n_links = 0;
+ ring->descs = NULL;
+ ring->dma_addr = 0;
+ return ring;
}
-EXPORT_SYMBOL_GPL(mite_sync_input_dma);
+EXPORT_SYMBOL_GPL(mite_alloc_ring);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
+/**
+ * mite_free_ring() - Free a MITE dma ring and its descriptors.
+ * @ring: MITE dma ring.
+ */
+void mite_free_ring(struct mite_ring *ring)
{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
- unsigned int old_alloc_count = async->buf_read_alloc_count;
- u32 nbytes_ub, nbytes_lb;
- int count;
- bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
+ if (ring) {
+ mite_free_dma_descs(ring);
+ put_device(ring->hw_dev);
+ kfree(ring);
+ }
+}
+EXPORT_SYMBOL_GPL(mite_free_ring);
- /* read alloc as much as we can */
- comedi_buf_read_alloc(s, async->prealloc_bufsz);
- nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
- nbytes_lb = stop_count;
- nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
- nbytes_ub = stop_count;
+static int mite_setup(struct comedi_device *dev, struct mite *mite,
+ bool use_win1)
+{
+ resource_size_t daq_phys_addr;
+ unsigned long length;
+ int i;
+ u32 csigr_bits;
+ unsigned int unknown_dma_burst_bits;
+ unsigned int wpdep;
- if ((!finite_regen || stop_count > old_alloc_count) &&
- ((int)(nbytes_ub - old_alloc_count) > 0)) {
- dev_warn(s->device->class_dev, "mite: DMA underrun\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
+ pci_set_master(mite->pcidev);
+
+ mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
+ if (!mite->mmio)
+ return -ENOMEM;
+
+ dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
+ if (!dev->mmio)
+ return -ENOMEM;
+ daq_phys_addr = pci_resource_start(mite->pcidev, 1);
+ length = pci_resource_len(mite->pcidev, 1);
+
+ if (use_win1) {
+ writel(0, mite->mmio + MITE_IODWBSR);
+ dev_dbg(dev->class_dev,
+ "mite: using I/O Window Base Size register 1\n");
+ writel(daq_phys_addr | WENAB |
+ MITE_IODWBSR_1_WSIZE_bits(length),
+ mite->mmio + MITE_IODWBSR_1);
+ writel(0, mite->mmio + MITE_IODWCR_1);
+ } else {
+ writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
}
+ /*
+ * Make sure dma bursts work. I got this from running a bus analyzer
+ * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
+ * of 0x61f and bursts worked. 6281 powered up with register value of
+ * 0x1f and bursts didn't work. The NI windows driver reads the
+ * register, then does a bitwise-or of 0x600 with it and writes it back.
+ *
+ * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
+ * written and read back. The bits 0x1f always read as 1.
+ * The rest always read as zero.
+ */
+ unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
+ unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
+ writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
- if (finite_regen) {
- /*
- * This is a special case where we continuously output a finite
- * buffer. In this case, we do not free any of the memory,
- * hence we expect that old_alloc_count will reach a maximum of
- * stop_count bytes.
- */
- return 0;
+ csigr_bits = readl(mite->mmio + MITE_CSIGR);
+ mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
+ if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
+ dev_warn(dev->class_dev,
+ "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
+ mite->num_channels, MAX_MITE_DMA_CHANNELS);
+ mite->num_channels = MAX_MITE_DMA_CHANNELS;
}
- count = nbytes_lb - async->buf_read_count;
- if (count <= 0)
- return 0;
+ /* get the wpdep bits and convert it to the write port fifo depth */
+ wpdep = CSIGR_TO_WPDEP(csigr_bits);
+ if (wpdep)
+ wpdep = BIT(wpdep);
- if (count) {
- comedi_buf_read_free(s, count);
- async->events |= COMEDI_CB_BLOCK;
+ dev_dbg(dev->class_dev,
+ "mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
+ CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
+ CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
+ dev_dbg(dev->class_dev,
+ "mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
+ CSIGR_TO_DMAC(csigr_bits), wpdep,
+ CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));
+
+ for (i = 0; i < mite->num_channels; i++) {
+ writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
+ /* disable interrupts */
+ writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+ CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+ CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+ mite->mmio + MITE_CHCR(i));
}
+ mite->fifo_size = mite_fifo_size(mite, 0);
+ dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
return 0;
}
-EXPORT_SYMBOL_GPL(mite_sync_output_dma);
-unsigned mite_get_status(struct mite_channel *mite_chan)
+/**
+ * mite_attach() - Allocate and initialize a MITE device for a comedi driver.
+ * @dev: COMEDI device.
+ * @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
+ *
+ * Called by a COMEDI driver's (*auto_attach).
+ *
+ * Returns a pointer to the MITE device on success, or NULL if the MITE cannot
+ * be allocated or remapped.
+ */
+struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned status;
- unsigned long flags;
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct mite *mite;
+ unsigned int i;
+ int ret;
- spin_lock_irqsave(&mite->lock, flags);
- status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
- if (status & CHSR_DONE) {
- mite_chan->done = 1;
- writel(CHOR_CLRDONE,
- mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ mite = kzalloc(sizeof(*mite), GFP_KERNEL);
+ if (!mite)
+ return NULL;
+
+ spin_lock_init(&mite->lock);
+ mite->pcidev = pcidev;
+ for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
+ mite->channels[i].mite = mite;
+ mite->channels[i].channel = i;
+ mite->channels[i].done = 1;
}
- mmiowb();
- spin_unlock_irqrestore(&mite->lock, flags);
- return status;
+
+ ret = mite_setup(dev, mite, use_win1);
+ if (ret) {
+ if (mite->mmio)
+ iounmap(mite->mmio);
+ kfree(mite);
+ return NULL;
+ }
+
+ return mite;
}
-EXPORT_SYMBOL_GPL(mite_get_status);
+EXPORT_SYMBOL_GPL(mite_attach);
-int mite_done(struct mite_channel *mite_chan)
+/**
+ * mite_detach() - Unmap and free a MITE device for a comedi driver.
+ * @mite: MITE device.
+ *
+ * Called by a COMEDI driver's (*detach).
+ */
+void mite_detach(struct mite *mite)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
- int done;
+ if (!mite)
+ return;
- mite_get_status(mite_chan);
- spin_lock_irqsave(&mite->lock, flags);
- done = mite_chan->done;
- spin_unlock_irqrestore(&mite->lock, flags);
- return done;
+ if (mite->mmio)
+ iounmap(mite->mmio);
+
+ kfree(mite);
}
-EXPORT_SYMBOL_GPL(mite_done);
+EXPORT_SYMBOL_GPL(mite_detach);
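
Taken together, attach/detach give drivers a compact lifecycle; roughly (a hedged sketch with error unwinding trimmed; devpriv and its fields are hypothetical):

    /* in the driver's (*auto_attach) */
    devpriv->mite = mite_attach(dev, false);        /* I/O window 0 */
    if (!devpriv->mite)
            return -ENOMEM;
    devpriv->ring = mite_alloc_ring(devpriv->mite);
    if (!devpriv->ring)
            return -ENOMEM;

    /* in the driver's (*detach) */
    mite_free_ring(devpriv->ring);
    mite_detach(devpriv->mite);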
static int __init mite_module_init(void)
{
return 0;
}
+module_init(mite_module_init);
static void __exit mite_module_exit(void)
{
}
-
-module_init(mite_module_init);
module_exit(mite_module_exit);
MODULE_AUTHOR("Comedi http://www.comedi.org");
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 87534b07e..b6349aed9 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -19,8 +19,6 @@
#ifndef _MITE_H_
#define _MITE_H_
-#include <linux/io.h>
-#include <linux/log2.h>
#include <linux/spinlock.h>
#define MAX_MITE_DMA_CHANNELS 8
@@ -30,323 +28,74 @@ struct comedi_subdevice;
struct device;
struct pci_dev;
-struct mite_dma_descriptor {
+struct mite_dma_desc {
__le32 count;
__le32 addr;
__le32 next;
u32 dar;
};
-struct mite_dma_descriptor_ring {
+struct mite_ring {
struct device *hw_dev;
unsigned int n_links;
- struct mite_dma_descriptor *descriptors;
- dma_addr_t descriptors_dma_addr;
+ struct mite_dma_desc *descs;
+ dma_addr_t dma_addr;
};
struct mite_channel {
- struct mite_struct *mite;
- unsigned channel;
+ struct mite *mite;
+ unsigned int channel;
int dir;
int done;
- struct mite_dma_descriptor_ring *ring;
+ struct mite_ring *ring;
};
-struct mite_struct {
+struct mite {
struct pci_dev *pcidev;
- resource_size_t mite_phys_addr;
- void __iomem *mite_io_addr;
- resource_size_t daq_phys_addr;
+ void __iomem *mmio;
struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
- short channel_allocated[MAX_MITE_DMA_CHANNELS];
int num_channels;
- unsigned fifo_size;
+ unsigned int fifo_size;
+ /* protects mite_channel from being released by the driver */
spinlock_t lock;
};
-struct mite_struct *mite_alloc(struct pci_dev *pcidev);
+u32 mite_bytes_in_transit(struct mite_channel *);
-int mite_setup2(struct comedi_device *, struct mite_struct *, bool use_win1);
+void mite_sync_dma(struct mite_channel *, struct comedi_subdevice *);
+void mite_ack_linkc(struct mite_channel *, struct comedi_subdevice *s,
+ bool sync);
+int mite_done(struct mite_channel *);
-static inline int mite_setup(struct comedi_device *dev,
- struct mite_struct *mite)
-{
- return mite_setup2(dev, mite, false);
-}
+void mite_dma_arm(struct mite_channel *);
+void mite_dma_disarm(struct mite_channel *);
-void mite_detach(struct mite_struct *mite);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite);
-void mite_free_ring(struct mite_dma_descriptor_ring *ring);
-struct mite_channel *
-mite_request_channel_in_range(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring,
- unsigned min_channel, unsigned max_channel);
-static inline struct mite_channel *
-mite_request_channel(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring)
-{
- return mite_request_channel_in_range(mite, ring, 0,
- mite->num_channels - 1);
-}
-
-void mite_release_channel(struct mite_channel *mite_chan);
-
-unsigned mite_dma_tcr(struct mite_channel *mite_chan);
-void mite_dma_arm(struct mite_channel *mite_chan);
-void mite_dma_disarm(struct mite_channel *mite_chan);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan);
-unsigned mite_get_status(struct mite_channel *mite_chan);
-int mite_done(struct mite_channel *mite_chan);
-
-void mite_prep_dma(struct mite_channel *mite_chan,
+void mite_prep_dma(struct mite_channel *,
unsigned int num_device_bits, unsigned int num_memory_bits);
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s);
-int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s,
- unsigned int nbytes);
-
-enum mite_registers {
- /*
- * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
- * written and read back. The bits 0x1f always read as 1.
- * The rest always read as zero.
- */
- MITE_UNKNOWN_DMA_BURST_REG = 0x28,
- MITE_IODWBSR = 0xc0, /* IO Device Window Base Size Register */
- MITE_IODWBSR_1 = 0xc4, /* IO Device Window Base Size Register 1 */
- MITE_IODWCR_1 = 0xf4,
- MITE_PCI_CONFIG_OFFSET = 0x300,
- MITE_CSIGR = 0x460 /* chip signature */
-};
-
-#define MITE_CHAN(x) (0x500 + 0x100 * (x))
-#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
-#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
-#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
-#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory configuration */
-#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
-#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device configuration */
-#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
-#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link configuration */
-#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
-#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
-#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
-#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
-#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
-#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
-#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
-#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
-#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
-
-enum MITE_IODWBSR_bits {
- WENAB = 0x80, /* window enable */
-};
-
-static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
-{
- unsigned order = 0;
-
- BUG_ON(size == 0);
- order = ilog2(size);
- BUG_ON(order < 1);
- return (order - 1) & 0x1f;
-}
-
-enum MITE_UNKNOWN_DMA_BURST_bits {
- UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
-};
-
-static inline int mite_csigr_version(u32 csigr_bits)
-{
- return csigr_bits & 0xf;
-};
-
-static inline int mite_csigr_type(u32 csigr_bits)
-{ /* original mite = 0, minimite = 1 */
- return (csigr_bits >> 4) & 0xf;
-};
-
-static inline int mite_csigr_mmode(u32 csigr_bits)
-{ /* mite mode, minimite = 1 */
- return (csigr_bits >> 8) & 0x3;
-};
-
-static inline int mite_csigr_imode(u32 csigr_bits)
-{ /* cpu port interface mode, pci = 0x3 */
- return (csigr_bits >> 12) & 0x3;
-};
-
-static inline int mite_csigr_dmac(u32 csigr_bits)
-{ /* number of dma channels */
- return (csigr_bits >> 16) & 0xf;
-};
-static inline int mite_csigr_wpdep(u32 csigr_bits)
-{ /* write post fifo depth */
- unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
+struct mite_channel *mite_request_channel_in_range(struct mite *,
+ struct mite_ring *,
+ unsigned int min_channel,
+ unsigned int max_channel);
+struct mite_channel *mite_request_channel(struct mite *, struct mite_ring *);
+void mite_release_channel(struct mite_channel *);
- return (wpdep_bits) ? (1 << (wpdep_bits - 1)) : 0;
-}
-
-static inline int mite_csigr_wins(u32 csigr_bits)
-{
- return (csigr_bits >> 24) & 0x1f;
-};
-
-static inline int mite_csigr_iowins(u32 csigr_bits)
-{ /* number of io windows */
- return (csigr_bits >> 29) & 0x7;
-};
-
-enum MITE_MCR_bits {
- MCRPON = 0,
-};
-
-enum MITE_DCR_bits {
- DCR_NORMAL = (1 << 29),
- DCRPON = 0,
-};
-
-enum MITE_CHOR_bits {
- CHOR_DMARESET = (1 << 31),
- CHOR_SET_SEND_TC = (1 << 11),
- CHOR_CLR_SEND_TC = (1 << 10),
- CHOR_SET_LPAUSE = (1 << 9),
- CHOR_CLR_LPAUSE = (1 << 8),
- CHOR_CLRDONE = (1 << 7),
- CHOR_CLRRB = (1 << 6),
- CHOR_CLRLC = (1 << 5),
- CHOR_FRESET = (1 << 4),
- CHOR_ABORT = (1 << 3), /* stop without emptying fifo */
- CHOR_STOP = (1 << 2), /* stop after emptying fifo */
- CHOR_CONT = (1 << 1),
- CHOR_START = (1 << 0),
- CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
-};
-
-enum MITE_CHCR_bits {
- CHCR_SET_DMA_IE = (1 << 31),
- CHCR_CLR_DMA_IE = (1 << 30),
- CHCR_SET_LINKP_IE = (1 << 29),
- CHCR_CLR_LINKP_IE = (1 << 28),
- CHCR_SET_SAR_IE = (1 << 27),
- CHCR_CLR_SAR_IE = (1 << 26),
- CHCR_SET_DONE_IE = (1 << 25),
- CHCR_CLR_DONE_IE = (1 << 24),
- CHCR_SET_MRDY_IE = (1 << 23),
- CHCR_CLR_MRDY_IE = (1 << 22),
- CHCR_SET_DRDY_IE = (1 << 21),
- CHCR_CLR_DRDY_IE = (1 << 20),
- CHCR_SET_LC_IE = (1 << 19),
- CHCR_CLR_LC_IE = (1 << 18),
- CHCR_SET_CONT_RB_IE = (1 << 17),
- CHCR_CLR_CONT_RB_IE = (1 << 16),
- CHCR_FIFODIS = (1 << 15),
- CHCR_FIFO_ON = 0,
- CHCR_BURSTEN = (1 << 14),
- CHCR_NO_BURSTEN = 0,
- CHCR_BYTE_SWAP_DEVICE = (1 << 6),
- CHCR_BYTE_SWAP_MEMORY = (1 << 4),
- CHCR_DIR = (1 << 3),
- CHCR_DEV_TO_MEM = CHCR_DIR,
- CHCR_MEM_TO_DEV = 0,
- CHCR_NORMAL = (0 << 0),
- CHCR_CONTINUE = (1 << 0),
- CHCR_RINGBUFF = (2 << 0),
- CHCR_LINKSHORT = (4 << 0),
- CHCR_LINKLONG = (5 << 0),
- CHCRPON =
- (CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
-};
-
-enum ConfigRegister_bits {
- CR_REQS_MASK = 0x7 << 16,
- CR_ASEQDONT = 0x0 << 10,
- CR_ASEQUP = 0x1 << 10,
- CR_ASEQDOWN = 0x2 << 10,
- CR_ASEQ_MASK = 0x3 << 10,
- CR_PSIZE8 = (1 << 8),
- CR_PSIZE16 = (2 << 8),
- CR_PSIZE32 = (3 << 8),
- CR_PORTCPU = (0 << 6),
- CR_PORTIO = (1 << 6),
- CR_PORTVXI = (2 << 6),
- CR_PORTMXI = (3 << 6),
- CR_AMDEVICE = (1 << 0),
-};
-
-static inline int CR_REQS(int source)
-{
- return (source & 0x7) << 16;
-};
-
-static inline int CR_REQSDRQ(unsigned drq_line)
-{
- /* This also works on m-series when using channels (drq_line) 4 or 5. */
- return CR_REQS((drq_line & 0x3) | 0x4);
-}
-
-static inline int CR_RL(unsigned int retry_limit)
-{
- int value = 0;
+int mite_init_ring_descriptors(struct mite_ring *, struct comedi_subdevice *,
+ unsigned int nbytes);
+int mite_buf_change(struct mite_ring *, struct comedi_subdevice *);
- if (retry_limit)
- value = 1 + ilog2(retry_limit);
- if (value > 0x7)
- value = 0x7;
- return (value & 0x7) << 21;
-}
+struct mite_ring *mite_alloc_ring(struct mite *);
+void mite_free_ring(struct mite_ring *);
-enum CHSR_bits {
- CHSR_INT = (1 << 31),
- CHSR_LPAUSES = (1 << 29),
- CHSR_SARS = (1 << 27),
- CHSR_DONE = (1 << 25),
- CHSR_MRDY = (1 << 23),
- CHSR_DRDY = (1 << 21),
- CHSR_LINKC = (1 << 19),
- CHSR_CONTS_RB = (1 << 17),
- CHSR_ERROR = (1 << 15),
- CHSR_SABORT = (1 << 14),
- CHSR_HABORT = (1 << 13),
- CHSR_STOPS = (1 << 12),
- CHSR_OPERR_mask = (3 << 10),
- CHSR_OPERR_NOERROR = (0 << 10),
- CHSR_OPERR_FIFOERROR = (1 << 10),
- CHSR_OPERR_LINKERROR = (1 << 10), /* ??? */
- CHSR_XFERR = (1 << 9),
- CHSR_END = (1 << 8),
- CHSR_DRQ1 = (1 << 7),
- CHSR_DRQ0 = (1 << 6),
- CHSR_LxERR_mask = (3 << 4),
- CHSR_LBERR = (1 << 4),
- CHSR_LRERR = (2 << 4),
- CHSR_LOERR = (3 << 4),
- CHSR_MxERR_mask = (3 << 2),
- CHSR_MBERR = (1 << 2),
- CHSR_MRERR = (2 << 2),
- CHSR_MOERR = (3 << 2),
- CHSR_DxERR_mask = (3 << 0),
- CHSR_DBERR = (1 << 0),
- CHSR_DRERR = (2 << 0),
- CHSR_DOERR = (3 << 0),
-};
+struct mite *mite_attach(struct comedi_device *, bool use_win1);
+void mite_detach(struct mite *);
-static inline void mite_dma_reset(struct mite_channel *mite_chan)
-{
- writel(CHOR_DMARESET | CHOR_FRESET,
- mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
-};
+/*
+ * Mite registers (used outside of the mite driver)
+ */
+#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size */
+#define MITE_IODWBSR_1 0xc4 /* IO Device Window1 Base Size */
+#define WENAB BIT(7) /* window enable */
+#define MITE_IODWCR_1 0xf4
#endif
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 46647c64f..0dcb826a9 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1,17 +1,16 @@
/*
- comedi/drivers/ni_660x.c
- Hardware driver for NI 660x devices
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Hardware driver for NI 660x devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: ni_660x
@@ -42,91 +41,13 @@
#include "mite.h"
#include "ni_tio.h"
-enum ni_660x_constants {
- min_counter_pfi_chan = 8,
- max_dio_pfi_chan = 31,
- counters_per_chip = 4
-};
-
-#define NUM_PFI_CHANNELS 40
-/* really there are only up to 3 dma channels, but the register layout allows
-for 4 */
-#define MAX_DMA_CHANNEL 4
-
/* See Register-Level Programmer Manual page 3.1 */
enum ni_660x_register {
- NI660X_G0_INT_ACK,
- NI660X_G0_STATUS,
- NI660X_G1_INT_ACK,
- NI660X_G1_STATUS,
- NI660X_G01_STATUS,
- NI660X_G0_CMD,
- NI660X_STC_DIO_PARALLEL_INPUT,
- NI660X_G1_CMD,
- NI660X_G0_HW_SAVE,
- NI660X_G1_HW_SAVE,
+ /* see enum ni_gpct_register */
+ NI660X_STC_DIO_PARALLEL_INPUT = NITIO_NUM_REGS,
NI660X_STC_DIO_OUTPUT,
NI660X_STC_DIO_CONTROL,
- NI660X_G0_SW_SAVE,
- NI660X_G1_SW_SAVE,
- NI660X_G0_MODE,
- NI660X_G01_STATUS1,
- NI660X_G1_MODE,
NI660X_STC_DIO_SERIAL_INPUT,
- NI660X_G0_LOADA,
- NI660X_G01_STATUS2,
- NI660X_G0_LOADB,
- NI660X_G1_LOADA,
- NI660X_G1_LOADB,
- NI660X_G0_INPUT_SEL,
- NI660X_G1_INPUT_SEL,
- NI660X_G0_AUTO_INC,
- NI660X_G1_AUTO_INC,
- NI660X_G01_RESET,
- NI660X_G0_INT_ENA,
- NI660X_G1_INT_ENA,
- NI660X_G0_CNT_MODE,
- NI660X_G1_CNT_MODE,
- NI660X_G0_GATE2,
- NI660X_G1_GATE2,
- NI660X_G0_DMA_CFG,
- NI660X_G0_DMA_STATUS,
- NI660X_G1_DMA_CFG,
- NI660X_G1_DMA_STATUS,
- NI660X_G2_INT_ACK,
- NI660X_G2_STATUS,
- NI660X_G3_INT_ACK,
- NI660X_G3_STATUS,
- NI660X_G23_STATUS,
- NI660X_G2_CMD,
- NI660X_G3_CMD,
- NI660X_G2_HW_SAVE,
- NI660X_G3_HW_SAVE,
- NI660X_G2_SW_SAVE,
- NI660X_G3_SW_SAVE,
- NI660X_G2_MODE,
- NI660X_G23_STATUS1,
- NI660X_G3_MODE,
- NI660X_G2_LOADA,
- NI660X_G23_STATUS2,
- NI660X_G2_LOADB,
- NI660X_G3_LOADA,
- NI660X_G3_LOADB,
- NI660X_G2_INPUT_SEL,
- NI660X_G3_INPUT_SEL,
- NI660X_G2_AUTO_INC,
- NI660X_G3_AUTO_INC,
- NI660X_G23_RESET,
- NI660X_G2_INT_ENA,
- NI660X_G3_INT_ENA,
- NI660X_G2_CNT_MODE,
- NI660X_G3_CNT_MODE,
- NI660X_G3_GATE2,
- NI660X_G2_GATE2,
- NI660X_G2_DMA_CFG,
- NI660X_G2_DMA_STATUS,
- NI660X_G3_DMA_CFG,
- NI660X_G3_DMA_STATUS,
NI660X_DIO32_INPUT,
NI660X_DIO32_OUTPUT,
NI660X_CLK_CFG,
@@ -156,224 +77,134 @@ enum ni_660x_register {
NI660X_NUM_REGS,
};
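
The consolidation above leans on a simple aliasing trick: the driver-private register indices start at NITIO_NUM_REGS, so the shared ni_tio enum and this enum can index one common offset table without colliding. A generic illustration (all names here are invented for the example, not part of the patch):

	enum shared_reg { SHARED_REG_A, SHARED_REG_B, SHARED_NUM_REGS };
	/* driver-private indices continue where the shared ones stop */
	enum driver_reg { DRIVER_REG_X = SHARED_NUM_REGS, DRIVER_REG_Y, DRIVER_NUM_REGS };

	static const int reg_offset[DRIVER_NUM_REGS] = {
		[SHARED_REG_A] = 0x00,
		[SHARED_REG_B] = 0x04,
		[DRIVER_REG_X] = 0x10,
		[DRIVER_REG_Y] = 0x14,
	};

This is exactly how the ni_660x_reg_data table further down is laid out: NITIO_* entries first, NI660X_* entries after.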
-static inline unsigned IOConfigReg(unsigned pfi_channel)
-{
- unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
-
- BUG_ON(reg > NI660X_IO_CFG_38_39);
- return reg;
-}
-
-enum ni_660x_register_width {
- DATA_1B,
- DATA_2B,
- DATA_4B
-};
+#define NI660X_CLK_CFG_COUNTER_SWAP BIT(21)
-enum ni_660x_register_direction {
- NI_660x_READ,
- NI_660x_WRITE,
- NI_660x_READ_WRITE
-};
+#define NI660X_GLOBAL_INT_COUNTER0 BIT(8)
+#define NI660X_GLOBAL_INT_COUNTER1 BIT(9)
+#define NI660X_GLOBAL_INT_COUNTER2 BIT(10)
+#define NI660X_GLOBAL_INT_COUNTER3 BIT(11)
+#define NI660X_GLOBAL_INT_CASCADE BIT(29)
+#define NI660X_GLOBAL_INT_GLOBAL_POL BIT(30)
+#define NI660X_GLOBAL_INT_GLOBAL BIT(31)
-enum ni_660x_pfi_output_select {
- pfi_output_select_high_Z = 0,
- pfi_output_select_counter = 1,
- pfi_output_select_do = 2,
- num_pfi_output_selects
-};
+#define NI660X_DMA_CFG_SEL(_c, _s) (((_s) & 0x1f) << (8 * (_c)))
+#define NI660X_DMA_CFG_SEL_MASK(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
+#define NI660X_DMA_CFG_SEL_NONE(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
+#define NI660X_DMA_CFG_RESET(_c)	(0x80 << (8 * (_c)))
-enum ni_660x_subdevices {
- NI_660X_DIO_SUBDEV = 1,
- NI_660X_GPCT_SUBDEV_0 = 2
-};
-static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
-{
- return NI_660X_GPCT_SUBDEV_0 + index;
-}
+#define NI660X_IO_CFG(x) (NI660X_IO_CFG_0_1 + ((x) / 2))
+#define NI660X_IO_CFG_OUT_SEL(_c, _s) (((_s) & 0x3) << (((_c) % 2) ? 0 : 8))
+#define NI660X_IO_CFG_OUT_SEL_MASK(_c) NI660X_IO_CFG_OUT_SEL((_c), 0x3)
+#define NI660X_IO_CFG_IN_SEL(_c, _s) (((_s) & 0x7) << (((_c) % 2) ? 4 : 12))
+#define NI660X_IO_CFG_IN_SEL_MASK(_c) NI660X_IO_CFG_IN_SEL((_c), 0x7)
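
Since both new macro families are plain shift-and-mask packings, a few representative expansions can be pinned down at compile time. This block is an editorial sanity check, not part of the patch; BUILD_BUG_ON() comes from <linux/bug.h> in this kernel generation:

	#include <linux/bug.h>

	static inline void ni_660x_macro_examples(void)
	{
		/* mite channel 1 routed to counter 2: select code in bits 8..12 */
		BUILD_BUG_ON(NI660X_DMA_CFG_SEL(1, 2) != 0x0200);
		/* "no counter selected" is the all-ones code 0x1f */
		BUILD_BUG_ON(NI660X_DMA_CFG_SEL_NONE(0) != 0x001f);
		/*
		 * PFI channels pair up per 16-bit IO_CFG register: even
		 * channels use the high byte, odd channels the low byte.
		 */
		BUILD_BUG_ON(NI660X_IO_CFG_OUT_SEL(4, 1) != 0x0100);
		BUILD_BUG_ON(NI660X_IO_CFG_OUT_SEL(5, 1) != 0x0001);
		/* the input (filter) select sits 4 bits above the output select */
		BUILD_BUG_ON(NI660X_IO_CFG_IN_SEL(5, 3) != 0x0030);
	}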
-struct NI_660xRegisterData {
- const char *name; /* Register Name */
+struct ni_660x_register_data {
 	int offset;		/* Offset from base address of the GPCT chip */
- enum ni_660x_register_direction direction;
- enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
-};
-
-static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
- {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
- {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
- {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
- {"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
- {"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
- {"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
- {"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
- {"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
- {"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
- {"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
- {"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
- {"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
- {"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
- {"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
- {"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
- {"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
- {"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
- {"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
- {"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
- {"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
- {"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
- {"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
- {"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
- {"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
- {"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
- {"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
- {"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
- {"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
- {"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
- {"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
- {"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
- {"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
- {"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
- {"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
- {"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
- {"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
- {"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
- {"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
- {"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
- {"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
- {"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
- {"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
- {"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
- {"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
- {"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
- {"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
- {"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
- {"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
- {"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
- {"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
- {"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
- {"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
- {"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
- {"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
- {"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
- {"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
- {"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
- {"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
- {"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
- {"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
- {"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
- {"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
- {"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
- {"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
- {"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
- {"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
- {"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
- {"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
- {"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
-};
-
-/* kind of ENABLE for the second counter */
-enum clock_config_register_bits {
- CounterSwap = 0x1 << 21
-};
-
-/* ioconfigreg */
-static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
-{
- return (pfi_channel % 2) ? 0 : 8;
-}
-
-static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
-{
- return 0x3 << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
- unsigned output_select)
-{
- return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
-{
- return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
- unsigned input_select)
-{
- return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-/* dma configuration register bits */
-static inline unsigned dma_select_mask(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x1f << (8 * dma_channel);
-}
-
-enum dma_selection {
- dma_selection_none = 0x1f,
-};
-
-static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
-}
-
-static inline unsigned dma_reset_bit(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x80 << (8 * dma_channel);
-}
-
-enum global_interrupt_status_register_bits {
- Counter_0_Int_Bit = 0x100,
- Counter_1_Int_Bit = 0x200,
- Counter_2_Int_Bit = 0x400,
- Counter_3_Int_Bit = 0x800,
- Cascade_Int_Bit = 0x20000000,
- Global_Int_Bit = 0x80000000
+ char size; /* 2 or 4 bytes */
};
-enum global_interrupt_config_register_bits {
- Cascade_Int_Enable_Bit = 0x20000000,
- Global_Int_Polarity_Bit = 0x40000000,
- Global_Int_Enable_Bit = 0x80000000
+static const struct ni_660x_register_data ni_660x_reg_data[NI660X_NUM_REGS] = {
+ [NITIO_G0_INT_ACK] = { 0x004, 2 }, /* write */
+ [NITIO_G0_STATUS] = { 0x004, 2 }, /* read */
+ [NITIO_G1_INT_ACK] = { 0x006, 2 }, /* write */
+ [NITIO_G1_STATUS] = { 0x006, 2 }, /* read */
+ [NITIO_G01_STATUS] = { 0x008, 2 }, /* read */
+ [NITIO_G0_CMD] = { 0x00c, 2 }, /* write */
+ [NI660X_STC_DIO_PARALLEL_INPUT] = { 0x00e, 2 }, /* read */
+ [NITIO_G1_CMD] = { 0x00e, 2 }, /* write */
+ [NITIO_G0_HW_SAVE] = { 0x010, 4 }, /* read */
+ [NITIO_G1_HW_SAVE] = { 0x014, 4 }, /* read */
+ [NI660X_STC_DIO_OUTPUT] = { 0x014, 2 }, /* write */
+ [NI660X_STC_DIO_CONTROL] = { 0x016, 2 }, /* write */
+ [NITIO_G0_SW_SAVE] = { 0x018, 4 }, /* read */
+ [NITIO_G1_SW_SAVE] = { 0x01c, 4 }, /* read */
+ [NITIO_G0_MODE] = { 0x034, 2 }, /* write */
+ [NITIO_G01_STATUS1] = { 0x036, 2 }, /* read */
+ [NITIO_G1_MODE] = { 0x036, 2 }, /* write */
+ [NI660X_STC_DIO_SERIAL_INPUT] = { 0x038, 2 }, /* read */
+ [NITIO_G0_LOADA] = { 0x038, 4 }, /* write */
+ [NITIO_G01_STATUS2] = { 0x03a, 2 }, /* read */
+ [NITIO_G0_LOADB] = { 0x03c, 4 }, /* write */
+ [NITIO_G1_LOADA] = { 0x040, 4 }, /* write */
+ [NITIO_G1_LOADB] = { 0x044, 4 }, /* write */
+ [NITIO_G0_INPUT_SEL] = { 0x048, 2 }, /* write */
+ [NITIO_G1_INPUT_SEL] = { 0x04a, 2 }, /* write */
+ [NITIO_G0_AUTO_INC] = { 0x088, 2 }, /* write */
+ [NITIO_G1_AUTO_INC] = { 0x08a, 2 }, /* write */
+ [NITIO_G01_RESET] = { 0x090, 2 }, /* write */
+ [NITIO_G0_INT_ENA] = { 0x092, 2 }, /* write */
+ [NITIO_G1_INT_ENA] = { 0x096, 2 }, /* write */
+ [NITIO_G0_CNT_MODE] = { 0x0b0, 2 }, /* write */
+ [NITIO_G1_CNT_MODE] = { 0x0b2, 2 }, /* write */
+ [NITIO_G0_GATE2] = { 0x0b4, 2 }, /* write */
+ [NITIO_G1_GATE2] = { 0x0b6, 2 }, /* write */
+ [NITIO_G0_DMA_CFG] = { 0x0b8, 2 }, /* write */
+ [NITIO_G0_DMA_STATUS] = { 0x0b8, 2 }, /* read */
+ [NITIO_G1_DMA_CFG] = { 0x0ba, 2 }, /* write */
+ [NITIO_G1_DMA_STATUS] = { 0x0ba, 2 }, /* read */
+ [NITIO_G2_INT_ACK] = { 0x104, 2 }, /* write */
+ [NITIO_G2_STATUS] = { 0x104, 2 }, /* read */
+ [NITIO_G3_INT_ACK] = { 0x106, 2 }, /* write */
+ [NITIO_G3_STATUS] = { 0x106, 2 }, /* read */
+ [NITIO_G23_STATUS] = { 0x108, 2 }, /* read */
+ [NITIO_G2_CMD] = { 0x10c, 2 }, /* write */
+ [NITIO_G3_CMD] = { 0x10e, 2 }, /* write */
+ [NITIO_G2_HW_SAVE] = { 0x110, 4 }, /* read */
+ [NITIO_G3_HW_SAVE] = { 0x114, 4 }, /* read */
+ [NITIO_G2_SW_SAVE] = { 0x118, 4 }, /* read */
+ [NITIO_G3_SW_SAVE] = { 0x11c, 4 }, /* read */
+ [NITIO_G2_MODE] = { 0x134, 2 }, /* write */
+ [NITIO_G23_STATUS1] = { 0x136, 2 }, /* read */
+ [NITIO_G3_MODE] = { 0x136, 2 }, /* write */
+ [NITIO_G2_LOADA] = { 0x138, 4 }, /* write */
+ [NITIO_G23_STATUS2] = { 0x13a, 2 }, /* read */
+ [NITIO_G2_LOADB] = { 0x13c, 4 }, /* write */
+ [NITIO_G3_LOADA] = { 0x140, 4 }, /* write */
+ [NITIO_G3_LOADB] = { 0x144, 4 }, /* write */
+ [NITIO_G2_INPUT_SEL] = { 0x148, 2 }, /* write */
+ [NITIO_G3_INPUT_SEL] = { 0x14a, 2 }, /* write */
+ [NITIO_G2_AUTO_INC] = { 0x188, 2 }, /* write */
+ [NITIO_G3_AUTO_INC] = { 0x18a, 2 }, /* write */
+ [NITIO_G23_RESET] = { 0x190, 2 }, /* write */
+ [NITIO_G2_INT_ENA] = { 0x192, 2 }, /* write */
+ [NITIO_G3_INT_ENA] = { 0x196, 2 }, /* write */
+ [NITIO_G2_CNT_MODE] = { 0x1b0, 2 }, /* write */
+ [NITIO_G3_CNT_MODE] = { 0x1b2, 2 }, /* write */
+ [NITIO_G2_GATE2] = { 0x1b4, 2 }, /* write */
+ [NITIO_G3_GATE2] = { 0x1b6, 2 }, /* write */
+ [NITIO_G2_DMA_CFG] = { 0x1b8, 2 }, /* write */
+ [NITIO_G2_DMA_STATUS] = { 0x1b8, 2 }, /* read */
+ [NITIO_G3_DMA_CFG] = { 0x1ba, 2 }, /* write */
+ [NITIO_G3_DMA_STATUS] = { 0x1ba, 2 }, /* read */
+ [NI660X_DIO32_INPUT] = { 0x414, 4 }, /* read */
+ [NI660X_DIO32_OUTPUT] = { 0x510, 4 }, /* write */
+ [NI660X_CLK_CFG] = { 0x73c, 4 }, /* write */
+ [NI660X_GLOBAL_INT_STATUS] = { 0x754, 4 }, /* read */
+ [NI660X_DMA_CFG] = { 0x76c, 4 }, /* write */
+ [NI660X_GLOBAL_INT_CFG] = { 0x770, 4 }, /* write */
+ [NI660X_IO_CFG_0_1] = { 0x77c, 2 }, /* read/write */
+ [NI660X_IO_CFG_2_3] = { 0x77e, 2 }, /* read/write */
+ [NI660X_IO_CFG_4_5] = { 0x780, 2 }, /* read/write */
+ [NI660X_IO_CFG_6_7] = { 0x782, 2 }, /* read/write */
+ [NI660X_IO_CFG_8_9] = { 0x784, 2 }, /* read/write */
+ [NI660X_IO_CFG_10_11] = { 0x786, 2 }, /* read/write */
+ [NI660X_IO_CFG_12_13] = { 0x788, 2 }, /* read/write */
+ [NI660X_IO_CFG_14_15] = { 0x78a, 2 }, /* read/write */
+ [NI660X_IO_CFG_16_17] = { 0x78c, 2 }, /* read/write */
+ [NI660X_IO_CFG_18_19] = { 0x78e, 2 }, /* read/write */
+ [NI660X_IO_CFG_20_21] = { 0x790, 2 }, /* read/write */
+ [NI660X_IO_CFG_22_23] = { 0x792, 2 }, /* read/write */
+ [NI660X_IO_CFG_24_25] = { 0x794, 2 }, /* read/write */
+ [NI660X_IO_CFG_26_27] = { 0x796, 2 }, /* read/write */
+ [NI660X_IO_CFG_28_29] = { 0x798, 2 }, /* read/write */
+ [NI660X_IO_CFG_30_31] = { 0x79a, 2 }, /* read/write */
+ [NI660X_IO_CFG_32_33] = { 0x79c, 2 }, /* read/write */
+ [NI660X_IO_CFG_34_35] = { 0x79e, 2 }, /* read/write */
+ [NI660X_IO_CFG_36_37] = { 0x7a0, 2 }, /* read/write */
+ [NI660X_IO_CFG_38_39] = { 0x7a2, 2 } /* read/write */
};
-/* Offset of the GPCT chips from the base-address of the card */
-/* First chip is at base-address + 0x00, etc. */
-static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
+#define NI660X_CHIP_OFFSET 0x800
enum ni_660x_boardid {
BOARD_PCI6601,
@@ -385,7 +216,7 @@ enum ni_660x_boardid {
struct ni_660x_board {
const char *name;
- unsigned n_chips; /* total number of TIO chips */
+ unsigned int n_chips; /* total number of TIO chips */
};
static const struct ni_660x_board ni_660x_boards[] = {
@@ -411,280 +242,95 @@ static const struct ni_660x_board ni_660x_boards[] = {
},
};
-#define NI_660X_MAX_NUM_CHIPS 2
-#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
+#define NI660X_NUM_PFI_CHANNELS 40
+
+/* there are only up to 3 dma channels, but the register layout allows for 4 */
+#define NI660X_MAX_DMA_CHANNEL 4
+
+#define NI660X_COUNTERS_PER_CHIP 4
+#define NI660X_MAX_CHIPS 2
+#define NI660X_MAX_COUNTERS (NI660X_MAX_CHIPS * \
+ NI660X_COUNTERS_PER_CHIP)
struct ni_660x_private {
- struct mite_struct *mite;
+ struct mite *mite;
struct ni_gpct_device *counter_dev;
- uint64_t pfi_direction_bits;
- struct mite_dma_descriptor_ring
- *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
+ struct mite_ring *ring[NI660X_MAX_CHIPS][NI660X_COUNTERS_PER_CHIP];
+ /* protects mite channel request/release */
spinlock_t mite_channel_lock;
- /* interrupt_lock prevents races between interrupt and comedi_poll */
+ /* prevents races between interrupt and comedi_poll */
spinlock_t interrupt_lock;
- unsigned dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
- spinlock_t soft_reg_copy_lock;
- unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
+ unsigned int dma_cfg[NI660X_MAX_CHIPS];
+ unsigned int io_cfg[NI660X_NUM_PFI_CHANNELS];
+ u64 io_dir;
};
-static inline unsigned ni_660x_num_counters(struct comedi_device *dev)
-{
- const struct ni_660x_board *board = dev->board_ptr;
-
- return board->n_chips * counters_per_chip;
-}
-
-static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
-{
- switch (reg) {
- case NITIO_G0_AUTO_INC:
- return NI660X_G0_AUTO_INC;
- case NITIO_G1_AUTO_INC:
- return NI660X_G1_AUTO_INC;
- case NITIO_G2_AUTO_INC:
- return NI660X_G2_AUTO_INC;
- case NITIO_G3_AUTO_INC:
- return NI660X_G3_AUTO_INC;
- case NITIO_G0_CMD:
- return NI660X_G0_CMD;
- case NITIO_G1_CMD:
- return NI660X_G1_CMD;
- case NITIO_G2_CMD:
- return NI660X_G2_CMD;
- case NITIO_G3_CMD:
- return NI660X_G3_CMD;
- case NITIO_G0_HW_SAVE:
- return NI660X_G0_HW_SAVE;
- case NITIO_G1_HW_SAVE:
- return NI660X_G1_HW_SAVE;
- case NITIO_G2_HW_SAVE:
- return NI660X_G2_HW_SAVE;
- case NITIO_G3_HW_SAVE:
- return NI660X_G3_HW_SAVE;
- case NITIO_G0_SW_SAVE:
- return NI660X_G0_SW_SAVE;
- case NITIO_G1_SW_SAVE:
- return NI660X_G1_SW_SAVE;
- case NITIO_G2_SW_SAVE:
- return NI660X_G2_SW_SAVE;
- case NITIO_G3_SW_SAVE:
- return NI660X_G3_SW_SAVE;
- case NITIO_G0_MODE:
- return NI660X_G0_MODE;
- case NITIO_G1_MODE:
- return NI660X_G1_MODE;
- case NITIO_G2_MODE:
- return NI660X_G2_MODE;
- case NITIO_G3_MODE:
- return NI660X_G3_MODE;
- case NITIO_G0_LOADA:
- return NI660X_G0_LOADA;
- case NITIO_G1_LOADA:
- return NI660X_G1_LOADA;
- case NITIO_G2_LOADA:
- return NI660X_G2_LOADA;
- case NITIO_G3_LOADA:
- return NI660X_G3_LOADA;
- case NITIO_G0_LOADB:
- return NI660X_G0_LOADB;
- case NITIO_G1_LOADB:
- return NI660X_G1_LOADB;
- case NITIO_G2_LOADB:
- return NI660X_G2_LOADB;
- case NITIO_G3_LOADB:
- return NI660X_G3_LOADB;
- case NITIO_G0_INPUT_SEL:
- return NI660X_G0_INPUT_SEL;
- case NITIO_G1_INPUT_SEL:
- return NI660X_G1_INPUT_SEL;
- case NITIO_G2_INPUT_SEL:
- return NI660X_G2_INPUT_SEL;
- case NITIO_G3_INPUT_SEL:
- return NI660X_G3_INPUT_SEL;
- case NITIO_G01_STATUS:
- return NI660X_G01_STATUS;
- case NITIO_G23_STATUS:
- return NI660X_G23_STATUS;
- case NITIO_G01_RESET:
- return NI660X_G01_RESET;
- case NITIO_G23_RESET:
- return NI660X_G23_RESET;
- case NITIO_G01_STATUS1:
- return NI660X_G01_STATUS1;
- case NITIO_G23_STATUS1:
- return NI660X_G23_STATUS1;
- case NITIO_G01_STATUS2:
- return NI660X_G01_STATUS2;
- case NITIO_G23_STATUS2:
- return NI660X_G23_STATUS2;
- case NITIO_G0_CNT_MODE:
- return NI660X_G0_CNT_MODE;
- case NITIO_G1_CNT_MODE:
- return NI660X_G1_CNT_MODE;
- case NITIO_G2_CNT_MODE:
- return NI660X_G2_CNT_MODE;
- case NITIO_G3_CNT_MODE:
- return NI660X_G3_CNT_MODE;
- case NITIO_G0_GATE2:
- return NI660X_G0_GATE2;
- case NITIO_G1_GATE2:
- return NI660X_G1_GATE2;
- case NITIO_G2_GATE2:
- return NI660X_G2_GATE2;
- case NITIO_G3_GATE2:
- return NI660X_G3_GATE2;
- case NITIO_G0_DMA_CFG:
- return NI660X_G0_DMA_CFG;
- case NITIO_G0_DMA_STATUS:
- return NI660X_G0_DMA_STATUS;
- case NITIO_G1_DMA_CFG:
- return NI660X_G1_DMA_CFG;
- case NITIO_G1_DMA_STATUS:
- return NI660X_G1_DMA_STATUS;
- case NITIO_G2_DMA_CFG:
- return NI660X_G2_DMA_CFG;
- case NITIO_G2_DMA_STATUS:
- return NI660X_G2_DMA_STATUS;
- case NITIO_G3_DMA_CFG:
- return NI660X_G3_DMA_CFG;
- case NITIO_G3_DMA_STATUS:
- return NI660X_G3_DMA_STATUS;
- case NITIO_G0_INT_ACK:
- return NI660X_G0_INT_ACK;
- case NITIO_G1_INT_ACK:
- return NI660X_G1_INT_ACK;
- case NITIO_G2_INT_ACK:
- return NI660X_G2_INT_ACK;
- case NITIO_G3_INT_ACK:
- return NI660X_G3_INT_ACK;
- case NITIO_G0_STATUS:
- return NI660X_G0_STATUS;
- case NITIO_G1_STATUS:
- return NI660X_G1_STATUS;
- case NITIO_G2_STATUS:
- return NI660X_G2_STATUS;
- case NITIO_G3_STATUS:
- return NI660X_G3_STATUS;
- case NITIO_G0_INT_ENA:
- return NI660X_G0_INT_ENA;
- case NITIO_G1_INT_ENA:
- return NI660X_G1_INT_ENA;
- case NITIO_G2_INT_ENA:
- return NI660X_G2_INT_ENA;
- case NITIO_G3_INT_ENA:
- return NI660X_G3_INT_ENA;
- default:
- BUG();
- return 0;
- }
-}
-
-static inline void ni_660x_write_register(struct comedi_device *dev,
- unsigned chip, unsigned bits,
- enum ni_660x_register reg)
+static void ni_660x_write(struct comedi_device *dev, unsigned int chip,
+ unsigned int bits, unsigned int reg)
{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
+ unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
+ ni_660x_reg_data[reg].offset;
- switch (registerData[reg].size) {
- case DATA_2B:
+ if (ni_660x_reg_data[reg].size == 2)
writew(bits, dev->mmio + addr);
- break;
- case DATA_4B:
+ else
writel(bits, dev->mmio + addr);
- break;
- default:
- BUG();
- break;
- }
}
-static inline unsigned ni_660x_read_register(struct comedi_device *dev,
- unsigned chip,
- enum ni_660x_register reg)
+static unsigned int ni_660x_read(struct comedi_device *dev,
+ unsigned int chip, unsigned int reg)
{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
+ unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
+ ni_660x_reg_data[reg].offset;
- switch (registerData[reg].size) {
- case DATA_2B:
+ if (ni_660x_reg_data[reg].size == 2)
return readw(dev->mmio + addr);
- case DATA_4B:
- return readl(dev->mmio + addr);
- default:
- BUG();
- break;
- }
- return 0;
+ return readl(dev->mmio + addr);
}
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
+static void ni_660x_gpct_write(struct ni_gpct *counter, unsigned int bits,
+ enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
- ni_660x_write_register(dev, chip, bits, ni_660x_register);
+ ni_660x_write(dev, counter->chip_index, bits, reg);
}
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
+static unsigned int ni_660x_gpct_read(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
-
- return ni_660x_read_register(dev, chip, ni_660x_register);
-}
-
-static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
- *priv,
- struct ni_gpct
- *counter)
-{
- unsigned chip = counter->chip_index;
- return priv->mite_rings[chip][counter->counter_index];
+ return ni_660x_read(dev, counter->chip_index, reg);
}
static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
+ unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, counter->counter_index);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip] |
- dma_reset_bit(mite_channel), NI660X_DMA_CFG);
+ unsigned int chip = counter->chip_index;
+
+ devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL(mite_channel,
+ counter->counter_index);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
+ NI660X_DMA_CFG_RESET(mite_channel),
+ NI660X_DMA_CFG);
mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
+ unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
+ unsigned int chip = counter->chip_index;
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, dma_selection_none);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip],
- NI660X_DMA_CFG);
+ devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
@@ -692,13 +338,13 @@ static int ni_660x_request_mite_channel(struct comedi_device *dev,
enum comedi_io_direction direction)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned long flags;
+ struct mite_ring *ring;
struct mite_channel *mite_chan;
+ unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(counter->mite_chan);
- mite_chan = mite_request_channel(devpriv->mite,
- mite_ring(devpriv, counter));
+ ring = devpriv->ring[counter->chip_index][counter->counter_index];
+ mite_chan = mite_request_channel(devpriv->mite, ring);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
@@ -757,7 +403,7 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void set_tio_counterswap(struct comedi_device *dev, int chip)
{
- unsigned bits = 0;
+ unsigned int bits = 0;
/*
* See P. 3.5 of the Register-Level Programming manual.
@@ -766,9 +412,9 @@ static void set_tio_counterswap(struct comedi_device *dev, int chip)
* first chip.
*/
if (chip)
- bits = CounterSwap;
+ bits = NI660X_CLK_CFG_COUNTER_SWAP;
- ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG);
+ ni_660x_write(dev, chip, bits, NI660X_CLK_CFG);
}
static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
@@ -785,17 +431,20 @@ static irqreturn_t ni_660x_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct ni_660x_private *devpriv = dev->private;
struct comedi_subdevice *s;
- unsigned i;
+ unsigned int i;
unsigned long flags;
if (!dev->attached)
return IRQ_NONE;
+ /* make sure dev->attached is checked before doing anything else */
+ smp_mb();
+
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- smp_mb();
- for (i = 0; i < ni_660x_num_counters(dev); ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- ni_660x_handle_gpct_interrupt(dev, s);
+ for (i = 0; i < dev->n_subdevices; ++i) {
+ s = &dev->subdevices[i];
+ if (s->type == COMEDI_SUBD_COUNTER)
+ ni_660x_handle_gpct_interrupt(dev, s);
}
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return IRQ_HANDLED;
@@ -810,7 +459,7 @@ static int ni_660x_input_poll(struct comedi_device *dev,
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- mite_sync_input_dma(counter->mite_chan, s);
+ mite_sync_dma(counter->mite_chan, s);
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return comedi_buf_read_n_available(s);
}
@@ -820,9 +469,11 @@ static int ni_660x_buf_change(struct comedi_device *dev,
{
struct ni_660x_private *devpriv = dev->private;
struct ni_gpct *counter = s->private;
+ struct mite_ring *ring;
int ret;
- ret = mite_buf_change(mite_ring(devpriv, counter), s);
+ ring = devpriv->ring[counter->chip_index][counter->counter_index];
+ ret = mite_buf_change(ring, s);
if (ret < 0)
return ret;
@@ -832,7 +483,7 @@ static int ni_660x_buf_change(struct comedi_device *dev,
static int ni_660x_allocate_private(struct comedi_device *dev)
{
struct ni_660x_private *devpriv;
- unsigned i;
+ unsigned int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -840,9 +491,8 @@ static int ni_660x_allocate_private(struct comedi_device *dev)
spin_lock_init(&devpriv->mite_channel_lock);
spin_lock_init(&devpriv->interrupt_lock);
- spin_lock_init(&devpriv->soft_reg_copy_lock);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- devpriv->pfi_output_selects[i] = pfi_output_select_counter;
+ for (i = 0; i < NI660X_NUM_PFI_CHANNELS; ++i)
+ devpriv->io_cfg[i] = NI_660X_PFI_OUTPUT_COUNTER;
return 0;
}
@@ -851,14 +501,13 @@ static int ni_660x_alloc_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
+ unsigned int i;
+ unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j) {
- devpriv->mite_rings[i][j] =
- mite_alloc_ring(devpriv->mite);
- if (!devpriv->mite_rings[i][j])
+ for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j) {
+ devpriv->ring[i][j] = mite_alloc_ring(devpriv->mite);
+ if (!devpriv->ring[i][j])
return -ENOMEM;
}
}
@@ -869,120 +518,101 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
+ unsigned int i;
+ unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j)
- mite_free_ring(devpriv->mite_rings[i][j]);
- }
-}
-
-static void init_tio_chip(struct comedi_device *dev, int chipset)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned i;
-
- /* init dma configuration register */
- devpriv->dma_configuration_soft_copies[chipset] = 0;
- for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
- devpriv->dma_configuration_soft_copies[chipset] |=
- dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
+ for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j)
+ mite_free_ring(devpriv->ring[i][j]);
}
- ni_660x_write_register(dev, chipset,
- devpriv->dma_configuration_soft_copies[chipset],
- NI660X_DMA_CFG);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
}
static int ni_660x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned base_bitfield_channel = CR_CHAN(insn->chanspec);
-
- /* Check if we have to write some bits */
- if (data[0]) {
- s->state &= ~(data[0] << base_bitfield_channel);
- s->state |= (data[0] & data[1]) << base_bitfield_channel;
- /* Write out the new digital output lines */
- ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT);
+ unsigned int shift = CR_CHAN(insn->chanspec);
+ unsigned int mask = data[0] << shift;
+ unsigned int bits = data[1] << shift;
+
+ /*
+ * There are 40 channels in this subdevice but only 32 are usable
+ * as DIO. The shift adjusts the mask/bits to account for the base
+ * channel in insn->chanspec. The state update can then be handled
+ * normally for the 32 usable channels.
+ */
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+ ni_660x_write(dev, 0, s->state, NI660X_DIO32_OUTPUT);
}
- /* on return, data[1] contains the value of the digital
- * input and output lines. */
- data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >>
- base_bitfield_channel);
+
+ /*
+ * Return the input channels, shifted back to account for the base
+ * channel.
+ */
+ data[1] = ni_660x_read(dev, 0, NI660X_DIO32_INPUT) >> shift;
return insn->n;
}
static void ni_660x_select_pfi_output(struct comedi_device *dev,
- unsigned pfi_channel,
- unsigned output_select)
+ unsigned int chan, unsigned int out_sel)
{
const struct ni_660x_board *board = dev->board_ptr;
- static const unsigned counter_4_7_first_pfi = 8;
- static const unsigned counter_4_7_last_pfi = 23;
- unsigned active_chipset = 0;
- unsigned idle_chipset = 0;
- unsigned active_bits;
- unsigned idle_bits;
+ unsigned int active_chip = 0;
+ unsigned int idle_chip = 0;
+ unsigned int bits;
if (board->n_chips > 1) {
- if (output_select == pfi_output_select_counter &&
- pfi_channel >= counter_4_7_first_pfi &&
- pfi_channel <= counter_4_7_last_pfi) {
- active_chipset = 1;
- idle_chipset = 0;
+ if (out_sel == NI_660X_PFI_OUTPUT_COUNTER &&
+ chan >= 8 && chan <= 23) {
+ /* counters 4-7 pfi channels */
+ active_chip = 1;
+ idle_chip = 0;
} else {
- active_chipset = 0;
- idle_chipset = 1;
+ /* counters 0-3 pfi channels */
+ active_chip = 0;
+ idle_chip = 1;
}
}
- if (idle_chipset != active_chipset) {
- idle_bits =
- ni_660x_read_register(dev, idle_chipset,
- IOConfigReg(pfi_channel));
- idle_bits &= ~pfi_output_select_mask(pfi_channel);
- idle_bits |=
- pfi_output_select_bits(pfi_channel,
- pfi_output_select_high_Z);
- ni_660x_write_register(dev, idle_chipset, idle_bits,
- IOConfigReg(pfi_channel));
+ if (idle_chip != active_chip) {
+ /* set the pfi channel to high-z on the inactive chip */
+ bits = ni_660x_read(dev, idle_chip, NI660X_IO_CFG(chan));
+ bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
+ bits |= NI660X_IO_CFG_OUT_SEL(chan, 0); /* high-z */
+ ni_660x_write(dev, idle_chip, bits, NI660X_IO_CFG(chan));
}
- active_bits =
- ni_660x_read_register(dev, active_chipset,
- IOConfigReg(pfi_channel));
- active_bits &= ~pfi_output_select_mask(pfi_channel);
- active_bits |= pfi_output_select_bits(pfi_channel, output_select);
- ni_660x_write_register(dev, active_chipset, active_bits,
- IOConfigReg(pfi_channel));
+ /* set the pfi channel output on the active chip */
+ bits = ni_660x_read(dev, active_chip, NI660X_IO_CFG(chan));
+ bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
+ bits |= NI660X_IO_CFG_OUT_SEL(chan, out_sel);
+ ni_660x_write(dev, active_chip, bits, NI660X_IO_CFG(chan));
}
-static int ni_660x_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
+static int ni_660x_set_pfi_routing(struct comedi_device *dev,
+ unsigned int chan, unsigned int source)
{
struct ni_660x_private *devpriv = dev->private;
- if (source > num_pfi_output_selects)
- return -EINVAL;
- if (source == pfi_output_select_high_Z)
- return -EINVAL;
- if (chan < min_counter_pfi_chan) {
- if (source == pfi_output_select_counter)
+ switch (source) {
+ case NI_660X_PFI_OUTPUT_COUNTER:
+ if (chan < 8)
return -EINVAL;
- } else if (chan > max_dio_pfi_chan) {
- if (source == pfi_output_select_do)
+ break;
+	case NI_660X_PFI_OUTPUT_DIO:
+		if (chan > 31)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
}
- devpriv->pfi_output_selects[chan] = source;
- if (devpriv->pfi_direction_bits & (((uint64_t) 1) << chan))
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
+ devpriv->io_cfg[chan] = source;
+ if (devpriv->io_dir & (1ULL << chan))
+ ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
return 0;
}
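
Seen from userspace, the routing rules just validated look like the hedged comedilib sketch below (editorial, not part of the patch; the device path is an assumption, while subdevice 1 being the 40-channel DIO subdevice matches this driver's layout). Only channels 8-39 may route to a counter and only channels 0-31 to the static DIO output; anything else now fails with -EINVAL:

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");
		unsigned int subdev = 1;	/* DIO subdevice */
		unsigned int bits = 0x5;

		if (!dev)
			return 1;

		/* route PFI channel 0 to the static DIO output... */
		comedi_set_routing(dev, subdev, 0, NI_660X_PFI_OUTPUT_DIO);
		/* ...and enable its output driver */
		comedi_dio_config(dev, subdev, 0, COMEDI_OUTPUT);

		/* write 0b0101 to channels 0-3, then read every input back */
		comedi_dio_bitfield2(dev, subdev, 0xf, &bits, 0);
		printf("dio readback: 0x%x\n", bits);

		comedi_close(dev);
		return 0;
	}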
@@ -993,25 +623,24 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
{
struct ni_660x_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- uint64_t bit = 1ULL << chan;
+ u64 bit = 1ULL << chan;
unsigned int val;
int ret;
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
- devpriv->pfi_direction_bits |= bit;
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
+ devpriv->io_dir |= bit;
+ ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
break;
case INSN_CONFIG_DIO_INPUT:
- devpriv->pfi_direction_bits &= ~bit;
- ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
+ devpriv->io_dir &= ~bit;
+ ni_660x_select_pfi_output(dev, chan, 0); /* high-z */
break;
case INSN_CONFIG_DIO_QUERY:
- data[1] = (devpriv->pfi_direction_bits & bit) ? COMEDI_OUTPUT
- : COMEDI_INPUT;
+ data[1] = (devpriv->io_dir & bit) ? COMEDI_OUTPUT
+ : COMEDI_INPUT;
break;
case INSN_CONFIG_SET_ROUTING:
@@ -1021,14 +650,14 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
break;
case INSN_CONFIG_GET_ROUTING:
- data[1] = devpriv->pfi_output_selects[chan];
+ data[1] = devpriv->io_cfg[chan];
break;
case INSN_CONFIG_FILTER:
- val = ni_660x_read_register(dev, 0, IOConfigReg(chan));
- val &= ~pfi_input_select_mask(chan);
- val |= pfi_input_select_bits(chan, data[1]);
- ni_660x_write_register(dev, 0, val, IOConfigReg(chan));
+ val = ni_660x_read(dev, 0, NI660X_IO_CFG(chan));
+ val &= ~NI660X_IO_CFG_IN_SEL_MASK(chan);
+ val |= NI660X_IO_CFG_IN_SEL(chan, data[1]);
+ ni_660x_write(dev, 0, val, NI660X_IO_CFG(chan));
break;
default:
@@ -1038,6 +667,33 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
+static void ni_660x_init_tio_chips(struct comedi_device *dev,
+ unsigned int n_chips)
+{
+ struct ni_660x_private *devpriv = dev->private;
+ unsigned int chip;
+ unsigned int chan;
+
+ /*
+ * We use the ioconfig registers to control dio direction, so zero
+ * output enables in stc dio control reg.
+ */
+ ni_660x_write(dev, 0, 0, NI660X_STC_DIO_CONTROL);
+
+ for (chip = 0; chip < n_chips; ++chip) {
+ /* init dma configuration register */
+ devpriv->dma_cfg[chip] = 0;
+ for (chan = 0; chan < NI660X_MAX_DMA_CHANNEL; ++chan)
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(chan);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip],
+ NI660X_DMA_CFG);
+
+ /* init ioconfig registers */
+ for (chan = 0; chan < NI660X_NUM_PFI_CHANNELS; ++chan)
+ ni_660x_write(dev, chip, 0, NI660X_IO_CFG(chan));
+ }
+}
+
static int ni_660x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
@@ -1045,9 +701,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
const struct ni_660x_board *board = NULL;
struct ni_660x_private *devpriv;
struct comedi_subdevice *s;
+ struct ni_gpct_device *gpct_dev;
+ unsigned int n_counters;
+ int subdev;
int ret;
- unsigned i;
- unsigned global_interrupt_config_bits;
+ unsigned int i;
+ unsigned int global_interrupt_config_bits;
if (context < ARRAY_SIZE(ni_660x_boards))
board = &ni_660x_boards[context];
@@ -1065,91 +724,147 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
return ret;
devpriv = dev->private;
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, true); /* use win1 */
if (!devpriv->mite)
return -ENOMEM;
- ret = mite_setup2(dev, devpriv->mite, true);
- if (ret < 0)
- return ret;
-
ret = ni_660x_alloc_mite_rings(dev);
if (ret < 0)
return ret;
- ret = comedi_alloc_subdevices(dev, 2 + NI_660X_MAX_NUM_COUNTERS);
+ ni_660x_init_tio_chips(dev, board->n_chips);
+
+ n_counters = board->n_chips * NI660X_COUNTERS_PER_CHIP;
+ gpct_dev = ni_gpct_device_construct(dev,
+ ni_660x_gpct_write,
+ ni_660x_gpct_read,
+ ni_gpct_variant_660x,
+ n_counters);
+ if (!gpct_dev)
+ return -ENOMEM;
+ devpriv->counter_dev = gpct_dev;
+
+ ret = comedi_alloc_subdevices(dev, 2 + NI660X_MAX_COUNTERS);
if (ret)
return ret;
- s = &dev->subdevices[0];
+ subdev = 0;
+
+ s = &dev->subdevices[subdev++];
 	/* Old GENERAL-PURPOSE COUNTER/TIMER (GPCT) subdevice, no longer used */
s->type = COMEDI_SUBD_UNUSED;
- s = &dev->subdevices[NI_660X_DIO_SUBDEV];
- /* DIGITAL I/O SUBDEVICE */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = NUM_PFI_CHANNELS;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_660x_dio_insn_bits;
- s->insn_config = ni_660x_dio_insn_config;
- /* we use the ioconfig registers to control dio direction, so zero
- output enables in stc dio control reg */
- ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL);
-
- devpriv->counter_dev = ni_gpct_device_construct(dev,
- &ni_gpct_write_register,
- &ni_gpct_read_register,
- ni_gpct_variant_660x,
- ni_660x_num_counters
- (dev));
- if (!devpriv->counter_dev)
- return -ENOMEM;
- for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- if (i < ni_660x_num_counters(dev)) {
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
+ /*
+ * Digital I/O subdevice
+ *
+ * There are 40 channels but only the first 32 can be digital I/Os.
+ * The last 8 are dedicated to counters 0 and 1.
+ *
+ * Counter 0-3 signals are from the first TIO chip.
+ * Counter 4-7 signals are from the second TIO chip.
+ *
+ * Comedi External
+ * PFI Chan DIO Chan Counter Signal
+ * ------- -------- --------------
+ * 0 0
+ * 1 1
+ * 2 2
+ * 3 3
+ * 4 4
+ * 5 5
+ * 6 6
+ * 7 7
+ * 8 8 CTR 7 OUT
+ * 9 9 CTR 7 AUX
+ * 10 10 CTR 7 GATE
+ * 11 11 CTR 7 SOURCE
+ * 12 12 CTR 6 OUT
+ * 13 13 CTR 6 AUX
+ * 14 14 CTR 6 GATE
+ * 15 15 CTR 6 SOURCE
+ * 16 16 CTR 5 OUT
+ * 17 17 CTR 5 AUX
+ * 18 18 CTR 5 GATE
+ * 19 19 CTR 5 SOURCE
+ * 20 20 CTR 4 OUT
+ * 21 21 CTR 4 AUX
+ * 22 22 CTR 4 GATE
+ * 23 23 CTR 4 SOURCE
+ * 24 24 CTR 3 OUT
+ * 25 25 CTR 3 AUX
+ * 26 26 CTR 3 GATE
+ * 27 27 CTR 3 SOURCE
+ * 28 28 CTR 2 OUT
+ * 29 29 CTR 2 AUX
+ * 30 30 CTR 2 GATE
+ * 31 31 CTR 2 SOURCE
+ * 32 CTR 1 OUT
+ * 33 CTR 1 AUX
+ * 34 CTR 1 GATE
+ * 35 CTR 1 SOURCE
+ * 36 CTR 0 OUT
+ * 37 CTR 0 AUX
+ * 38 CTR 0 GATE
+ * 39 CTR 0 SOURCE
+ */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = NI660X_NUM_PFI_CHANNELS;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ni_660x_dio_insn_bits;
+ s->insn_config = ni_660x_dio_insn_config;
+
+ /*
+ * Default the DIO channels as:
+ * chan 0-7: DIO inputs
+ * chan 8-39: counter signal inputs
+ */
+ for (i = 0; i < s->n_chan; ++i) {
+ unsigned int source = (i < 8) ? NI_660X_PFI_OUTPUT_DIO
+ : NI_660X_PFI_OUTPUT_COUNTER;
+
+ ni_660x_set_pfi_routing(dev, i, source);
+ ni_660x_select_pfi_output(dev, i, 0); /* high-z */
+ }
+
+ /* Counter subdevices (4 NI TIO General Purpose Counters per chip) */
+ for (i = 0; i < NI660X_MAX_COUNTERS; ++i) {
+ s = &dev->subdevices[subdev++];
+ if (i < n_counters) {
+ struct ni_gpct *counter = &gpct_dev->counters[i];
+
+ counter->chip_index = i / NI660X_COUNTERS_PER_CHIP;
+ counter->counter_index = i % NI660X_COUNTERS_PER_CHIP;
+
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
SDF_LSAMPL | SDF_CMD_READ;
- s->n_chan = 3;
- s->maxdata = 0xffffffff;
- s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_write;
- s->insn_config = ni_tio_insn_config;
- s->do_cmd = &ni_660x_cmd;
- s->len_chanlist = 1;
- s->do_cmdtest = ni_tio_cmdtest;
- s->cancel = &ni_660x_cancel;
- s->poll = &ni_660x_input_poll;
+ s->n_chan = 3;
+ s->maxdata = 0xffffffff;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
+ s->len_chanlist = 1;
+ s->do_cmd = ni_660x_cmd;
+ s->do_cmdtest = ni_tio_cmdtest;
+ s->cancel = ni_660x_cancel;
+ s->poll = ni_660x_input_poll;
+ s->buf_change = ni_660x_buf_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
- s->buf_change = &ni_660x_buf_change;
- s->private = &devpriv->counter_dev->counters[i];
+ s->private = counter;
- devpriv->counter_dev->counters[i].chip_index =
- i / counters_per_chip;
- devpriv->counter_dev->counters[i].counter_index =
- i % counters_per_chip;
+ ni_tio_init_counter(counter);
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
}
- for (i = 0; i < board->n_chips; ++i)
- init_tio_chip(dev, i);
-
- for (i = 0; i < ni_660x_num_counters(dev); ++i)
- ni_tio_init_counter(&devpriv->counter_dev->counters[i]);
-
- for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
- if (i < min_counter_pfi_chan)
- ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
- else
- ni_660x_set_pfi_routing(dev, i,
- pfi_output_select_counter);
- ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
- }
- /* to be safe, set counterswap bits on tio chips after all the counter
- outputs have been set to high impedance mode */
+
+ /*
+ * To be safe, set counterswap bits on tio chips after all the counter
+ * outputs have been set to high impedance mode.
+ */
for (i = 0; i < board->n_chips; ++i)
set_tio_counterswap(dev, i);
@@ -1160,11 +875,11 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
return ret;
}
dev->irq = pcidev->irq;
- global_interrupt_config_bits = Global_Int_Enable_Bit;
+ global_interrupt_config_bits = NI660X_GLOBAL_INT_GLOBAL;
if (board->n_chips > 1)
- global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
- ni_660x_write_register(dev, 0, global_interrupt_config_bits,
- NI660X_GLOBAL_INT_CFG);
+ global_interrupt_config_bits |= NI660X_GLOBAL_INT_CASCADE;
+ ni_660x_write(dev, 0, global_interrupt_config_bits,
+ NI660X_GLOBAL_INT_CFG);
return 0;
}
@@ -1173,11 +888,12 @@ static void ni_660x_detach(struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
- if (dev->irq)
+ if (dev->irq) {
+ ni_660x_write(dev, 0, 0, NI660X_GLOBAL_INT_CFG);
free_irq(dev->irq, dev);
+ }
if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
+ ni_gpct_device_destroy(devpriv->counter_dev);
ni_660x_free_mite_rings(dev);
mite_detach(devpriv->mite);
}
@@ -1218,5 +934,5 @@ static struct pci_driver ni_660x_pci_driver = {
module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for NI 660x counter/timer boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index 83f878adb..be8d5cd3f 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -1,27 +1,22 @@
/*
- ni_labpc.h
-
- Header for ni_labpc.c and ni_labpc_cs.c
-
- Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header for ni_labpc ISA/PCMCIA/PCI drivers
+ *
+ * Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _NI_LABPC_H
#define _NI_LABPC_H
-#define EEPROM_SIZE 256 /* 256 byte eeprom */
-#define NUM_AO_CHAN 2 /* boards have two analog output channels */
-
enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer,
isa_dma_transfer
};
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 863afb28e..b0dfb8eed 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -84,8 +84,10 @@ static const struct comedi_lrange range_labpc_ao = {
}
};
-/* functions that do inb/outb and readb/writeb so we can use
- * function pointers to decide which to use */
+/*
+ * functions that do inb/outb and readb/writeb so we can use
+ * function pointers to decide which to use
+ */
static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg)
{
return inb(dev->iobase + reg);
@@ -656,19 +658,24 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* figure out what method we will use to transfer data */
if (devpriv->dma &&
- /* dma unsafe at RT priority,
- * and too much setup time for CMDF_WAKE_EOS */
- (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0)
+ (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0) {
+ /*
+ * dma unsafe at RT priority,
+ * and too much setup time for CMDF_WAKE_EOS
+ */
xfer = isa_dma_transfer;
- else if (/* pc-plus has no fifo-half full interrupt */
- board->is_labpc1200 &&
- /* wake-end-of-scan should interrupt on fifo not empty */
- (cmd->flags & CMDF_WAKE_EOS) == 0 &&
- /* make sure we are taking more than just a few points */
- (cmd->stop_src != TRIG_COUNT || devpriv->count > 256))
+ } else if (board->is_labpc1200 &&
+ (cmd->flags & CMDF_WAKE_EOS) == 0 &&
+ (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) {
+ /*
+		 * - the pc-plus has no fifo-half-full interrupt
+		 * - wake-end-of-scan should interrupt on fifo-not-empty
+		 * - make sure we are taking more than just a few points
+ */
xfer = fifo_half_full_transfer;
- else
+ } else {
xfer = fifo_not_empty_transfer;
+ }
devpriv->current_transfer = xfer;
labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref);
@@ -679,9 +686,11 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* manual says to set scan enable bit on second pass */
if (mode == MODE_MULT_CHAN_UP || mode == MODE_MULT_CHAN_DOWN) {
devpriv->cmd1 |= CMD1_SCANEN;
- /* need a brief delay before enabling scan, or scan
- * list will get screwed when you switch
- * between scan up to scan down mode - dunno why */
+ /*
+	 * Need a brief delay before enabling scan, or the scan
+	 * list will get corrupted when you switch between
+	 * scan-up and scan-down modes - reason unknown.
+ */
udelay(1);
devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
}
@@ -728,8 +737,10 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->cmd4 = 0;
if (cmd->convert_src != TRIG_EXT)
devpriv->cmd4 |= CMD4_ECLKRCV;
- /* XXX should discard first scan when using interval scanning
- * since manual says it is not synced with scan clock */
+ /*
+ * XXX should discard first scan when using interval scanning
+ * since manual says it is not synced with scan clock.
+ */
if (!labpc_use_continuous_mode(cmd, mode)) {
devpriv->cmd4 |= CMD4_INTSCAN;
if (cmd->scan_begin_src == TRIG_EXT)
@@ -795,8 +806,10 @@ static int labpc_drain_fifo(struct comedi_device *dev)
return 0;
}
-/* makes sure all data acquired by board is transferred to comedi (used
- * when acquisition is terminated by stop_src == TRIG_EXT). */
+/*
+ * Makes sure all data acquired by the board is transferred to comedi (used
+ * when acquisition is terminated by stop_src == TRIG_EXT).
+ */
static void labpc_drain_dregs(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
@@ -907,9 +920,11 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
channel = CR_CHAN(insn->chanspec);
- /* turn off pacing of analog output channel */
- /* note: hardware bug in daqcard-1200 means pacing cannot
- * be independently enabled/disabled for its the two channels */
+ /*
+ * Turn off pacing of analog output channel.
+	 * NOTE: a hardware bug in the daqcard-1200 means pacing cannot
+	 * be independently enabled/disabled for its two channels.
+ */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->cmd2 &= ~CMD2_LDAC(channel);
devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
@@ -1261,7 +1276,7 @@ int labpc_common_attach(struct comedi_device *dev,
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = NUM_AO_CHAN;
+ s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = &range_labpc_ao;
s->insn_write = labpc_ao_insn_write;
@@ -1307,12 +1322,12 @@ int labpc_common_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- /* EEPROM */
+ /* EEPROM (256 bytes) */
s = &dev->subdevices[4];
if (board->is_labpc1200) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = EEPROM_SIZE;
+ s->n_chan = 256;
s->maxdata = 0xff;
s->insn_write = labpc_eeprom_insn_write;
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index a1c69ac07..3d4d0b9ad 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -1,57 +1,50 @@
/*
- comedi/drivers/ni_labpc_cs.c
- Driver for National Instruments daqcard-1200 boards
- Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
- from the pcmcia package.
- The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_labpc_cs
-Description: National Instruments Lab-PC (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
-Status: works
-
-Thanks go to Fredrik Lingvall for much testing and perseverance in
-helping to debug daqcard-1200 support.
-
-The 1200 series boards have onboard calibration dacs for correcting
-analog input/output offsets and gains. The proper settings for these
-caldacs are stored on the board's eeprom. To read the caldac values
-from the eeprom and store them into a file that can be then be used by
-comedilib, use the comedi_calibrate program.
-
-Configuration options:
- none
-
-The daqcard-1200 has quirky chanlist requirements
-when scanning multiple channels. Multiple channel scan
-sequence must start at highest channel, then decrement down to
-channel 0. Chanlists consisting of all one channel
-are also legal, and allow you to pace conversions in bursts.
-
-*/
+ * Driver for National Instruments daqcard-1200 boards
+ * Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
+ * from the pcmcia package.
+ * The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-
-NI manuals:
-340988a (daqcard-1200)
-
-*/
+ * Driver: ni_labpc_cs
+ * Description: National Instruments Lab-PC (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
+ * Status: works
+ *
+ * Thanks go to Fredrik Lingvall for much testing and perseverance in
+ * helping to debug daqcard-1200 support.
+ *
+ * The 1200 series boards have onboard calibration dacs for correcting
+ * analog input/output offsets and gains. The proper settings for these
+ * caldacs are stored on the board's eeprom. To read the caldac values
+ * from the eeprom and store them into a file that can then be used by
+ * comedilib, use the comedi_calibrate program.
+ *
+ * Configuration options: none
+ *
+ * The daqcard-1200 has quirky chanlist requirements when scanning multiple
+ * channels. Multiple channel scan sequence must start at highest channel,
+ * then decrement down to channel 0. Chanlists consisting of all one channel
+ * are also legal, and allow you to pace conversions in bursts.
+ *
+ * NI manuals:
+ * 340988a (daqcard-1200)
+ */
#include <linux/module.h>
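
As an illustration of the chanlist quirk described in the header above, a minimal sketch of a conforming scan list, assuming comedi's CR_PACK() channel-packing macro and a ground reference; the range code is a placeholder and the helper name is hypothetical.

/* sketch: scan list starting at the highest channel, decrementing to 0 */
static void build_daqcard1200_chanlist(unsigned int *list, unsigned int n_chan)
{
	unsigned int i;

	for (i = 0; i < n_chan; i++)
		list[i] = CR_PACK(n_chan - 1 - i, 0 /* range */, AREF_GROUND);
}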
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 77d403801..cac089193 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -51,8 +51,8 @@ static const struct labpc_boardinfo labpc_pci_boards[] = {
};
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
+#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
+#define WENAB BIT(7) /* window enable */
static int labpc_pci_mite_init(struct pci_dev *pcidev)
{
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
index 2a274a3e4..8c52179e3 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -9,32 +9,32 @@
* Register map (all registers are 8-bit)
*/
#define STAT1_REG 0x00 /* R: Status 1 reg */
-#define STAT1_DAVAIL (1 << 0)
-#define STAT1_OVERRUN (1 << 1)
-#define STAT1_OVERFLOW (1 << 2)
-#define STAT1_CNTINT (1 << 3)
-#define STAT1_GATA0 (1 << 5)
-#define STAT1_EXTGATA0 (1 << 6)
+#define STAT1_DAVAIL BIT(0)
+#define STAT1_OVERRUN BIT(1)
+#define STAT1_OVERFLOW BIT(2)
+#define STAT1_CNTINT BIT(3)
+#define STAT1_GATA0 BIT(5)
+#define STAT1_EXTGATA0 BIT(6)
#define CMD1_REG 0x00 /* W: Command 1 reg */
#define CMD1_MA(x) (((x) & 0x7) << 0)
-#define CMD1_TWOSCMP (1 << 3)
+#define CMD1_TWOSCMP BIT(3)
#define CMD1_GAIN(x) (((x) & 0x7) << 4)
-#define CMD1_SCANEN (1 << 7)
+#define CMD1_SCANEN BIT(7)
#define CMD2_REG 0x01 /* W: Command 2 reg */
-#define CMD2_PRETRIG (1 << 0)
-#define CMD2_HWTRIG (1 << 1)
-#define CMD2_SWTRIG (1 << 2)
-#define CMD2_TBSEL (1 << 3)
-#define CMD2_2SDAC0 (1 << 4)
-#define CMD2_2SDAC1 (1 << 5)
-#define CMD2_LDAC(x) (1 << (6 + (x)))
+#define CMD2_PRETRIG BIT(0)
+#define CMD2_HWTRIG BIT(1)
+#define CMD2_SWTRIG BIT(2)
+#define CMD2_TBSEL BIT(3)
+#define CMD2_2SDAC0 BIT(4)
+#define CMD2_2SDAC1 BIT(5)
+#define CMD2_LDAC(x) BIT(6 + ((x) & 0x1))
#define CMD3_REG 0x02 /* W: Command 3 reg */
-#define CMD3_DMAEN (1 << 0)
-#define CMD3_DIOINTEN (1 << 1)
-#define CMD3_DMATCINTEN (1 << 2)
-#define CMD3_CNTINTEN (1 << 3)
-#define CMD3_ERRINTEN (1 << 4)
-#define CMD3_FIFOINTEN (1 << 5)
+#define CMD3_DMAEN BIT(0)
+#define CMD3_DIOINTEN BIT(1)
+#define CMD3_DMATCINTEN BIT(2)
+#define CMD3_CNTINTEN BIT(3)
+#define CMD3_ERRINTEN BIT(4)
+#define CMD3_FIFOINTEN BIT(5)
#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
@@ -43,32 +43,32 @@
#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
#define CMD6_REG 0x0e /* W: Command 6 reg */
-#define CMD6_NRSE (1 << 0)
-#define CMD6_ADCUNI (1 << 1)
-#define CMD6_DACUNI(x) (1 << (2 + (x)))
-#define CMD6_HFINTEN (1 << 5)
-#define CMD6_DQINTEN (1 << 6)
-#define CMD6_SCANUP (1 << 7)
+#define CMD6_NRSE BIT(0)
+#define CMD6_ADCUNI BIT(1)
+#define CMD6_DACUNI(x) BIT(2 + ((x) & 0x1))
+#define CMD6_HFINTEN BIT(5)
+#define CMD6_DQINTEN BIT(6)
+#define CMD6_SCANUP BIT(7)
#define CMD4_REG 0x0f /* W: Command 3 reg */
-#define CMD4_INTSCAN (1 << 0)
-#define CMD4_EOIRCV (1 << 1)
-#define CMD4_ECLKDRV (1 << 2)
-#define CMD4_SEDIFF (1 << 3)
-#define CMD4_ECLKRCV (1 << 4)
+#define CMD4_INTSCAN BIT(0)
+#define CMD4_EOIRCV BIT(1)
+#define CMD4_ECLKDRV BIT(2)
+#define CMD4_SEDIFF BIT(3)
+#define CMD4_ECLKRCV BIT(4)
#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
#define CMD5_REG 0x1c /* W: Command 5 reg */
-#define CMD5_WRTPRT (1 << 2)
-#define CMD5_DITHEREN (1 << 3)
-#define CMD5_CALDACLD (1 << 4)
-#define CMD5_SCLK (1 << 5)
-#define CMD5_SDATA (1 << 6)
-#define CMD5_EEPROMCS (1 << 7)
+#define CMD5_WRTPRT BIT(2)
+#define CMD5_DITHEREN BIT(3)
+#define CMD5_CALDACLD BIT(4)
+#define CMD5_SCLK BIT(5)
+#define CMD5_SDATA BIT(6)
+#define CMD5_EEPROMCS BIT(7)
#define STAT2_REG 0x1d /* R: Status 2 reg */
-#define STAT2_PROMOUT (1 << 0)
-#define STAT2_OUTA1 (1 << 1)
-#define STAT2_FIFONHF (1 << 2)
+#define STAT2_PROMOUT BIT(0)
+#define STAT2_OUTA1 BIT(1)
+#define STAT2_FIFONHF BIT(2)
#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
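
As a quick sanity check of the BIT() conversion above, here is how the Command 1 defines compose into a register value; the field values are arbitrary examples.

unsigned int cmd1 = CMD1_MA(3) |	/* MUX address 3 (bits 2:0) */
		    CMD1_TWOSCMP |	/* two's complement coding (bit 3) */
		    CMD1_GAIN(2) |	/* gain code 2 (bits 6:4) */
		    CMD1_SCANEN;	/* enable scanning (bit 7) */
/* cmd1 == 0x03 | 0x08 | 0x20 | 0x80 == 0xab, identical to the (1 << n) forms */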
diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c
deleted file mode 100644
index e69de29bb..000000000
--- a/drivers/staging/comedi/drivers/ni_mio_c_common.c
+++ /dev/null
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index dcaf7e89f..8dabb1951 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1,56 +1,53 @@
/*
- comedi/drivers/ni_mio_common.c
- Hardware driver for DAQ-STC based boards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
- Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Hardware driver for DAQ-STC based boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- This file is meant to be included by another file, e.g.,
- ni_atmio.c or ni_pcimio.c.
-
- Interrupt support originally added by Truxton Fulton
- <trux@truxton.com>
-
- References (from ftp://ftp.natinst.com/support/manuals):
-
- 340747b.pdf AT-MIO E series Register Level Programmer Manual
- 341079b.pdf PCI E Series RLPM
- 340934b.pdf DAQ-STC reference manual
- 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/)
- release_ni611x.pdf
- release_ni67xx.pdf
- Other possibly relevant info:
-
- 320517c.pdf User manual (obsolete)
- 320517f.pdf User manual (new)
- 320889a.pdf delete
- 320906c.pdf maximum signal ratings
- 321066a.pdf about 16x
- 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- 321808a.pdf about at-mio-16e-10 rev P
- 321837a.pdf discontinuation of at-mio-16de-10 rev d
- 321838a.pdf about at-mio-16de-10 rev N
-
- ISSUES:
-
- - the interrupt routine needs to be cleaned up
-
- 2006-02-07: S-Series PCI-6143: Support has been added but is not
- fully tested as yet. Terry Barnaby, BEAM Ltd.
-*/
+ * This file is meant to be included by another file, e.g.,
+ * ni_atmio.c or ni_pcimio.c.
+ *
+ * Interrupt support originally added by Truxton Fulton <trux@truxton.com>
+ *
+ * References (ftp://ftp.natinst.com/support/manuals):
+ * 340747b.pdf AT-MIO E series Register Level Programmer Manual
+ * 341079b.pdf PCI E Series RLPM
+ * 340934b.pdf DAQ-STC reference manual
+ *
+ * 67xx and 611x registers (ftp://ftp.ni.com/support/daq/mhddk/documentation/)
+ * release_ni611x.pdf
+ * release_ni67xx.pdf
+ *
+ * Other possibly relevant info:
+ * 320517c.pdf User manual (obsolete)
+ * 320517f.pdf User manual (new)
+ * 320889a.pdf delete
+ * 320906c.pdf maximum signal ratings
+ * 321066a.pdf about 16x
+ * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf about at-mio-16e-10 rev P
+ * 321837a.pdf discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - the interrupt routine needs to be cleaned up
+ *
+ * 2006-02-07: S-Series PCI-6143: Support has been added but is not
+ * fully tested as yet. Terry Barnaby, BEAM Ltd.
+ */
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -216,19 +213,8 @@ enum ni_common_subdevices {
NI_FREQ_OUT_SUBDEV,
NI_NUM_SUBDEVICES
};
-static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NI_GPCT0_SUBDEV;
- case 1:
- return NI_GPCT1_SUBDEV;
- default:
- break;
- }
- BUG();
- return NI_GPCT0_SUBDEV;
-}
+
+#define NI_GPCT_SUBDEV(x) (NI_GPCT0_SUBDEV + (x))
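
Worth noting: the macro matches the removed inline helper only because of the enum layout above, and the old BUG() bounds check is gone.

/*
 * NI_GPCT_SUBDEV(0) == NI_GPCT0_SUBDEV
 * NI_GPCT_SUBDEV(1) == NI_GPCT0_SUBDEV + 1 == NI_GPCT1_SUBDEV
 * (relies on the two counter subdevices being consecutive enum values;
 * out-of-range indexes are no longer caught)
 */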
enum timebase_nanoseconds {
TIMEBASE_1_NS = 50,
@@ -242,7 +228,7 @@ enum timebase_nanoseconds {
static const int num_adc_stages_611x = 3;
-static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
+static void ni_writel(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
@@ -250,7 +236,7 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
outl(data, dev->iobase + reg);
}
-static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
+static void ni_writew(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
@@ -258,7 +244,7 @@ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
outw(data, dev->iobase + reg);
}
-static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
+static void ni_writeb(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
@@ -266,7 +252,7 @@ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
outb(data, dev->iobase + reg);
}
-static uint32_t ni_readl(struct comedi_device *dev, int reg)
+static unsigned int ni_readl(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readl(dev->mmio + reg);
@@ -274,7 +260,7 @@ static uint32_t ni_readl(struct comedi_device *dev, int reg)
return inl(dev->iobase + reg);
}
-static uint16_t ni_readw(struct comedi_device *dev, int reg)
+static unsigned int ni_readw(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readw(dev->mmio + reg);
@@ -282,7 +268,7 @@ static uint16_t ni_readw(struct comedi_device *dev, int reg)
return inw(dev->iobase + reg);
}
-static uint8_t ni_readb(struct comedi_device *dev, int reg)
+static unsigned int ni_readb(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readb(dev->mmio + reg);
@@ -457,7 +443,8 @@ static unsigned int m_series_stc_read(struct comedi_device *dev,
}
}
-static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
+static void ni_stc_writew(struct comedi_device *dev,
+ unsigned int data, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -476,7 +463,8 @@ static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
}
}
-static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
+static void ni_stc_writel(struct comedi_device *dev,
+ unsigned int data, int reg)
{
struct ni_private *devpriv = dev->private;
@@ -488,11 +476,11 @@ static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
}
}
-static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
+static unsigned int ni_stc_readw(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
- uint16_t val;
+ unsigned int val;
if (devpriv->is_m_series) {
val = m_series_stc_read(dev, reg);
@@ -509,10 +497,10 @@ static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
return val;
}
-static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
+static unsigned int ni_stc_readl(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
- uint32_t val;
+ unsigned int val;
if (devpriv->is_m_series) {
val = m_series_stc_read(dev, reg);
@@ -524,7 +512,8 @@ static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
}
static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
- unsigned bit_mask, unsigned bit_values)
+ unsigned int bit_mask,
+ unsigned int bit_values)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -556,6 +545,11 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
devpriv->g0_g1_select_reg |= bit_values & bit_mask;
ni_writeb(dev, devpriv->g0_g1_select_reg, reg);
break;
+ case NI_M_CDIO_DMA_SEL_REG:
+ devpriv->cdio_dma_select_reg &= ~bit_mask;
+ devpriv->cdio_dma_select_reg |= bit_values & bit_mask;
+ ni_writeb(dev, devpriv->cdio_dma_select_reg, reg);
+ break;
default:
dev_err(dev->class_dev, "called with invalid register %d\n",
reg);
@@ -566,116 +560,35 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
}
#ifdef PCIDMA
-/* DMA channel setup */
-static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
-{
- if (channel < 4)
- return 1 << channel;
- if (channel == 4)
- return 0x3;
- if (channel == 5)
- return 0x5;
- BUG();
- return 0;
-}
-
-static inline void ni_set_ai_dma_channel(struct comedi_device *dev,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
-}
-
-static inline void ni_set_ai_dma_no_channel(struct comedi_device *dev)
-{
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AI_SEL_MASK, 0);
-}
-
-static inline void ni_set_ao_dma_channel(struct comedi_device *dev,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
-}
-
-static inline void ni_set_ao_dma_no_channel(struct comedi_device *dev)
-{
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AO_SEL_MASK, 0);
-}
-
-static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
- unsigned gpct_index,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
- NI_E_DMA_G0_G1_SEL(gpct_index, bits));
-}
-
-static inline void ni_set_gpct_dma_no_channel(struct comedi_device *dev,
- unsigned gpct_index)
-{
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
-}
-
-static inline void ni_set_cdo_dma_channel(struct comedi_device *dev,
- unsigned mite_channel)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- unsigned bits;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- /*
- * XXX just guessing ni_stc_dma_channel_select_bitfield()
- * returns the right bits, under the assumption the cdio dma
- * selection works just like ai/ao/gpct.
- * Definitely works for dma channels 0 and 1.
- */
- bits = ni_stc_dma_channel_select_bitfield(mite_channel);
- devpriv->cdio_dma_select_reg |= NI_M_CDIO_DMA_SEL_CDO(bits);
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-static inline void ni_set_cdo_dma_no_channel(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
+/* selects the MITE channel to use for DMA */
+#define NI_STC_DMA_CHAN_SEL(x) (((x) < 4) ? BIT(x) : \
+ ((x) == 4) ? 0x3 : \
+ ((x) == 5) ? 0x5 : 0x0)
+/* DMA channel setup */
static int ni_request_ai_mite_channel(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ai_mite_chan);
- devpriv->ai_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
- if (!devpriv->ai_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for analog input\n");
return -EBUSY;
}
- devpriv->ai_mite_chan->dir = COMEDI_INPUT;
- ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
+ mite_chan->dir = COMEDI_INPUT;
+ devpriv->ai_mite_chan = mite_chan;
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
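
For reference, the select values NI_STC_DMA_CHAN_SEL() yields, read straight off the macro definition above:

/*
 * NI_STC_DMA_CHAN_SEL(0..3) -> BIT(n) -> 0x1, 0x2, 0x4, 0x8
 * NI_STC_DMA_CHAN_SEL(4)    -> 0x3
 * NI_STC_DMA_CHAN_SEL(5)    -> 0x5
 * anything else             -> 0x0 (no channel selected)
 *
 * The value is then shifted into the AI/AO/G0/G1 field by the
 * NI_E_DMA_*_SEL() helpers before ni_set_bitfield() writes it out.
 */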
@@ -683,37 +596,42 @@ static int ni_request_ai_mite_channel(struct comedi_device *dev)
static int ni_request_ao_mite_channel(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ao_mite_chan);
- devpriv->ao_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
- if (!devpriv->ao_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for analog outut\n");
return -EBUSY;
}
- devpriv->ao_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
+ mite_chan->dir = COMEDI_OUTPUT;
+ devpriv->ao_mite_chan = mite_chan;
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static int ni_request_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index,
+ unsigned int gpct_index,
enum comedi_io_direction direction)
{
struct ni_private *devpriv = dev->private;
- unsigned long flags;
+ struct ni_gpct *counter = &devpriv->counter_dev->counters[gpct_index];
struct mite_channel *mite_chan;
+ unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan);
- mite_chan =
- mite_request_channel(devpriv->mite,
- devpriv->gpct_mite_ring[gpct_index]);
+ mite_chan = mite_request_channel(devpriv->mite,
+ devpriv->gpct_mite_ring[gpct_index]);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
@@ -721,37 +639,50 @@ static int ni_request_gpct_mite_channel(struct comedi_device *dev,
return -EBUSY;
}
mite_chan->dir = direction;
- ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index],
- mite_chan);
- ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
+ ni_tio_set_mite_channel(counter, mite_chan);
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
+ NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
+ NI_E_DMA_G0_G1_SEL(gpct_index, bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
-#endif /* PCIDMA */
-
static int ni_request_cdo_mite_channel(struct comedi_device *dev)
{
-#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->cdo_mite_chan);
- devpriv->cdo_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
- if (!devpriv->cdo_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for correlated digital output\n");
return -EBUSY;
}
- devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
+ mite_chan->dir = COMEDI_OUTPUT;
+ devpriv->cdo_mite_chan = mite_chan;
+
+ /*
+ * XXX just guessing NI_STC_DMA_CHAN_SEL()
+ * returns the right bits, under the assumption the cdio dma
+ * selection works just like ai/ao/gpct.
+ * Definitely works for dma channels 0 and 1.
+ */
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
+ NI_M_CDIO_DMA_SEL_CDO_MASK,
+ NI_M_CDIO_DMA_SEL_CDO(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
return 0;
}
+#endif /* PCIDMA */
static void ni_release_ai_mite_channel(struct comedi_device *dev)
{
@@ -761,7 +692,8 @@ static void ni_release_ai_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan) {
- ni_set_ai_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AI_SEL_MASK, 0);
mite_release_channel(devpriv->ai_mite_chan);
devpriv->ai_mite_chan = NULL;
}
@@ -777,7 +709,8 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ao_mite_chan) {
- ni_set_ao_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AO_SEL_MASK, 0);
mite_release_channel(devpriv->ao_mite_chan);
devpriv->ao_mite_chan = NULL;
}
@@ -787,7 +720,7 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
#ifdef PCIDMA
static void ni_release_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index)
+ unsigned int gpct_index)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -797,7 +730,8 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
struct mite_channel *mite_chan =
devpriv->counter_dev->counters[gpct_index].mite_chan;
- ni_set_gpct_dma_no_channel(dev, gpct_index);
+ ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
+ NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
ni_tio_set_mite_channel(&devpriv->
counter_dev->counters[gpct_index],
NULL);
@@ -805,30 +739,27 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
-#endif /* PCIDMA */
static void ni_release_cdo_mite_channel(struct comedi_device *dev)
{
-#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
- ni_set_cdo_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
+ NI_M_CDIO_DMA_SEL_CDO_MASK, 0);
mite_release_channel(devpriv->cdo_mite_chan);
devpriv->cdo_mite_chan = NULL;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
}
-#ifdef PCIDMA
static void ni_e_series_enable_second_irq(struct comedi_device *dev,
- unsigned gpct_index, short enable)
+ unsigned int gpct_index, short enable)
{
struct ni_private *devpriv = dev->private;
- uint16_t val = 0;
+ unsigned int val = 0;
int reg;
if (devpriv->is_m_series || gpct_index > 1)
@@ -875,8 +806,10 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
ni_writeb(dev, 0, NI_M_STATIC_AI_CTRL_REG(0));
ni_writeb(dev, 1, NI_M_STATIC_AI_CTRL_REG(0));
#if 0
- /* the NI example code does 3 convert pulses for 625x boards,
- but that appears to be wrong in practice. */
+ /*
+ * The NI example code does 3 convert pulses for 625x
+	 * boards, but that appears to be wrong in practice.
+ */
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
NISTC_AI_CMD1_REG);
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
@@ -888,8 +821,8 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
}
}
-static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
- int addr)
+static inline void ni_ao_win_outw(struct comedi_device *dev,
+ unsigned int data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -900,8 +833,8 @@ static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
-static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data,
- int addr)
+static inline void ni_ao_win_outl(struct comedi_device *dev,
+ unsigned int data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -925,20 +858,21 @@ static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr)
return data;
}
-/* ni_set_bits( ) allows different parts of the ni_mio_common driver to
-* share registers (such as Interrupt_A_Register) without interfering with
-* each other.
-*
-* NOTE: the switch/case statements are optimized out for a constant argument
-* so this is actually quite fast--- If you must wrap another function around this
-* make it inline to avoid a large speed penalty.
-*
-* value should only be 1 or 0.
-*/
+/*
+ * ni_set_bits() allows different parts of the ni_mio_common driver to
+ * share registers (such as Interrupt_A_Register) without interfering with
+ * each other.
+ *
+ * NOTE: the switch/case statements are optimized out for a constant
+ * argument, so this is actually quite fast. If you must wrap another
+ * function around this, make it inline to avoid a large speed penalty.
+ *
+ * value should only be 1 or 0.
+ */
static inline void ni_set_bits(struct comedi_device *dev, int reg,
- unsigned bits, unsigned value)
+ unsigned int bits, unsigned int value)
{
- unsigned bit_values;
+ unsigned int bit_values;
if (value)
bit_values = bits;
@@ -956,7 +890,7 @@ static void ni_sync_ai_dma(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan)
- mite_sync_input_dma(devpriv->ai_mite_chan, s);
+ mite_sync_dma(devpriv->ai_mite_chan, s);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
@@ -972,9 +906,8 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
if (devpriv->ai_mite_chan) {
for (i = 0; i < timeout; i++) {
if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E)
- && mite_bytes_in_transit(devpriv->ai_mite_chan) ==
- 0)
+ NISTC_AI_STATUS1_FIFO_E) &&
+ mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
break;
udelay(5);
}
@@ -994,19 +927,6 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
return retval;
}
-static void mite_handle_b_linkc(struct mite_struct *mite,
- struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan)
- mite_sync_output_dma(devpriv->ao_mite_chan, s);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
{
static const int timeout = 10000;
@@ -1018,9 +938,11 @@ static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
if (b_status & NISTC_AO_STATUS1_FIFO_HF)
break;
- /* if we poll too often, the pci bus activity seems
- to slow the dma transfer down */
- udelay(10);
+ /*
+ * If we poll too often, the pci bus activity seems
+ * to slow the dma transfer down.
+ */
+ usleep_range(10, 100);
}
if (i == timeout) {
dev_err(dev->class_dev, "timed out waiting for dma load\n");
@@ -1038,7 +960,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
int i;
unsigned short d;
- u32 packed_data;
+ unsigned int packed_data;
for (i = 0; i < n; i++) {
comedi_buf_read_samples(s, &d, 1);
@@ -1128,7 +1050,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
- u32 dl;
+ unsigned int dl;
unsigned short data;
int i;
@@ -1148,7 +1070,10 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
- /* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */
+ /*
+ * This just reads the FIFO assuming the data is present,
+ * no checks on the FIFO status are performed.
+ */
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
@@ -1192,16 +1117,13 @@ static void ni_handle_fifo_half_full(struct comedi_device *dev)
}
#endif
-/*
- Empties the AI fifo
-*/
+/* Empties the AI fifo */
static void ni_handle_fifo_dregs(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- u32 dl;
+ unsigned int dl;
unsigned short data;
- unsigned short fifo_empty;
int i;
if (devpriv->is_611x) {
@@ -1237,15 +1159,16 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
}
} else {
- fifo_empty = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- while (fifo_empty == 0) {
+ unsigned short fe; /* fifo empty */
+
+ fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
+ NISTC_AI_STATUS1_FIFO_E;
+ while (fe == 0) {
for (i = 0;
i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) {
- fifo_empty = ni_stc_readw(dev,
- NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- if (fifo_empty)
+ fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
+ NISTC_AI_STATUS1_FIFO_E;
+ if (fe)
break;
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
@@ -1260,7 +1183,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short data;
- u32 dl;
+ unsigned int dl;
if (!devpriv->is_611x)
return;
@@ -1278,7 +1201,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short data;
- u32 dl;
+ unsigned int dl;
if (!devpriv->is_6143)
return;
@@ -1365,42 +1288,23 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
}
-static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
- unsigned ai_mite_status)
+static void handle_a_interrupt(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short status)
{
- struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
- /* 67xx boards don't have ai subdevice, but their gpct0 might generate an a interrupt */
- if (s->type == COMEDI_SUBD_UNUSED)
- return;
-
-#ifdef PCIDMA
- if (ai_mite_status & CHSR_LINKC)
- ni_sync_ai_dma(dev);
-
- if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ai_mite_status=%08x)\n",
- ai_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- /* disable_irq(dev->irq); */
- }
-#endif
-
/* test for all uncommon interrupt events at the same time */
if (status & (NISTC_AI_STATUS1_ERR |
NISTC_AI_STATUS1_SC_TC | NISTC_AI_STATUS1_START1)) {
if (status == 0xffff) {
dev_err(dev->class_dev, "Card removed?\n");
- /* we probably aren't even running a command now,
- * so it's a good idea to be careful. */
- if (comedi_is_subdevice_running(s)) {
+ /*
+ * We probably aren't even running a command now,
+ * so it's a good idea to be careful.
+ */
+ if (comedi_is_subdevice_running(s))
s->async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- }
return;
}
if (status & NISTC_AI_STATUS1_ERR) {
@@ -1412,8 +1316,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
s->async->events |= COMEDI_CB_ERROR;
if (status & NISTC_AI_STATUS1_OVER)
s->async->events |= COMEDI_CB_OVERFLOW;
-
- comedi_handle_events(dev, s);
return;
}
if (status & NISTC_AI_STATUS1_SC_TC) {
@@ -1425,8 +1327,11 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & NISTC_AI_STATUS1_FIFO_HF) {
int i;
static const int timeout = 10;
- /* pcmcia cards (at least 6036) seem to stop producing interrupts if we
- *fail to get the fifo less than half full, so loop to be sure.*/
+ /*
+ * PCMCIA cards (at least 6036) seem to stop producing
+ * interrupts if we fail to get the fifo less than half
+ * full, so loop to be sure.
+ */
for (i = 0; i < timeout; ++i) {
ni_handle_fifo_half_full(dev);
if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
@@ -1438,8 +1343,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & NISTC_AI_STATUS1_STOP)
ni_handle_eos(dev, s);
-
- comedi_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1465,29 +1368,9 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
}
static void handle_b_interrupt(struct comedi_device *dev,
- unsigned short b_status, unsigned ao_mite_status)
+ struct comedi_subdevice *s,
+ unsigned short b_status)
{
- struct comedi_subdevice *s = dev->write_subdev;
- /* unsigned short ack=0; */
-
-#ifdef PCIDMA
- /* Currently, mite.c requires us to handle LINKC */
- if (ao_mite_status & CHSR_LINKC) {
- struct ni_private *devpriv = dev->private;
-
- mite_handle_b_linkc(devpriv->mite, dev);
- }
-
- if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ao_mite_status=%08x)\n",
- ao_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- }
-#endif
-
if (b_status == 0xffff)
return;
if (b_status & NISTC_AO_STATUS1_OVERRUN) {
@@ -1515,8 +1398,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
}
#endif
-
- comedi_handle_events(dev, s);
}
static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -1606,8 +1487,11 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
if (devpriv->is_611x || devpriv->is_6713) {
mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
} else {
- /* doing 32 instead of 16 bit wide transfers from memory
- makes the mite do 32 bit pci transfers, doubling pci bandwidth. */
+ /*
+ * Doing 32 instead of 16 bit wide transfers from
+ * memory makes the mite do 32 bit pci transfers,
+ * doubling pci bandwidth.
+ */
mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
}
mite_dma_arm(devpriv->ao_mite_chan);
@@ -1622,16 +1506,15 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
#endif /* PCIDMA */
/*
- used for both cancel ioctl and board initialization
-
- this is pretty harsh for a cancel, but it works...
+ * used for both cancel ioctl and board initialization
+ *
+ * this is pretty harsh for a cancel, but it works...
*/
-
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
- unsigned ai_personal;
- unsigned ai_out_ctrl;
+ unsigned int ai_personal;
+ unsigned int ai_out_ctrl;
ni_release_ai_mite_channel(dev);
/* ai configuration */
@@ -1736,12 +1619,12 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
unsigned int chan, range, aref;
unsigned int i;
unsigned int dither;
- unsigned range_code;
+ unsigned int range_code;
ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG);
if ((list[0] & CR_ALT_SOURCE)) {
- unsigned bypass_bits;
+ unsigned int bypass_bits;
chan = CR_CHAN(list[0]);
range = CR_RANGE(list[0]);
@@ -1760,7 +1643,7 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
ni_writel(dev, 0, NI_M_CFG_BYPASS_FIFO_REG);
}
for (i = 0; i < n_chan; i++) {
- unsigned config_bits = 0;
+ unsigned int config_bits = 0;
chan = CR_CHAN(list[i]);
aref = CR_AREF(list[i]);
@@ -1842,8 +1725,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
return;
}
if (n_chan == 1 && !devpriv->is_611x && !devpriv->is_6143) {
- if (devpriv->changain_state
- && devpriv->changain_spec == list[0]) {
+ if (devpriv->changain_state &&
+ devpriv->changain_spec == list[0]) {
/* ready to go. */
return;
}
@@ -1857,8 +1740,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
/* Set up Calibration mode if required */
if (devpriv->is_6143) {
- if ((list[0] & CR_ALT_SOURCE)
- && !devpriv->ai_calib_source_enabled) {
+ if ((list[0] & CR_ALT_SOURCE) &&
+ !devpriv->ai_calib_source_enabled) {
/* Strobe Relay enable bit */
ni_writew(dev, devpriv->ai_calib_source |
NI6143_CALIB_CHAN_RELAY_ON,
@@ -1866,9 +1749,10 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
ni_writew(dev, devpriv->ai_calib_source,
NI6143_CALIB_CHAN_REG);
devpriv->ai_calib_source_enabled = 1;
- msleep_interruptible(100); /* Allow relays to change */
- } else if (!(list[0] & CR_ALT_SOURCE)
- && devpriv->ai_calib_source_enabled) {
+ /* Allow relays to change */
+ msleep_interruptible(100);
+ } else if (!(list[0] & CR_ALT_SOURCE) &&
+ devpriv->ai_calib_source_enabled) {
/* Strobe Relay disable bit */
ni_writew(dev, devpriv->ai_calib_source |
NI6143_CALIB_CHAN_RELAY_OFF,
@@ -1876,7 +1760,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
ni_writew(dev, devpriv->ai_calib_source,
NI6143_CALIB_CHAN_REG);
devpriv->ai_calib_source_enabled = 0;
- msleep_interruptible(100); /* Allow relays to change */
+ /* Allow relays to change */
+ msleep_interruptible(100);
}
}
@@ -1949,7 +1834,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
unsigned int mask = (s->maxdata + 1) >> 1;
int i, n;
- unsigned signbits;
+ unsigned int signbits;
unsigned int d;
unsigned long dl;
@@ -1997,7 +1882,11 @@ static int ni_ai_insn_read(struct comedi_device *dev,
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
NISTC_AI_CMD1_REG);
- /* The 6143 has 32-bit FIFOs. You need to strobe a bit to move a single 16bit stranded sample into the FIFO */
+ /*
+ * The 6143 has 32-bit FIFOs. You need to strobe a
+			 * bit to move a single 16-bit stranded sample into
+ * the FIFO.
+ */
dl = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
@@ -2035,7 +1924,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
data[n] = dl;
} else {
d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- d += signbits; /* subtle: needs to be short addition */
+ /* subtle: needs to be short addition */
+ d += signbits;
data[n] = d;
}
}
@@ -2043,8 +1933,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return insn->n;
}
-static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
- unsigned int flags)
+static int ni_ns_to_timer(const struct comedi_device *dev,
+ unsigned int nanosec, unsigned int flags)
{
struct ni_private *devpriv = dev->private;
int divider;
@@ -2064,14 +1954,14 @@ static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
return divider - 1;
}
-static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer)
+static unsigned int ni_timer_to_ns(const struct comedi_device *dev, int timer)
{
struct ni_private *devpriv = dev->private;
return devpriv->clock_ns * (timer + 1);
}
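
A worked round-trip of the two helpers, assuming the default 50 ns timebase (devpriv->clock_ns == TIMEBASE_1_NS):

/*
 * ni_ns_to_timer(dev, 1000, CMDF_ROUND_NEAREST)
 *	-> divider = 1000 / 50 = 20, returns divider - 1 = 19
 * ni_timer_to_ns(dev, 19)
 *	-> 50 * (19 + 1) = 1000 ns, so the conversion round-trips
 */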
-static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
+static void ni_cmd_set_mite_transfer(struct mite_ring *ring,
struct comedi_subdevice *sdev,
const struct comedi_cmd *cmd,
unsigned int max_count) {
@@ -2102,8 +1992,8 @@ static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
#endif
}
-static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
- unsigned num_channels)
+static unsigned int ni_min_ai_scan_period_ns(struct comedi_device *dev,
+ unsigned int num_channels)
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
@@ -2294,7 +2184,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int start_stop_select = 0;
unsigned int stop_count;
int interrupt_a_enable = 0;
- unsigned ai_trig;
+ unsigned int ai_trig;
if (dev->irq == 0) {
dev_err(dev->class_dev, "cannot run command without an irq\n");
@@ -2307,8 +2197,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* start configuration */
ni_stc_writew(dev, NISTC_RESET_AI_CFG_START, NISTC_RESET_REG);
- /* disable analog triggering for now, since it
- * interferes with the use of pfi0 */
+ /*
+ * Disable analog triggering for now, since it interferes
+ * with the use of pfi0.
+ */
devpriv->an_trig_etc_reg &= ~NISTC_ATRIG_ETC_ENA;
ni_stc_writew(dev, devpriv->an_trig_etc_reg, NISTC_ATRIG_ETC_REG);
@@ -2369,7 +2261,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (stop_count == 0) {
devpriv->ai_cmd2 |= NISTC_AI_CMD2_END_ON_EOS;
interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP;
- /* this is required to get the last sample for chanlist_len > 1, not sure why */
+ /*
+ * This is required to get the last sample for
+ * chanlist_len > 1, not sure why.
+ */
if (cmd->chanlist_len > 1)
start_stop_select |= NISTC_AI_STOP_POLARITY |
NISTC_AI_STOP_EDGE;
@@ -2489,7 +2384,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
switch (devpriv->aimode) {
case AIMODE_HALF_FULL:
- /*generate FIFO interrupts and DMA requests on half-full */
+ /* FIFO interrupts and DMA requests on half-full */
#ifdef PCIDMA
ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF_E,
NISTC_AI_MODE3_REG);
@@ -2880,9 +2775,11 @@ static int ni_ao_inttrig(struct comedi_device *dev,
if (trig_num != cmd->start_arg)
return -EINVAL;
- /* Null trig at beginning prevent ao start trigger from executing more than
- once per command (and doing things like trying to allocate the ao dma channel
- multiple times) */
+ /*
+	 * Null trig at beginning prevents the ao start trigger from executing
+	 * more than once per command (and doing things like trying to allocate
+	 * the ao dma channel multiple times).
+ */
s->async->inttrig = NULL;
ni_set_bits(dev, NISTC_INTB_ENA_REG,
@@ -2951,7 +2848,7 @@ static void ni_ao_cmd_personalize(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
const struct ni_board_struct *board = dev->board_ptr;
- unsigned bits;
+ unsigned int bits;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -2999,6 +2896,7 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
struct ni_private *devpriv = dev->private;
+ unsigned int trigsel;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -3012,39 +2910,20 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
}
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- {
- unsigned int trigsel = devpriv->ao_trigger_select;
-
- switch (cmd->start_src) {
- case TRIG_INT:
- case TRIG_NOW:
- trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY |
- NISTC_AO_TRIG_START1_SEL_MASK);
- trigsel |= NISTC_AO_TRIG_START1_EDGE |
- NISTC_AO_TRIG_START1_SYNC;
- break;
- case TRIG_EXT:
- trigsel = NISTC_AO_TRIG_START1_SEL(
- CR_CHAN(cmd->start_arg) + 1);
- if (cmd->start_arg & CR_INVERT)
- /*
- * 0=active high, 1=active low.
- * see daq-stc 3-24 (p186)
- */
- trigsel |= NISTC_AO_TRIG_START1_POLARITY;
- if (cmd->start_arg & CR_EDGE)
- /* 0=edge detection disabled, 1=enabled */
- trigsel |= NISTC_AO_TRIG_START1_EDGE;
- break;
- default:
- BUG();
- break;
- }
-
- devpriv->ao_trigger_select = trigsel;
- ni_stc_writew(dev, devpriv->ao_trigger_select,
- NISTC_AO_TRIG_SEL_REG);
+ if (cmd->start_src == TRIG_INT) {
+ trigsel = NISTC_AO_TRIG_START1_EDGE |
+ NISTC_AO_TRIG_START1_SYNC;
+ } else { /* TRIG_EXT */
+ trigsel = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
+ /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
+ if (cmd->start_arg & CR_INVERT)
+ trigsel |= NISTC_AO_TRIG_START1_POLARITY;
+ /* 0=edge detection disabled, 1=enabled */
+ if (cmd->start_arg & CR_EDGE)
+ trigsel |= NISTC_AO_TRIG_START1_EDGE;
}
+ ni_stc_writew(dev, trigsel, NISTC_AO_TRIG_SEL_REG);
+
/* AO_Delayed_START1 = 0, we do not support delayed start...yet */
/* sync */
@@ -3149,8 +3028,9 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
NISTC_AO_MODE1_UPDATE_SRC_POLARITY
);
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ unsigned int trigvar;
+
devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
/*
@@ -3181,34 +3061,25 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
* eseries/ni67xx and tMSeries.h for mseries.
*/
- {
- unsigned trigvar = ni_ns_to_timer(dev,
- cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
+ trigvar = ni_ns_to_timer(dev, cmd->scan_begin_arg,
+ CMDF_ROUND_NEAREST);
- /*
- * Wait N TB3 ticks after the start trigger before
- * clocking(N must be >=2).
- */
- /* following line: 2-1 per STC */
- ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD,
- NISTC_AO_CMD1_REG);
- /* following line: N-1 per STC */
- ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
- }
- break;
- case TRIG_EXT:
+ /*
+ * Wait N TB3 ticks after the start trigger before
+ * clocking (N must be >=2).
+ */
+ /* following line: 2-1 per STC */
+ ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
+ /* following line: N-1 per STC */
+ ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ } else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
CR_CHAN(cmd->scan_begin_arg));
if (cmd->scan_begin_arg & CR_INVERT)
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
- break;
- default:
- BUG();
- break;
}
ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
@@ -3231,7 +3102,7 @@ static void ni_ao_cmd_set_channels(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned bits = 0;
+ unsigned int bits = 0;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -3474,7 +3345,6 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
else
devpriv->ao_mode3 = 0;
- devpriv->ao_trigger_select = 0;
ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG);
ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG);
@@ -3550,6 +3420,7 @@ static int ni_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
+#ifdef PCIDMA
static int ni_m_series_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -3652,13 +3523,11 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
unsigned int trig_num)
{
struct comedi_cmd *cmd = &s->async->cmd;
- const unsigned timeout = 1000;
+ const unsigned int timeout = 1000;
int retval = 0;
- unsigned i;
-#ifdef PCIDMA
+ unsigned int i;
struct ni_private *devpriv = dev->private;
unsigned long flags;
-#endif
if (trig_num != cmd->start_arg)
return -EINVAL;
@@ -3668,7 +3537,6 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
/* read alloc the entire buffer */
comedi_buf_read_alloc(s, s->async->prealloc_bufsz);
-#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
@@ -3680,7 +3548,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
if (retval < 0)
return retval;
-#endif
+
/*
* XXX not sure what interrupt C group does
* wait for dma to fill output fifo
@@ -3690,7 +3558,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
if (ni_readl(dev, NI_M_CDIO_STATUS_REG) &
NI_M_CDIO_STATUS_CDO_FIFO_FULL)
break;
- udelay(10);
+ usleep_range(10, 100);
}
if (i == timeout) {
dev_err(dev->class_dev, "dma failed to fill cdo fifo!\n");
@@ -3708,7 +3576,7 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned cdo_mode_bits;
+ unsigned int cdo_mode_bits;
int retval;
ni_writel(dev, NI_M_CDO_CMD_RESET, NI_M_CDIO_CMD_REG);
@@ -3759,28 +3627,14 @@ static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void handle_cdio_interrupt(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
- unsigned cdio_status;
+ unsigned int cdio_status;
struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV];
-#ifdef PCIDMA
unsigned long flags;
-#endif
- if (!devpriv->is_m_series)
- return;
-#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->cdo_mite_chan) {
- unsigned cdo_mite_status =
- mite_get_status(devpriv->cdo_mite_chan);
- if (cdo_mite_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->cdo_mite_chan->channel));
- }
- mite_sync_output_dma(devpriv->cdo_mite_chan, s);
- }
+ if (devpriv->cdo_mite_chan)
+ mite_ack_linkc(devpriv->cdo_mite_chan, s, true);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif
cdio_status = ni_readl(dev, NI_M_CDIO_STATUS_REG);
if (cdio_status & NI_M_CDIO_STATUS_CDO_ERROR) {
@@ -3796,6 +3650,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
}
comedi_handle_events(dev, s);
}
+#endif /* PCIDMA */
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -3813,7 +3668,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
status1 = ni_stc_readw(dev, NISTC_STATUS1_REG);
if (status1 & NISTC_STATUS1_SERIO_IN_PROG) {
err = -EBUSY;
- goto Error;
+ goto error;
}
devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_START;
@@ -3829,7 +3684,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
dev_err(dev->class_dev,
"SPI serial I/O didn't finish in time!\n");
err = -ETIME;
- goto Error;
+ goto error;
}
}
@@ -3842,7 +3697,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
if (data_in)
*data_in = ni_stc_readw(dev, NISTC_DIO_SERIAL_IN_REG);
-Error:
+error:
ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
return err;
@@ -3860,16 +3715,20 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
udelay((devpriv->serial_interval_ns + 999) / 1000);
for (mask = 0x80; mask; mask >>= 1) {
- /* Output current bit; note that we cannot touch s->state
- because it is a per-subdevice field, and serial is
- a separate subdevice from DIO. */
+ /*
+ * Output current bit; note that we cannot touch s->state
+ * because it is a per-subdevice field, and serial is
+ * a separate subdevice from DIO.
+ */
devpriv->dio_output &= ~NISTC_DIO_SDOUT;
if (data_out & mask)
devpriv->dio_output |= NISTC_DIO_SDOUT;
ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
- /* Assert SDCLK (active low, inverted), wait for half of
- the delay, deassert SDCLK, and wait for the other half. */
+ /*
+ * Assert SDCLK (active low, inverted), wait for half of
+ * the delay, deassert SDCLK, and wait for the other half.
+ */
devpriv->dio_control |= NISTC_DIO_SDCLK;
ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
@@ -3897,7 +3756,7 @@ static int ni_serial_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned clk_fout = devpriv->clock_and_fout;
+ unsigned int clk_fout = devpriv->clock_and_fout;
int err = insn->n;
unsigned char byte_out, byte_in = 0;
@@ -3916,8 +3775,10 @@ static int ni_serial_insn_config(struct comedi_device *dev,
data[1] = SERIAL_DISABLED;
devpriv->serial_interval_ns = data[1];
} else if (data[1] <= SERIAL_600NS) {
- /* Warning: this clock speed is too fast to reliably
- control SCXI. */
+ /*
+ * Warning: this clock speed is too fast to reliably
+ * control SCXI.
+ */
devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE;
clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE;
clk_fout &= ~NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
@@ -3933,10 +3794,12 @@ static int ni_serial_insn_config(struct comedi_device *dev,
devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_TIMEBASE;
clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE |
NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
- /* Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
- 600ns/1.2us. If you turn divide_by_2 off with the
- slow clock, you will still get 10us, except then
- all your delays are wrong. */
+ /*
+ * Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
+ * 600ns/1.2us. If you turn divide_by_2 off with the
+ * slow clock, you will still get 10us, except then
+ * all your delays are wrong.
+ */
data[1] = SERIAL_10US;
devpriv->serial_interval_ns = data[1];
} else {
@@ -4046,15 +3909,11 @@ static unsigned int ni_gpct_to_stc_register(struct comedi_device *dev,
return regmap->mio_reg;
}
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
+static void ni_gpct_write_register(struct ni_gpct *counter, unsigned int bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
- static const unsigned gpct_interrupt_a_enable_mask =
- NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC;
- static const unsigned gpct_interrupt_b_enable_mask =
- NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC;
if (stc_register == 0)
return;
@@ -4082,25 +3941,22 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
/* 16 bit registers */
case NITIO_G0_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
ni_set_bitfield(dev, stc_register,
- gpct_interrupt_a_enable_mask, bits);
+ NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC,
+ bits);
break;
case NITIO_G1_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
ni_set_bitfield(dev, stc_register,
- gpct_interrupt_b_enable_mask, bits);
+ NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC,
+ bits);
break;
- case NITIO_G01_RESET:
- BUG_ON(bits & ~(NISTC_RESET_G0 | NISTC_RESET_G1));
- /* fall-through */
default:
ni_stc_writew(dev, bits, stc_register);
}
}
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
+static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
+ enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
@@ -4227,7 +4083,7 @@ static int ni_m_series_pwm_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
+ unsigned int up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
@@ -4287,7 +4143,7 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
+ unsigned int up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
@@ -4343,13 +4199,13 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
static int pack_mb88341(int addr, int val, int *bitstring)
{
/*
- Fujitsu MB 88341
- Note that address bits are reversed. Thanks to
- Ingo Keen for noticing this.
-
- Note also that the 88341 expects address values from
- 1-12, whereas we use channel numbers 0-11. The NI
- docs use 1-12, also, so be careful here.
+ * Fujitsu MB 88341
+ * Note that address bits are reversed. Thanks to
+ * Ingo Keen for noticing this.
+ *
+ * Note also that the 88341 expects address values from
+ * 1-12, whereas we use channel numbers 0-11. The NI
+ * docs use 1-12, also, so be careful here.
*/
addr++;
*bitstring = ((addr & 0x1) << 11) |
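
A concrete instance of the address reversal, taking the visible ((addr & 0x1) << 11) term as the pattern (the low address bit is sent first):

/*
 * channel 2 -> addr 3 (binary 0011); with the address bits reversed
 * the field is clocked out as 1, 1, 0, 0 rather than 0, 0, 1, 1
 */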
@@ -4495,12 +4351,12 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
s->n_chan = n_chans;
if (diffbits) {
- unsigned int *maxdata_list;
+ unsigned int *maxdata_list = devpriv->caldac_maxdata_list;
if (n_chans > MAX_N_CALDACS)
dev_err(dev->class_dev,
"BUG! MAX_N_CALDACS too small\n");
- s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
+ s->maxdata_list = maxdata_list;
chan = 0;
for (i = 0; i < n_dacs; i++) {
type = board->caldac[i];
@@ -4574,8 +4430,8 @@ static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
return 1;
}
-static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
+static unsigned int ni_old_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
/* pre-m-series boards have fixed signals on pfi pins */
switch (chan) {
@@ -4607,7 +4463,7 @@ static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
}
static int ni_old_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
/* pre-m-series boards have fixed signals on pfi pins */
if (source != ni_old_get_pfi_routing(dev, chan))
@@ -4615,21 +4471,21 @@ static int ni_old_set_pfi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
+static unsigned int ni_m_series_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
- const unsigned array_offset = chan / 3;
+ const unsigned int array_offset = chan / 3;
return NI_M_PFI_OUT_SEL_TO_SRC(chan,
devpriv->pfi_output_select_reg[array_offset]);
}
static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
- unsigned index = chan / 3;
+ unsigned int index = chan / 3;
unsigned short val = devpriv->pfi_output_select_reg[index];
if ((source & 0x1f) != source)
@@ -4643,7 +4499,8 @@ static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
+static unsigned int ni_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
@@ -4652,8 +4509,8 @@ static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
: ni_old_get_pfi_routing(dev, chan);
}
-static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
+static int ni_set_pfi_routing(struct comedi_device *dev,
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
@@ -4663,11 +4520,11 @@ static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
}
static int ni_config_filter(struct comedi_device *dev,
- unsigned pfi_channel,
+ unsigned int pfi_channel,
enum ni_pfi_filter_select filter)
{
struct ni_private *devpriv = dev->private;
- unsigned bits;
+ unsigned int bits;
if (!devpriv->is_m_series)
return -ENOTSUPP;
@@ -4818,9 +4675,12 @@ static int cs5529_ai_insn_read(struct comedi_device *dev,
unsigned int channel_select;
const unsigned int INTERNAL_REF = 0x1000;
- /* Set calibration adc source. Docs lie, reference select bits 8 to 11
+ /*
+ * Set calibration adc source. Docs lie, reference select bits 8 to 11
	 * do nothing. bit 12 seems to choose internal reference voltage, bit
- * 13 causes the adc input to go overrange (maybe reads external reference?) */
+ * 13 causes the adc input to go overrange (maybe reads external
+ * reference?)
+ */
if (insn->chanspec & CR_ALT_SOURCE)
channel_select = INTERNAL_REF;
else
@@ -4875,27 +4735,28 @@ static int init_cs5529(struct comedi_device *dev)
* Find best multiplier/divider to try and get the PLL running at 80 MHz
* given an arbitrary frequency input clock.
*/
-static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
- unsigned *freq_divider,
- unsigned *freq_multiplier,
- unsigned *actual_period_ns)
-{
- unsigned div;
- unsigned best_div = 1;
- unsigned mult;
- unsigned best_mult = 1;
- static const unsigned pico_per_nano = 1000;
-
- const unsigned reference_picosec = reference_period_ns * pico_per_nano;
- /* m-series wants the phased-locked loop to output 80MHz, which is divided by 4 to
- * 20 MHz for most timing clocks */
- static const unsigned target_picosec = 12500;
- static const unsigned fudge_factor_80_to_20Mhz = 4;
+static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
+ unsigned int *freq_divider,
+ unsigned int *freq_multiplier,
+ unsigned int *actual_period_ns)
+{
+ unsigned int div;
+ unsigned int best_div = 1;
+ unsigned int mult;
+ unsigned int best_mult = 1;
+ static const unsigned int pico_per_nano = 1000;
+ const unsigned int reference_picosec = reference_period_ns *
+ pico_per_nano;
+ /*
+	 * m-series wants the phase-locked loop to output 80 MHz, which is
+	 * divided by 4 to 20 MHz for most timing clocks
+ */
+ static const unsigned int target_picosec = 12500;
int best_period_picosec = 0;
for (div = 1; div <= NI_M_PLL_MAX_DIVISOR; ++div) {
for (mult = 1; mult <= NI_M_PLL_MAX_MULTIPLIER; ++mult) {
- unsigned new_period_ps =
+ unsigned int new_period_ps =
(reference_picosec * div) / mult;
if (abs(new_period_ps - target_picosec) <
abs(best_period_picosec - target_picosec)) {
@@ -4910,29 +4771,33 @@ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
*freq_divider = best_div;
*freq_multiplier = best_mult;
- *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec *
- fudge_factor_80_to_20Mhz,
+	/* return the actual period (times the 80-to-20 MHz fudge factor of 4) */
+ *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec * 4,
pico_per_nano);
return 0;
}
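
Worth pinning down with numbers: the loops above brute-force every
divider/multiplier pair and keep the one whose output period lands
closest to the 12500 ps (80 MHz) target. A minimal user-space sketch of
the same search, with illustrative limits standing in for the
NI_M_PLL_* constants and a 100 ns (10 MHz) reference clock:

#include <stdio.h>
#include <stdlib.h>

#define MAX_DIVISOR	8	/* illustrative, not NI_M_PLL_MAX_DIVISOR */
#define MAX_MULTIPLIER	32	/* illustrative */
#define TARGET_PS	12500	/* 80 MHz period in picoseconds */

int main(void)
{
	unsigned int ref_ps = 100 * 1000;	/* 100 ns reference */
	unsigned int div, mult, best_div = 1, best_mult = 1;
	int best_ps = 0;

	for (div = 1; div <= MAX_DIVISOR; ++div) {
		for (mult = 1; mult <= MAX_MULTIPLIER; ++mult) {
			int ps = (ref_ps * div) / mult;

			if (abs(ps - TARGET_PS) < abs(best_ps - TARGET_PS)) {
				best_ps = ps;
				best_div = div;
				best_mult = mult;
			}
		}
	}
	/* prints div=1 mult=8 period=12500 ps: an exact hit */
	printf("div=%u mult=%u period=%d ps\n", best_div, best_mult, best_ps);
	return 0;
}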
static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
+ unsigned int source,
+ unsigned int period_ns)
{
struct ni_private *devpriv = dev->private;
- static const unsigned min_period_ns = 50;
- static const unsigned max_period_ns = 1000;
- static const unsigned timeout = 1000;
- unsigned pll_control_bits;
- unsigned freq_divider;
- unsigned freq_multiplier;
- unsigned rtsi;
- unsigned i;
+ static const unsigned int min_period_ns = 50;
+ static const unsigned int max_period_ns = 1000;
+ static const unsigned int timeout = 1000;
+ unsigned int pll_control_bits;
+ unsigned int freq_divider;
+ unsigned int freq_multiplier;
+ unsigned int rtsi;
+ unsigned int i;
int retval;
if (source == NI_MIO_PLL_PXI10_CLOCK)
period_ns = 100;
- /* these limits are somewhat arbitrary, but NI advertises 1 to 20MHz range so we'll use that */
+ /*
+	 * These limits are somewhat arbitrary, but NI advertises a
+	 * 1 to 20 MHz range, so we'll use that.
+	 */
if (period_ns < min_period_ns || period_ns > max_period_ns) {
dev_err(dev->class_dev,
"%s: you must specify an input clock frequency between %i and %i nanosec for the phased-lock loop\n",
@@ -4982,7 +4847,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
ni_writew(dev, pll_control_bits, NI_M_PLL_CTRL_REG);
devpriv->clock_source = source;
- /* it seems to typically take a few hundred microseconds for PLL to lock */
+	/* it takes a few hundred microseconds for the PLL to lock */
for (i = 0; i < timeout; ++i) {
if (ni_readw(dev, NI_M_PLL_STATUS_REG) & NI_M_PLL_STATUS_LOCKED)
break;
@@ -4998,7 +4863,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
}
static int ni_set_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
+ unsigned int source, unsigned int period_ns)
{
struct ni_private *devpriv = dev->private;
@@ -5043,7 +4908,7 @@ static int ni_set_master_clock(struct comedi_device *dev,
}
static int ni_valid_rtsi_output_source(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
@@ -5078,7 +4943,7 @@ static int ni_valid_rtsi_output_source(struct comedi_device *dev,
}
static int ni_set_rtsi_routing(struct comedi_device *dev,
- unsigned chan, unsigned src)
+ unsigned int chan, unsigned int src)
{
struct ni_private *devpriv = dev->private;
@@ -5098,7 +4963,8 @@ static int ni_set_rtsi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan)
+static unsigned int ni_get_rtsi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
@@ -5262,10 +5128,10 @@ static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static irqreturn_t ni_E_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s_ai = dev->read_subdev;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
unsigned short a_status;
unsigned short b_status;
- unsigned int ai_mite_status = 0;
- unsigned int ao_mite_status = 0;
unsigned long flags;
#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
@@ -5273,7 +5139,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
if (!dev->attached)
return IRQ_NONE;
- smp_mb(); /* make sure dev->attached is checked before handler does anything else. */
+ smp_mb(); /* make sure dev->attached is checked */
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -5284,34 +5150,33 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
unsigned long flags_too;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
- if (devpriv->ai_mite_chan) {
- ai_mite_status = mite_get_status(devpriv->ai_mite_chan);
- if (ai_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ai_mite_chan->channel));
- }
- if (devpriv->ao_mite_chan) {
- ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
- if (ao_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ao_mite_chan->channel));
- }
+ if (s_ai && devpriv->ai_mite_chan)
+ mite_ack_linkc(devpriv->ai_mite_chan, s_ai, false);
+ if (s_ao && devpriv->ao_mite_chan)
+ mite_ack_linkc(devpriv->ao_mite_chan, s_ao, false);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
}
#endif
ack_a_interrupt(dev, a_status);
ack_b_interrupt(dev, b_status);
- if ((a_status & NISTC_AI_STATUS1_INTA) || (ai_mite_status & CHSR_INT))
- handle_a_interrupt(dev, a_status, ai_mite_status);
- if ((b_status & NISTC_AO_STATUS1_INTB) || (ao_mite_status & CHSR_INT))
- handle_b_interrupt(dev, b_status, ao_mite_status);
+ if (s_ai) {
+ if (a_status & NISTC_AI_STATUS1_INTA)
+ handle_a_interrupt(dev, s_ai, a_status);
+ /* handle any interrupt or dma events */
+ comedi_handle_events(dev, s_ai);
+ }
+ if (s_ao) {
+ if (b_status & NISTC_AO_STATUS1_INTB)
+ handle_b_interrupt(dev, s_ao, b_status);
+ /* handle any interrupt or dma events */
+ comedi_handle_events(dev, s_ao);
+ }
handle_gpct_interrupt(dev, 0);
handle_gpct_interrupt(dev, 1);
- handle_cdio_interrupt(dev);
+#ifdef PCIDMA
+ if (devpriv->is_m_series)
+ handle_cdio_interrupt(dev);
+#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
@@ -5333,7 +5198,7 @@ static int ni_alloc_private(struct comedi_device *dev)
}
static int ni_E_init(struct comedi_device *dev,
- unsigned interrupt_pin, unsigned irq_polarity)
+ unsigned int interrupt_pin, unsigned int irq_polarity)
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
@@ -5450,6 +5315,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = 1;
s->range_table = &range_digital;
if (devpriv->is_m_series) {
+#ifdef PCIDMA
s->subdev_flags |= SDF_LSAMPL;
s->insn_bits = ni_m_series_dio_insn_bits;
s->insn_config = ni_m_series_dio_insn_config;
@@ -5469,6 +5335,7 @@ static int ni_E_init(struct comedi_device *dev,
NI_M_CDI_CMD_RESET,
NI_M_CDIO_CMD_REG);
ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG);
+#endif /* PCIDMA */
} else {
s->insn_bits = ni_dio_insn_bits;
s->insn_config = ni_dio_insn_config;
@@ -5675,8 +5542,6 @@ static void mio_common_detach(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
- if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
- }
+ if (devpriv)
+ ni_gpct_device_destroy(devpriv->counter_dev);
}
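
The simplified detach leans on ni_gpct_device_destroy() tolerating a
NULL counter_dev; that is an assumption here, since the destroy body is
outside this hunk, but the NULL-tolerant shape it implies is just (the
demo_* names are illustrative):

#include <linux/slab.h>

struct demo_gpct_device {
	struct demo_gpct *counters;	/* per-counter state array */
};

static void demo_gpct_device_destroy(struct demo_gpct_device *counter_dev)
{
	if (!counter_dev)
		return;		/* attach may bail before allocation */
	kfree(counter_dev->counters);
	kfree(counter_dev);
}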
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 4a5aee058..ed04dea91 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -282,12 +282,12 @@ static const struct nidio_board nidio_boards[] = {
};
struct nidio96_private {
- struct mite_struct *mite;
+ struct mite *mite;
int boardtype;
int dio;
unsigned short OpModeBits;
struct mite_channel *di_mite_chan;
- struct mite_dma_descriptor_ring *di_mite_ring;
+ struct mite_ring *di_mite_ring;
spinlock_t mite_channel_lock;
};
@@ -322,8 +322,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->di_mite_chan) {
- mite_dma_disarm(devpriv->di_mite_chan);
- mite_dma_reset(devpriv->di_mite_chan);
mite_release_channel(devpriv->di_mite_chan);
devpriv->di_mite_chan = NULL;
writeb(primary_DMAChannel_bits(0) |
@@ -368,7 +366,7 @@ static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&dev->spinlock, irq_flags);
spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
- mite_sync_input_dma(devpriv->di_mite_chan, s);
+ mite_sync_dma(devpriv->di_mite_chan, s);
spin_unlock(&devpriv->mite_channel_lock);
count = comedi_buf_n_bytes_ready(s);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
@@ -381,12 +379,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
struct nidio96_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
- struct mite_struct *mite = devpriv->mite;
unsigned int auxdata;
int flags;
int status;
int work = 0;
- unsigned int m_status = 0;
	/* spurious interrupts */
if (!dev->attached) {
@@ -401,24 +397,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
flags = readb(dev->mmio + Group_1_Flags);
spin_lock(&devpriv->mite_channel_lock);
- if (devpriv->di_mite_chan)
- m_status = mite_get_status(devpriv->di_mite_chan);
-
- if (m_status & CHSR_INT) {
- if (m_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- mite->mite_io_addr +
- MITE_CHOR(devpriv->di_mite_chan->channel));
- mite_sync_input_dma(devpriv->di_mite_chan, s);
- /* XXX need to byteswap */
- }
- if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
- CHSR_DRQ1 | CHSR_MRDY)) {
- dev_dbg(dev->class_dev,
- "unknown mite interrupt, disabling IRQ\n");
- async->events |= COMEDI_CB_ERROR;
- disable_irq(dev->irq);
- }
+ if (devpriv->di_mite_chan) {
+ mite_ack_linkc(devpriv->di_mite_chan, s, false);
+ /* XXX need to byteswap sync'ed dma */
}
spin_unlock(&devpriv->mite_channel_lock);
@@ -914,14 +895,10 @@ static int nidio_auto_attach(struct comedi_device *dev,
spin_lock_init(&devpriv->mite_channel_lock);
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, false); /* use win0 */
if (!devpriv->mite)
return -ENOMEM;
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
devpriv->di_mite_ring = mite_alloc_ring(devpriv->mite);
if (!devpriv->di_mite_ring)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 231e37d6b..344aa343e 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1061,6 +1061,8 @@ static int pcimio_dio_change(struct comedi_device *dev,
static void m_series_init_eeprom_buffer(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite *mite = devpriv->mite;
+ resource_size_t daq_phys_addr;
static const int Start_Cal_EEPROM = 0x400;
static const unsigned window_size = 10;
static const int serial_number_eeprom_offset = 0x4;
@@ -1070,15 +1072,17 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
unsigned old_iodwcr1_bits;
int i;
- old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
- old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
- devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0x1 | old_iodwcr1_bits,
- devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0xf, devpriv->mite->mite_io_addr + 0x30);
+ /* IO Window 1 needs to be temporarily mapped to read the eeprom */
+ daq_phys_addr = pci_resource_start(mite->pcidev, 1);
+
+ old_iodwbsr_bits = readl(mite->mmio + MITE_IODWBSR);
+ old_iodwbsr1_bits = readl(mite->mmio + MITE_IODWBSR_1);
+ old_iodwcr1_bits = readl(mite->mmio + MITE_IODWCR_1);
+ writel(0x0, mite->mmio + MITE_IODWBSR);
+ writel(((0x80 | window_size) | daq_phys_addr),
+ mite->mmio + MITE_IODWBSR_1);
+ writel(0x1 | old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
+ writel(0xf, mite->mmio + 0x30);
BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number));
for (i = 0; i < serial_number_eeprom_length; ++i) {
@@ -1090,10 +1094,10 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
- writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + 0x30);
+ writel(old_iodwbsr1_bits, mite->mmio + MITE_IODWBSR_1);
+ writel(old_iodwbsr_bits, mite->mmio + MITE_IODWBSR);
+ writel(old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
+ writel(0x0, mite->mmio + 0x30);
}
static void init_6143(struct comedi_device *dev)
@@ -1168,7 +1172,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
return ret;
devpriv = dev->private;
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, false); /* use win0 */
if (!devpriv->mite)
return -ENOMEM;
@@ -1193,10 +1197,6 @@ static int pcimio_auto_attach(struct comedi_device *dev,
if (board->reg_type == ni_reg_6713)
devpriv->is_6713 = 1;
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
if (!devpriv->ai_mite_ring)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1d5af25b9..1966519cb 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1,24 +1,23 @@
/*
- module/ni_stc.h
- Register descriptions for NI DAQ-STC chip
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Register descriptions for NI DAQ-STC chip
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- References:
- DAQ-STC Technical Reference Manual
+ * References:
+ * DAQ-STC Technical Reference Manual
*/
#ifndef _COMEDI_NI_STC_H
@@ -958,7 +957,7 @@ struct ni_board_struct {
unsigned int ao_maxdata;
int ao_fifo_depth;
const struct comedi_lrange *ao_range_table;
- unsigned ao_speed;
+ unsigned int ao_speed;
int reg_type;
unsigned int has_8255:1;
@@ -1002,12 +1001,11 @@ struct ni_private {
unsigned short ao_mode3;
unsigned short ao_cmd1;
unsigned short ao_cmd2;
- unsigned short ao_trigger_select;
struct ni_gpct_device *counter_dev;
unsigned short an_trig_etc_reg;
- unsigned ai_offset[512];
+ unsigned int ai_offset[512];
unsigned long serial_interval_ns;
unsigned char serial_hw_mode;
@@ -1025,24 +1023,24 @@ struct ni_private {
unsigned short g0_g1_select_reg;
unsigned short cdio_dma_select_reg;
- unsigned clock_ns;
- unsigned clock_source;
+ unsigned int clock_ns;
+ unsigned int clock_source;
unsigned short pwm_up_count;
unsigned short pwm_down_count;
unsigned short ai_fifo_buffer[0x2000];
- uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE];
+ u8 eeprom_buffer[M_SERIES_EEPROM_SIZE];
__be32 serial_number;
- struct mite_struct *mite;
+ struct mite *mite;
struct mite_channel *ai_mite_chan;
struct mite_channel *ao_mite_chan;
struct mite_channel *cdo_mite_chan;
- struct mite_dma_descriptor_ring *ai_mite_ring;
- struct mite_dma_descriptor_ring *ao_mite_ring;
- struct mite_dma_descriptor_ring *cdo_mite_ring;
- struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT];
+ struct mite_ring *ai_mite_ring;
+ struct mite_ring *ao_mite_ring;
+ struct mite_ring *cdo_mite_ring;
+ struct mite_ring *gpct_mite_ring[NUM_GPCT];
/* ni_pcimio board type flags (based on the boardinfo reg_type) */
unsigned int is_m_series:1;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index b74e44ec5..7043eb054 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -1,19 +1,18 @@
/*
- comedi/drivers/ni_tio.c
- Support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Support for NI general purpose counters
+ *
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Module: ni_tio
@@ -36,13 +35,10 @@
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
+ *
+ * TODO: Support use of both banks X and Y
*/
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
#include <linux/module.h>
#include <linux/slab.h>
@@ -115,20 +111,7 @@ TODO:
#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f
#define NI_660X_MAX_UP_DOWN_PIN 7
-static inline unsigned GI_ALT_SYNC(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_ALT_SYNC;
- case ni_gpct_variant_660x:
- return GI_660X_ALT_SYNC;
- }
-}
-
-static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
+static inline unsigned int GI_PRESCALE_X2(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
@@ -141,7 +124,7 @@ static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
}
}
-static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
+static inline unsigned int GI_PRESCALE_X8(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
@@ -154,19 +137,6 @@ static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
}
}
-static inline unsigned GI_HW_ARM_SEL_MASK(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_HW_ARM_SEL_MASK;
- case ni_gpct_variant_660x:
- return GI_660X_HW_ARM_SEL_MASK;
- }
-}
-
static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
{
switch (counter_dev->variant) {
@@ -179,17 +149,45 @@ static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
}
}
+/**
+ * ni_tio_write() - Write a TIO register using the driver provided callback.
+ * @counter: struct ni_gpct counter.
+ * @value: the value to write
+ * @reg: the register to write.
+ */
+void ni_tio_write(struct ni_gpct *counter, unsigned int value,
+ enum ni_gpct_register reg)
+{
+ if (reg < NITIO_NUM_REGS)
+ counter->counter_dev->write(counter, value, reg);
+}
+EXPORT_SYMBOL_GPL(ni_tio_write);
+
+/**
+ * ni_tio_read() - Read a TIO register using the driver provided callback.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to read.
+ */
+unsigned int ni_tio_read(struct ni_gpct *counter, enum ni_gpct_register reg)
+{
+ if (reg < NITIO_NUM_REGS)
+ return counter->counter_dev->read(counter, reg);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ni_tio_read);
+
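
As a sketch of the indirection these helpers wrap: the board-level
driver supplies the raw accessors, and the helpers refuse out-of-range
registers instead of trusting the caller. The demo_* names below are
illustrative stand-ins, not the real struct ni_gpct_device:

enum demo_reg { DEMO_REG_A, DEMO_REG_B, DEMO_NUM_REGS };

struct demo_counter_dev {
	void (*write)(struct demo_counter_dev *cdev, unsigned int value,
		      enum demo_reg reg);
	unsigned int (*read)(struct demo_counter_dev *cdev,
			     enum demo_reg reg);
};

static void demo_tio_write(struct demo_counter_dev *cdev,
			   unsigned int value, enum demo_reg reg)
{
	if (reg < DEMO_NUM_REGS)	/* same guard as ni_tio_write() */
		cdev->write(cdev, value, reg);
}

static unsigned int demo_tio_read(struct demo_counter_dev *cdev,
				  enum demo_reg reg)
{
	if (reg < DEMO_NUM_REGS)
		return cdev->read(cdev, reg);
	return 0;			/* out-of-range reads as zero */
}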
static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
- write_register(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
+ ni_tio_write(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
}
-static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
- unsigned generic_clock_source)
+static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
+ unsigned int generic_clock_source,
+ u64 *period_ps)
{
- uint64_t clock_period_ps;
+ u64 clock_period_ps;
switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -222,19 +220,80 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
clock_period_ps *= 8;
break;
default:
- BUG();
- break;
+ return -EINVAL;
}
- return clock_period_ps;
+ *period_ps = clock_period_ps;
+ return 0;
}
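
This helper's conversion is the recurring shape of the whole patch:
routines that used to BUG() on bad input now return -EINVAL and hand
the result back through an out-parameter so callers can propagate the
error. A minimal kernel-style sketch of the pattern (demo_period_ps()
is illustrative, not driver API):

#include <linux/errno.h>
#include <linux/types.h>

/* before: returned the period directly and called BUG() on bad input */
static int demo_period_ps(unsigned int src, u64 *period_ps)
{
	switch (src) {
	case 0:
		*period_ps = 50000;	/* 20 MHz timebase, in ps */
		return 0;
	default:
		return -EINVAL;		/* let the caller decide */
	}
}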
-static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+static void ni_tio_set_bits_transient(struct ni_gpct *counter,
+ enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value,
+ unsigned int transient)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_bits =
+ unsigned long flags;
+
+ if (reg < NITIO_NUM_REGS) {
+ spin_lock_irqsave(&counter_dev->regs_lock, flags);
+ counter_dev->regs[reg] &= ~mask;
+ counter_dev->regs[reg] |= (value & mask);
+ ni_tio_write(counter, counter_dev->regs[reg] | transient, reg);
+ mmiowb();
+ spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
+ }
+}
+
+/**
+ * ni_tio_set_bits() - Safely write a counter register.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to write.
+ * @mask: the bits to change.
+ * @value: the new value of the bits.
+ *
+ * Used to write to, and update the software copy of, a register whose
+ * bits may be twiddled in interrupt context, or whose software copy may
+ * be read in interrupt context.
+ */
+void ni_tio_set_bits(struct ni_gpct *counter, enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value)
+{
+ ni_tio_set_bits_transient(counter, reg, mask, value, 0x0);
+}
+EXPORT_SYMBOL_GPL(ni_tio_set_bits);
+
+/**
+ * ni_tio_get_soft_copy() - Safely read the software copy of a counter register.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to read.
+ *
+ * Used to get the software copy of a register whose bits might be modified
+ * in interrupt context, or whose software copy might need to be read in
+ * interrupt context.
+ */
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
+ enum ni_gpct_register reg)
+{
+ struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int value = 0;
+ unsigned long flags;
+
+ if (reg < NITIO_NUM_REGS) {
+ spin_lock_irqsave(&counter_dev->regs_lock, flags);
+ value = counter_dev->regs[reg];
+ spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
+ }
+ return value;
+}
+EXPORT_SYMBOL_GPL(ni_tio_get_soft_copy);
+
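
Behind ni_tio_set_bits() and ni_tio_get_soft_copy() sits one pattern:
the hardware registers are effectively write-only, so a software shadow
is kept and every read-modify-write happens under a spinlock. A hedged
kernel-style sketch, with demo_* names standing in for the real
structures:

#include <linux/io.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t regs_lock;		/* protects the shadow copies */
	unsigned int regs[16];		/* shadow of write-only registers */
	void __iomem *mmio;
};

static void demo_set_bits(struct demo_dev *ddev, unsigned int reg,
			  unsigned int mask, unsigned int value)
{
	unsigned long flags;

	spin_lock_irqsave(&ddev->regs_lock, flags);
	ddev->regs[reg] &= ~mask;		/* clear the field... */
	ddev->regs[reg] |= (value & mask);	/* ...then set new bits */
	writel(ddev->regs[reg], ddev->mmio + 4 * reg);
	spin_unlock_irqrestore(&ddev->regs_lock, flags);
}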
+static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+{
+ struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int cidx = counter->counter_index;
+ unsigned int counting_mode_bits =
ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
- unsigned bits = 0;
+ unsigned int bits = 0;
if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
GI_SRC_POL_INVERT)
@@ -246,14 +305,15 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
return bits;
}
-static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
+static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
- unsigned clock_source = 0;
- unsigned src;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
+ unsigned int clock_source = 0;
+ unsigned int src;
+ unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
@@ -304,19 +364,20 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
- BUG();
- break;
+ return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
+ *clk_src = clock_source;
+ return 0;
}
-static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
+static int ni_660x_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
- unsigned clock_source = 0;
- unsigned cidx = counter->counter_index;
- unsigned src;
- unsigned i;
+ unsigned int clock_source = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int src;
+ unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
@@ -361,78 +422,88 @@ static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
- BUG();
- break;
+ return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
+ *clk_src = clock_source;
+ return 0;
}
-static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
+static int ni_tio_generic_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
switch (counter->counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- return ni_m_series_clock_src_select(counter);
+ return ni_m_series_clock_src_select(counter, clk_src);
case ni_gpct_variant_660x:
- return ni_660x_clock_src_select(counter);
+ return ni_660x_clock_src_select(counter, clk_src);
}
}
-static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
+static void ni_tio_set_sync_mode(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx);
- static const uint64_t min_normal_sync_period_ps = 25000;
- unsigned mode;
- uint64_t clock_period_ps;
-
- if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
+ unsigned int cidx = counter->counter_index;
+ static const u64 min_normal_sync_period_ps = 25000;
+ unsigned int mask = 0;
+ unsigned int bits = 0;
+ unsigned int reg;
+ unsigned int mode;
+ unsigned int clk_src;
+ u64 ps;
+ bool force_alt_sync;
+
+ /* only m series and 660x variants have counting mode registers */
+ switch (counter_dev->variant) {
+ case ni_gpct_variant_e_series:
+ default:
return;
+ case ni_gpct_variant_m_series:
+ mask = GI_M_ALT_SYNC;
+ break;
+ case ni_gpct_variant_660x:
+ mask = GI_660X_ALT_SYNC;
+ break;
+ }
- mode = ni_tio_get_soft_copy(counter, counting_mode_reg);
+ reg = NITIO_CNT_MODE_REG(cidx);
+ mode = ni_tio_get_soft_copy(counter, reg);
switch (mode & GI_CNT_MODE_MASK) {
case GI_CNT_MODE_QUADX1:
case GI_CNT_MODE_QUADX2:
case GI_CNT_MODE_QUADX4:
case GI_CNT_MODE_SYNC_SRC:
- force_alt_sync = 1;
+ force_alt_sync = true;
break;
default:
+ force_alt_sync = false;
break;
}
- clock_period_ps = ni_tio_clock_period_ps(counter,
- ni_tio_generic_clock_src_select(counter));
+ ni_tio_generic_clock_src_select(counter, &clk_src);
+ ni_tio_clock_period_ps(counter, clk_src, &ps);
/*
* It's not clear what we should do if clock_period is unknown, so we
- * are not using the alt sync bit in that case, but allow the caller
- * to decide by using the force_alt_sync parameter.
+ * are not using the alt sync bit in that case.
*/
- if (force_alt_sync ||
- (clock_period_ps && clock_period_ps < min_normal_sync_period_ps)) {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- GI_ALT_SYNC(counter_dev->variant));
- } else {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- 0x0);
- }
+ if (force_alt_sync || (ps && ps < min_normal_sync_period_ps))
+ bits = mask;
+
+ ni_tio_set_bits(counter, reg, mask, bits);
}
-static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
+static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode_reg_mask;
- unsigned mode_reg_values;
- unsigned input_select_bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mode_reg_mask;
+ unsigned int mode_reg_values;
+ unsigned int input_select_bits = 0;
/* these bits map directly on to the mode register */
- static const unsigned mode_reg_direct_mask =
+ static const unsigned int mode_reg_direct_mask =
NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
@@ -458,7 +529,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
mode_reg_mask, mode_reg_values);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
+ unsigned int bits = 0;
bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT);
bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT));
@@ -467,7 +538,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK |
GI_INDEX_MODE, bits);
- ni_tio_set_sync_mode(counter, 0);
+ ni_tio_set_sync_mode(counter);
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK,
@@ -484,65 +555,68 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
return 0;
}
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
+int ni_tio_arm(struct ni_gpct *counter, bool arm, unsigned int start_trigger)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned command_transient_bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int transient_bits = 0;
if (arm) {
+ unsigned int mask = 0;
+ unsigned int bits = 0;
+
+ /* only m series and 660x have counting mode registers */
+ switch (counter_dev->variant) {
+ case ni_gpct_variant_e_series:
+ default:
+ break;
+ case ni_gpct_variant_m_series:
+ mask = GI_M_HW_ARM_SEL_MASK;
+ break;
+ case ni_gpct_variant_660x:
+ mask = GI_660X_HW_ARM_SEL_MASK;
+ break;
+ }
+
switch (start_trigger) {
case NI_GPCT_ARM_IMMEDIATE:
- command_transient_bits |= GI_ARM;
+ transient_bits |= GI_ARM;
break;
case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- command_transient_bits |= GI_ARM | GI_ARM_COPY;
+ transient_bits |= GI_ARM | GI_ARM_COPY;
break;
default:
+ /*
+			 * For m series and 660x, pass through the least
+			 * significant bits so we can figure out later
+			 * which select was used.
+			 */
+ if (mask && (start_trigger & NI_GPCT_ARM_UNKNOWN)) {
+ bits |= GI_HW_ARM_ENA |
+ (GI_HW_ARM_SEL(start_trigger) & mask);
+ } else {
+ return -EINVAL;
+ }
break;
}
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
- unsigned sel_mask;
- sel_mask = GI_HW_ARM_SEL_MASK(counter_dev->variant);
-
- switch (start_trigger) {
- case NI_GPCT_ARM_IMMEDIATE:
- case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- break;
- default:
- if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
- /*
- * pass-through the least significant
- * bits so we can figure out what
- * select later
- */
- bits |= GI_HW_ARM_ENA |
- (GI_HW_ARM_SEL(start_trigger) &
- sel_mask);
- } else {
- return -EINVAL;
- }
- break;
- }
+ if (mask)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
- GI_HW_ARM_ENA | sel_mask, bits);
- }
+ GI_HW_ARM_ENA | mask, bits);
} else {
- command_transient_bits |= GI_DISARM;
+ transient_bits |= GI_DISARM;
}
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
- 0, 0, command_transient_bits);
+ 0, 0, transient_bits);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_arm);
-static unsigned ni_660x_clk_src(unsigned int clock_source)
+static int ni_660x_clk_src(unsigned int clock_source, unsigned int *bits)
{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_660x_clock;
- unsigned i;
+ unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+ unsigned int ni_660x_clock;
+ unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -583,18 +657,17 @@ static unsigned ni_660x_clk_src(unsigned int clock_source)
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
- ni_660x_clock = 0;
- BUG();
- break;
+ return -EINVAL;
}
- return GI_SRC_SEL(ni_660x_clock);
+ *bits = GI_SRC_SEL(ni_660x_clock);
+ return 0;
}
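
The "if (i <= NI_660X_MAX_SRC_PIN) break;" tail above is the usual
did-the-loop-match test inside a switch default: the inner break only
leaves the for loop, so a second test is needed to break out of the
switch when a match was found, and to fail otherwise. A standalone
sketch of the idiom (the names are illustrative, not driver API):

#include <stdio.h>

#define MAX_PIN	7

static int lookup(unsigned int src, unsigned int *sel)
{
	unsigned int i;

	for (i = 0; i <= MAX_PIN; ++i) {
		if (src == 0x10 + i) {	/* pretend PIN_SEL(i) */
			*sel = i;
			break;		/* leaves the for loop only */
		}
	}
	if (i > MAX_PIN)		/* loop exhausted: no match */
		return -1;
	return 0;
}

int main(void)
{
	unsigned int sel;

	if (lookup(0x13, &sel) == 0)
		printf("matched pin %u\n", sel);	/* pin 3 */
	return 0;
}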
-static unsigned ni_m_clk_src(unsigned int clock_source)
+static int ni_m_clk_src(unsigned int clock_source, unsigned int *bits)
{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_m_series_clock;
- unsigned i;
+ unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+ unsigned int ni_m_series_clock;
+ unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -641,21 +714,18 @@ static unsigned ni_m_clk_src(unsigned int clock_source)
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
- pr_err("invalid clock source 0x%lx\n",
- (unsigned long)clock_source);
- BUG();
- ni_m_series_clock = 0;
- break;
+ return -EINVAL;
}
- return GI_SRC_SEL(ni_m_series_clock);
+ *bits = GI_SRC_SEL(ni_m_series_clock);
+ return 0;
};
static void ni_tio_set_source_subselect(struct ni_gpct *counter,
unsigned int clock_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
+ unsigned int cidx = counter->counter_index;
+ unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
return;
@@ -674,8 +744,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
default:
return;
}
- write_register(counter, counter_dev->regs[second_gate_reg],
- second_gate_reg);
+ ni_tio_write(counter, counter_dev->regs[second_gate_reg],
+ second_gate_reg);
}
static int ni_tio_set_clock_src(struct ni_gpct *counter,
@@ -683,20 +753,28 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
unsigned int period_ns)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int bits = 0;
+ int ret;
- /* FIXME: validate clock source */
switch (counter_dev->variant) {
case ni_gpct_variant_660x:
- bits |= ni_660x_clk_src(clock_source);
+ ret = ni_660x_clk_src(clock_source, &bits);
break;
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- bits |= ni_m_clk_src(clock_source);
+ ret = ni_m_clk_src(clock_source, &bits);
break;
}
+ if (ret) {
+ struct comedi_device *dev = counter_dev->dev;
+
+ dev_err(dev->class_dev, "invalid clock source 0x%x\n",
+ clock_source);
+ return ret;
+ }
+
if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
bits |= GI_SRC_POL_INVERT;
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
@@ -722,28 +800,34 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
GI_PRESCALE_X8(counter_dev->variant), bits);
}
counter->clock_period_ps = period_ns * 1000;
- ni_tio_set_sync_mode(counter, 0);
+ ni_tio_set_sync_mode(counter);
return 0;
}
-static void ni_tio_get_clock_src(struct ni_gpct *counter,
- unsigned int *clock_source,
- unsigned int *period_ns)
+static int ni_tio_get_clock_src(struct ni_gpct *counter,
+ unsigned int *clock_source,
+ unsigned int *period_ns)
{
- uint64_t temp64;
-
- *clock_source = ni_tio_generic_clock_src_select(counter);
- temp64 = ni_tio_clock_period_ps(counter, *clock_source);
+ u64 temp64;
+ int ret;
+
+ ret = ni_tio_generic_clock_src_select(counter, clock_source);
+ if (ret)
+ return ret;
+ ret = ni_tio_clock_period_ps(counter, *clock_source, &temp64);
+ if (ret)
+ return ret;
do_div(temp64, 1000); /* ps to ns */
*period_ns = temp64;
+ return 0;
}
static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int gate_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
@@ -782,9 +866,9 @@ static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int gate_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
@@ -824,11 +908,11 @@ static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
- unsigned i;
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int gate2_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
@@ -863,17 +947,17 @@ static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
return 0;
}
static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int gate2_sel;
/*
* FIXME: We don't know what the m-series second gate codes are,
@@ -887,20 +971,20 @@ static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
return 0;
}
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source)
+int ni_tio_set_gate_src(struct ni_gpct *counter,
+ unsigned int gate, unsigned int src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned mode = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int chan = CR_CHAN(src);
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int mode = 0;
- switch (gate_index) {
+ switch (gate) {
case 0:
if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
@@ -908,9 +992,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
GI_GATING_DISABLED);
return 0;
}
- if (gate_source & CR_INVERT)
+ if (src & CR_INVERT)
mode |= GI_GATE_POL_INVERT;
- if (gate_source & CR_EDGE)
+ if (src & CR_EDGE)
mode |= GI_RISING_EDGE_GATING;
else
mode |= GI_LEVEL_GATING;
@@ -921,9 +1005,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- return ni_m_set_gate(counter, gate_source);
+ return ni_m_set_gate(counter, src);
case ni_gpct_variant_660x:
- return ni_660x_set_gate(counter, gate_source);
+ return ni_660x_set_gate(counter, src);
}
break;
case 1:
@@ -932,22 +1016,21 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
counter_dev->regs[gate2_reg] &= ~GI_GATE2_MODE;
- write_register(counter, counter_dev->regs[gate2_reg],
- gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg],
+ gate2_reg);
return 0;
}
- if (gate_source & CR_INVERT)
+ if (src & CR_INVERT)
counter_dev->regs[gate2_reg] |= GI_GATE2_POL_INVERT;
else
counter_dev->regs[gate2_reg] &= ~GI_GATE2_POL_INVERT;
switch (counter_dev->variant) {
case ni_gpct_variant_m_series:
- return ni_m_set_gate2(counter, gate_source);
+ return ni_m_set_gate2(counter, src);
case ni_gpct_variant_660x:
- return ni_660x_set_gate2(counter, gate_source);
+ return ni_660x_set_gate2(counter, src);
default:
- BUG();
- break;
+ return -EINVAL;
}
break;
default:
@@ -957,11 +1040,11 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
}
EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
-static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
+static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
unsigned int source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int abz_reg, shift, mask;
if (counter_dev->variant != ni_gpct_variant_m_series)
@@ -987,175 +1070,221 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
counter_dev->regs[abz_reg] &= ~mask;
counter_dev->regs[abz_reg] |= (source << shift) & mask;
- write_register(counter, counter_dev->regs[abz_reg], abz_reg);
+ ni_tio_write(counter, counter_dev->regs[abz_reg], abz_reg);
return 0;
}
-static unsigned ni_660x_gate_to_generic_gate(unsigned gate)
+static int ni_660x_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_GATE_PIN_I_GATE_SEL:
- return NI_GPCT_GATE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_GATE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_660X_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_660X_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_660X_RTSI_GATE_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_660X_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
- if (gate == NI_660X_PIN_GATE_SEL(i))
- return NI_GPCT_GATE_PIN_GATE_SELECT(i);
+ if (gate == NI_660X_PIN_GATE_SEL(i)) {
+ source = NI_GPCT_GATE_PIN_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_660X_MAX_GATE_PIN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_m_gate_to_generic_gate(unsigned gate)
+static int ni_m_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_M_TIMESTAMP_MUX_GATE_SEL:
- return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+ source = NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+ break;
case NI_M_AI_START2_GATE_SEL:
- return NI_GPCT_AI_START2_GATE_SELECT;
+ source = NI_GPCT_AI_START2_GATE_SELECT;
+ break;
case NI_M_PXI_STAR_TRIGGER_GATE_SEL:
- return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+ source = NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+ break;
case NI_M_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_M_AI_START1_GATE_SEL:
- return NI_GPCT_AI_START1_GATE_SELECT;
+ source = NI_GPCT_AI_START1_GATE_SELECT;
+ break;
case NI_M_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_M_ANALOG_TRIG_OUT_GATE_SEL:
- return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+ source = NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+ break;
case NI_M_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_M_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_M_RTSI_GATE_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_M_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (gate == NI_M_PFI_GATE_SEL(i))
- return NI_GPCT_PFI_GATE_SELECT(i);
+ if (gate == NI_M_PFI_GATE_SEL(i)) {
+ source = NI_GPCT_PFI_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_M_MAX_PFI_CHAN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_660x_gate2_to_generic_gate(unsigned gate)
+static int ni_660x_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE2_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_UD_PIN_I_GATE2_SEL:
- return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+ source = NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+ break;
case NI_660X_NEXT_SRC_GATE2_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_660X_NEXT_OUT_GATE2_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_660X_SELECTED_GATE2_SEL:
- return NI_GPCT_SELECTED_GATE_GATE_SELECT;
+ source = NI_GPCT_SELECTED_GATE_GATE_SELECT;
+ break;
case NI_660X_LOGIC_LOW_GATE2_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE2_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_660X_RTSI_GATE2_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_660X_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
- if (gate == NI_660X_UD_PIN_GATE2_SEL(i))
- return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+ if (gate == NI_660X_UD_PIN_GATE2_SEL(i)) {
+ source = NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_660X_MAX_UP_DOWN_PIN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_m_gate2_to_generic_gate(unsigned gate)
+static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
/*
* FIXME: the second gate sources for the m series are undocumented,
* so we just return the raw bits for now.
*/
- switch (gate) {
- default:
- return gate;
- }
+ *src = gate;
return 0;
};
-static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
+static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned int gate_index,
unsigned int *gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mode;
+ unsigned int reg;
+ unsigned int gate;
+ int ret;
+
+ mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
+ if (((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) ||
+ (gate_index == 1 &&
+ !(counter_dev->regs[NITIO_GATE2_REG(cidx)] & GI_GATE2_MODE))) {
+ *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+ return 0;
+ }
switch (gate_index) {
case 0:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter,
- NITIO_INPUT_SEL_REG(cidx)));
+ reg = NITIO_INPUT_SEL_REG(cidx);
+ gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter, reg));
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- *gate_source = ni_m_gate_to_generic_gate(gate);
+ ret = ni_m_gate_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate_to_generic_gate(gate);
+ ret = ni_660x_gate_to_generic_gate(gate, gate_source);
break;
}
+ if (ret)
+ return ret;
if (mode & GI_GATE_POL_INVERT)
*gate_source |= CR_INVERT;
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
*gate_source |= CR_EDGE;
break;
case 1:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED ||
- !(counter_dev->regs[gate2_reg] & GI_GATE2_MODE)) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE2(counter_dev->regs[gate2_reg]);
+ reg = NITIO_GATE2_REG(cidx);
+ gate = GI_BITS_TO_GATE2(counter_dev->regs[reg]);
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- *gate_source = ni_m_gate2_to_generic_gate(gate);
+ ret = ni_m_gate2_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate2_to_generic_gate(gate);
+ ret = ni_660x_gate2_to_generic_gate(gate, gate_source);
break;
}
- if (counter_dev->regs[gate2_reg] & GI_GATE2_POL_INVERT)
+ if (ret)
+ return ret;
+ if (counter_dev->regs[reg] & GI_GATE2_POL_INVERT)
*gate_source |= CR_INVERT;
/* second gate can't have edge/level mode set independently */
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
@@ -1173,45 +1302,52 @@ int ni_tio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
- unsigned status;
+ unsigned int cidx = counter->counter_index;
+ unsigned int status;
+ int ret = 0;
switch (data[0]) {
case INSN_CONFIG_SET_COUNTER_MODE:
- return ni_tio_set_counter_mode(counter, data[1]);
+ ret = ni_tio_set_counter_mode(counter, data[1]);
+ break;
case INSN_CONFIG_ARM:
- return ni_tio_arm(counter, 1, data[1]);
+ ret = ni_tio_arm(counter, true, data[1]);
+ break;
case INSN_CONFIG_DISARM:
- ni_tio_arm(counter, 0, 0);
- return 0;
+ ret = ni_tio_arm(counter, false, 0);
+ break;
case INSN_CONFIG_GET_COUNTER_STATUS:
data[1] = 0;
- status = read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
+ status = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
if (status & GI_ARMED(cidx)) {
data[1] |= COMEDI_COUNTER_ARMED;
if (status & GI_COUNTING(cidx))
data[1] |= COMEDI_COUNTER_COUNTING;
}
data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
- return 0;
+ break;
case INSN_CONFIG_SET_CLOCK_SRC:
- return ni_tio_set_clock_src(counter, data[1], data[2]);
+ ret = ni_tio_set_clock_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_GET_CLOCK_SRC:
- ni_tio_get_clock_src(counter, &data[1], &data[2]);
- return 0;
+ ret = ni_tio_get_clock_src(counter, &data[1], &data[2]);
+ break;
case INSN_CONFIG_SET_GATE_SRC:
- return ni_tio_set_gate_src(counter, data[1], data[2]);
+ ret = ni_tio_set_gate_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_GET_GATE_SRC:
- return ni_tio_get_gate_src(counter, data[1], &data[2]);
+ ret = ni_tio_get_gate_src(counter, data[1], &data[2]);
+ break;
case INSN_CONFIG_SET_OTHER_SRC:
- return ni_tio_set_other_src(counter, data[1], data[2]);
+ ret = ni_tio_set_other_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_RESET:
ni_tio_reset_count_and_disarm(counter);
- return 0;
- default:
break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ return ret ? ret : insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
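comedi insn handlers return the number of data values consumed on
success or a negative errno, which the restructured tail
"return ret ? ret : insn->n;" now expresses in one place instead of
per-case returns. A minimal sketch of the caller's side of that
convention, assuming the standard subdevice hook (illustrative, not
part of this patch):

	ret = s->insn_config(dev, s, insn, data);
	if (ret < 0)
		return ret;	/* errno from e.g. ni_tio_set_clock_src() */
	/* success: ret == insn->n, every data word was handled */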
@@ -1219,7 +1355,7 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int val;
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
@@ -1235,9 +1371,9 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
* will be correct since the count value will definitely have latched
* by then.
*/
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
- if (val != read_register(counter, NITIO_SW_SAVE_REG(cidx)))
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
+ val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
+ if (val != ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx)))
+ val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
return val;
}
@@ -1250,7 +1386,7 @@ int ni_tio_insn_read(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
int i;
for (i = 0; i < insn->n; i++) {
@@ -1270,11 +1406,10 @@ int ni_tio_insn_read(struct comedi_device *dev,
}
EXPORT_SYMBOL_GPL(ni_tio_insn_read);
-static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
+static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
- const unsigned bits =
- read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
+ unsigned int cidx = counter->counter_index;
+ unsigned int bits = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
return (bits & GI_NEXT_LOAD_SRC(cidx))
? NITIO_LOADB_REG(cidx)
@@ -1288,9 +1423,9 @@ int ni_tio_insn_write(struct comedi_device *dev,
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
- unsigned load_reg;
+ unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int cidx = counter->counter_index;
+ unsigned int load_reg;
if (insn->n < 1)
return 0;
@@ -1306,19 +1441,19 @@ int ni_tio_insn_write(struct comedi_device *dev,
* load register is already selected.
*/
load_reg = ni_tio_next_load_register(counter);
- write_register(counter, data[0], load_reg);
+ ni_tio_write(counter, data[0], load_reg);
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, GI_LOAD);
/* restore load reg */
- write_register(counter, counter_dev->regs[load_reg], load_reg);
+ ni_tio_write(counter, counter_dev->regs[load_reg], load_reg);
break;
case 1:
counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADA_REG(cidx));
+ ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx));
break;
case 2:
counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADB_REG(cidx));
+ ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
@@ -1330,13 +1465,13 @@ EXPORT_SYMBOL_GPL(ni_tio_insn_write);
void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
ni_tio_reset_count_and_disarm(counter);
/* initialize counter registers */
counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
~0, GI_SYNC_GATE);
@@ -1344,10 +1479,10 @@ void ni_tio_init_counter(struct ni_gpct *counter)
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADA_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx));
counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADB_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx));
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
@@ -1356,7 +1491,7 @@ void ni_tio_init_counter(struct ni_gpct *counter)
if (ni_tio_has_gate2_registers(counter_dev)) {
counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_GATE2_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx));
}
ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
@@ -1367,17 +1502,17 @@ EXPORT_SYMBOL_GPL(ni_tio_init_counter);
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *dev,
- void (*write_register)(struct ni_gpct *counter,
- unsigned bits,
- enum ni_gpct_register reg),
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg),
+ void (*write)(struct ni_gpct *counter,
+ unsigned int value,
+ enum ni_gpct_register reg),
+ unsigned int (*read)(struct ni_gpct *counter,
+ enum ni_gpct_register reg),
enum ni_gpct_variant variant,
- unsigned num_counters)
+ unsigned int num_counters)
{
struct ni_gpct_device *counter_dev;
struct ni_gpct *counter;
- unsigned i;
+ unsigned int i;
if (num_counters == 0)
return NULL;
@@ -1387,8 +1522,8 @@ ni_gpct_device_construct(struct comedi_device *dev,
return NULL;
counter_dev->dev = dev;
- counter_dev->write_register = write_register;
- counter_dev->read_register = read_register;
+ counter_dev->write = write;
+ counter_dev->read = read;
counter_dev->variant = variant;
spin_lock_init(&counter_dev->regs_lock);
@@ -1413,7 +1548,7 @@ EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
{
- if (!counter_dev->counters)
+ if (!counter_dev)
return;
kfree(counter_dev->counters);
kfree(counter_dev);
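The renamed (*read)/(*write) hooks are supplied by the board driver
when it constructs the counter device. A minimal sketch of that
wiring, where my_reg_offset() and devpriv are hypothetical
board-driver details (dev->mmio is the usual comedi ioremap'ed base):

	static void my_gpct_write(struct ni_gpct *counter, unsigned int value,
				  enum ni_gpct_register reg)
	{
		struct comedi_device *dev = counter->counter_dev->dev;

		/* translate the generic register enum to a board offset */
		writew(value, dev->mmio + my_reg_offset(reg));
	}

	static unsigned int my_gpct_read(struct ni_gpct *counter,
					 enum ni_gpct_register reg)
	{
		struct comedi_device *dev = counter->counter_dev->dev;

		return readw(dev->mmio + my_reg_offset(reg));
	}

	/* at attach time: */
	devpriv->counter_dev = ni_gpct_device_construct(dev,
					my_gpct_write, my_gpct_read,
					ni_gpct_variant_m_series, 4);
	if (!devpriv->counter_dev)
		return -ENOMEM;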
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 25aedd0e5..4978358f9 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -1,29 +1,24 @@
/*
- drivers/ni_tio.h
- Header file for NI general purpose counter support code (ni_tio.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header file for NI general purpose counter support code (ni_tio.c)
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_NI_TIO_H
#define _COMEDI_NI_TIO_H
#include "../comedidev.h"
-/* forward declarations */
-struct mite_struct;
-struct ni_gpct_device;
-
enum ni_gpct_register {
NITIO_G0_AUTO_INC,
NITIO_G1_AUTO_INC,
@@ -106,35 +101,34 @@ enum ni_gpct_variant {
struct ni_gpct {
struct ni_gpct_device *counter_dev;
- unsigned counter_index;
- unsigned chip_index;
- uint64_t clock_period_ps; /* clock period in picoseconds */
+ unsigned int counter_index;
+ unsigned int chip_index;
+ u64 clock_period_ps; /* clock period in picoseconds */
struct mite_channel *mite_chan;
- spinlock_t lock;
+ spinlock_t lock; /* protects 'mite_chan' */
};
struct ni_gpct_device {
struct comedi_device *dev;
- void (*write_register)(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg);
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg);
+ void (*write)(struct ni_gpct *, unsigned int value,
+ enum ni_gpct_register);
+ unsigned int (*read)(struct ni_gpct *, enum ni_gpct_register);
enum ni_gpct_variant variant;
struct ni_gpct *counters;
- unsigned num_counters;
- unsigned regs[NITIO_NUM_REGS];
- spinlock_t regs_lock;
+ unsigned int num_counters;
+ unsigned int regs[NITIO_NUM_REGS];
+ spinlock_t regs_lock; /* protects 'regs' */
};
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *,
- void (*write_register)(struct ni_gpct *,
- unsigned bits,
- enum ni_gpct_register),
- unsigned (*read_register)(struct ni_gpct *,
- enum ni_gpct_register),
+ void (*write)(struct ni_gpct *,
+ unsigned int value,
+ enum ni_gpct_register),
+ unsigned int (*read)(struct ni_gpct *,
+ enum ni_gpct_register),
enum ni_gpct_variant,
- unsigned num_counters);
+ unsigned int num_counters);
void ni_gpct_device_destroy(struct ni_gpct_device *);
void ni_tio_init_counter(struct ni_gpct *);
int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index 2bceae493..b15b10833 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -1,20 +1,19 @@
/*
- drivers/ni_tio_internal.h
- Header file for NI general purpose counter support code (ni_tio.c and
- ni_tiocmd.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header file for NI general purpose counter support code (ni_tio.c and
+ * ni_tiocmd.c)
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_NI_TIO_INTERNAL_H
#define _COMEDI_NI_TIO_INTERNAL_H
@@ -24,68 +23,73 @@
#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
#define GI_AUTO_INC_MASK 0xff
#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
-#define GI_ARM (1 << 0)
-#define GI_SAVE_TRACE (1 << 1)
-#define GI_LOAD (1 << 2)
-#define GI_DISARM (1 << 4)
+#define GI_ARM BIT(0)
+#define GI_SAVE_TRACE BIT(1)
+#define GI_LOAD BIT(2)
+#define GI_DISARM BIT(4)
#define GI_CNT_DIR(x) (((x) & 0x3) << 5)
-#define GI_CNT_DIR_MASK (3 << 5)
-#define GI_WRITE_SWITCH (1 << 7)
-#define GI_SYNC_GATE (1 << 8)
-#define GI_LITTLE_BIG_ENDIAN (1 << 9)
-#define GI_BANK_SWITCH_START (1 << 10)
-#define GI_BANK_SWITCH_MODE (1 << 11)
-#define GI_BANK_SWITCH_ENABLE (1 << 12)
-#define GI_ARM_COPY (1 << 13)
-#define GI_SAVE_TRACE_COPY (1 << 14)
-#define GI_DISARM_COPY (1 << 15)
+#define GI_CNT_DIR_MASK GI_CNT_DIR(3)
+#define GI_WRITE_SWITCH BIT(7)
+#define GI_SYNC_GATE BIT(8)
+#define GI_LITTLE_BIG_ENDIAN BIT(9)
+#define GI_BANK_SWITCH_START BIT(10)
+#define GI_BANK_SWITCH_MODE BIT(11)
+#define GI_BANK_SWITCH_ENABLE BIT(12)
+#define GI_ARM_COPY BIT(13)
+#define GI_SAVE_TRACE_COPY BIT(14)
+#define GI_DISARM_COPY BIT(15)
#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
-#define GI_GATING_DISABLED (0 << 0)
-#define GI_LEVEL_GATING (1 << 0)
-#define GI_RISING_EDGE_GATING (2 << 0)
-#define GI_FALLING_EDGE_GATING (3 << 0)
-#define GI_GATING_MODE_MASK (3 << 0)
-#define GI_GATE_ON_BOTH_EDGES (1 << 2)
-#define GI_EDGE_GATE_STARTS_STOPS (0 << 3)
-#define GI_EDGE_GATE_STOPS_STARTS (1 << 3)
-#define GI_EDGE_GATE_STARTS (2 << 3)
-#define GI_EDGE_GATE_NO_STARTS_OR_STOPS (3 << 3)
-#define GI_EDGE_GATE_MODE_MASK (3 << 3)
-#define GI_STOP_ON_GATE (0 << 5)
-#define GI_STOP_ON_GATE_OR_TC (1 << 5)
-#define GI_STOP_ON_GATE_OR_SECOND_TC (2 << 5)
-#define GI_STOP_MODE_MASK (3 << 5)
-#define GI_LOAD_SRC_SEL (1 << 7)
-#define GI_OUTPUT_TC_PULSE (1 << 8)
-#define GI_OUTPUT_TC_TOGGLE (2 << 8)
-#define GI_OUTPUT_TC_OR_GATE_TOGGLE (3 << 8)
-#define GI_OUTPUT_MODE_MASK (3 << 8)
-#define GI_NO_HARDWARE_DISARM (0 << 10)
-#define GI_DISARM_AT_TC (1 << 10)
-#define GI_DISARM_AT_GATE (2 << 10)
-#define GI_DISARM_AT_TC_OR_GATE (3 << 10)
-#define GI_COUNTING_ONCE_MASK (3 << 10)
-#define GI_LOADING_ON_TC (1 << 12)
-#define GI_GATE_POL_INVERT (1 << 13)
-#define GI_LOADING_ON_GATE (1 << 14)
-#define GI_RELOAD_SRC_SWITCHING (1 << 15)
+#define GI_GATING_MODE(x) (((x) & 0x3) << 0)
+#define GI_GATING_DISABLED GI_GATING_MODE(0)
+#define GI_LEVEL_GATING GI_GATING_MODE(1)
+#define GI_RISING_EDGE_GATING GI_GATING_MODE(2)
+#define GI_FALLING_EDGE_GATING GI_GATING_MODE(3)
+#define GI_GATING_MODE_MASK GI_GATING_MODE(3)
+#define GI_GATE_ON_BOTH_EDGES BIT(2)
+#define GI_EDGE_GATE_MODE(x) (((x) & 0x3) << 3)
+#define GI_EDGE_GATE_STARTS_STOPS GI_EDGE_GATE_MODE(0)
+#define GI_EDGE_GATE_STOPS_STARTS GI_EDGE_GATE_MODE(1)
+#define GI_EDGE_GATE_STARTS GI_EDGE_GATE_MODE(2)
+#define GI_EDGE_GATE_NO_STARTS_OR_STOPS GI_EDGE_GATE_MODE(3)
+#define GI_EDGE_GATE_MODE_MASK GI_EDGE_GATE_MODE(3)
+#define GI_STOP_MODE(x) (((x) & 0x3) << 5)
+#define GI_STOP_ON_GATE GI_STOP_MODE(0)
+#define GI_STOP_ON_GATE_OR_TC GI_STOP_MODE(1)
+#define GI_STOP_ON_GATE_OR_SECOND_TC GI_STOP_MODE(2)
+#define GI_STOP_MODE_MASK GI_STOP_MODE(3)
+#define GI_LOAD_SRC_SEL BIT(7)
+#define GI_OUTPUT_MODE(x) (((x) & 0x3) << 8)
+#define GI_OUTPUT_TC_PULSE GI_OUTPUT_MODE(1)
+#define GI_OUTPUT_TC_TOGGLE GI_OUTPUT_MODE(2)
+#define GI_OUTPUT_TC_OR_GATE_TOGGLE GI_OUTPUT_MODE(3)
+#define GI_OUTPUT_MODE_MASK GI_OUTPUT_MODE(3)
+#define GI_COUNTING_ONCE(x) (((x) & 0x3) << 10)
+#define GI_NO_HARDWARE_DISARM GI_COUNTING_ONCE(0)
+#define GI_DISARM_AT_TC GI_COUNTING_ONCE(1)
+#define GI_DISARM_AT_GATE GI_COUNTING_ONCE(2)
+#define GI_DISARM_AT_TC_OR_GATE GI_COUNTING_ONCE(3)
+#define GI_COUNTING_ONCE_MASK GI_COUNTING_ONCE(3)
+#define GI_LOADING_ON_TC BIT(12)
+#define GI_GATE_POL_INVERT BIT(13)
+#define GI_LOADING_ON_GATE BIT(14)
+#define GI_RELOAD_SRC_SWITCHING BIT(15)
#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
-#define GI_READ_ACKS_IRQ (1 << 0)
-#define GI_WRITE_ACKS_IRQ (1 << 1)
+#define GI_READ_ACKS_IRQ BIT(0)
+#define GI_WRITE_ACKS_IRQ BIT(1)
#define GI_BITS_TO_SRC(x) (((x) >> 2) & 0x1f)
#define GI_SRC_SEL(x) (((x) & 0x1f) << 2)
-#define GI_SRC_SEL_MASK (0x1f << 2)
+#define GI_SRC_SEL_MASK GI_SRC_SEL(0x1f)
#define GI_BITS_TO_GATE(x) (((x) >> 7) & 0x1f)
#define GI_GATE_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE_SEL_MASK (0x1f << 7)
-#define GI_GATE_SEL_LOAD_SRC (1 << 12)
-#define GI_OR_GATE (1 << 13)
-#define GI_OUTPUT_POL_INVERT (1 << 14)
-#define GI_SRC_POL_INVERT (1 << 15)
+#define GI_GATE_SEL_MASK GI_GATE_SEL(0x1f)
+#define GI_GATE_SEL_LOAD_SRC BIT(12)
+#define GI_OR_GATE BIT(13)
+#define GI_OUTPUT_POL_INVERT BIT(14)
+#define GI_SRC_POL_INVERT BIT(15)
#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
#define GI_CNT_MODE(x) (((x) & 0x7) << 0)
#define GI_CNT_MODE_NORMAL GI_CNT_MODE(0)
@@ -94,152 +98,84 @@
#define GI_CNT_MODE_QUADX4 GI_CNT_MODE(3)
#define GI_CNT_MODE_TWO_PULSE GI_CNT_MODE(4)
#define GI_CNT_MODE_SYNC_SRC GI_CNT_MODE(6)
-#define GI_CNT_MODE_MASK (7 << 0)
-#define GI_INDEX_MODE (1 << 4)
+#define GI_CNT_MODE_MASK GI_CNT_MODE(7)
+#define GI_INDEX_MODE BIT(4)
#define GI_INDEX_PHASE(x) (((x) & 0x3) << 5)
-#define GI_INDEX_PHASE_MASK (3 << 5)
-#define GI_HW_ARM_ENA (1 << 7)
+#define GI_INDEX_PHASE_MASK GI_INDEX_PHASE(3)
+#define GI_HW_ARM_ENA BIT(7)
#define GI_HW_ARM_SEL(x) ((x) << 8)
-#define GI_660X_HW_ARM_SEL_MASK (0x7 << 8)
-#define GI_M_HW_ARM_SEL_MASK (0x1f << 8)
-#define GI_660X_PRESCALE_X8 (1 << 12)
-#define GI_M_PRESCALE_X8 (1 << 13)
-#define GI_660X_ALT_SYNC (1 << 13)
-#define GI_M_ALT_SYNC (1 << 14)
-#define GI_660X_PRESCALE_X2 (1 << 14)
-#define GI_M_PRESCALE_X2 (1 << 15)
+#define GI_660X_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x7)
+#define GI_M_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x1f)
+#define GI_660X_PRESCALE_X8 BIT(12)
+#define GI_M_PRESCALE_X8 BIT(13)
+#define GI_660X_ALT_SYNC BIT(13)
+#define GI_M_ALT_SYNC BIT(14)
+#define GI_660X_PRESCALE_X2 BIT(14)
+#define GI_M_PRESCALE_X2 BIT(15)
#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
-#define GI_GATE2_MODE (1 << 0)
+#define GI_GATE2_MODE BIT(0)
#define GI_BITS_TO_GATE2(x) (((x) >> 7) & 0x1f)
#define GI_GATE2_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE2_SEL_MASK (0x1f << 7)
-#define GI_GATE2_POL_INVERT (1 << 13)
-#define GI_GATE2_SUBSEL (1 << 14)
-#define GI_SRC_SUBSEL (1 << 15)
+#define GI_GATE2_SEL_MASK GI_GATE2_SEL(0x1f)
+#define GI_GATE2_POL_INVERT BIT(13)
+#define GI_GATE2_SUBSEL BIT(14)
+#define GI_SRC_SUBSEL BIT(15)
#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
-#define GI_SAVE(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_COUNTING(x) (((x) % 2) ? (1 << 3) : (1 << 2))
-#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? (1 << 5) : (1 << 4))
-#define GI_STALE_DATA(x) (((x) % 2) ? (1 << 7) : (1 << 6))
-#define GI_ARMED(x) (((x) % 2) ? (1 << 9) : (1 << 8))
-#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? (1 << 11) : (1 << 10))
-#define GI_TC_ERROR(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_GATE_ERROR(x) (((x) % 2) ? (1 << 15) : (1 << 14))
+#define GI_SAVE(x) (((x) % 2) ? BIT(1) : BIT(0))
+#define GI_COUNTING(x) (((x) % 2) ? BIT(3) : BIT(2))
+#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? BIT(5) : BIT(4))
+#define GI_STALE_DATA(x) (((x) % 2) ? BIT(7) : BIT(6))
+#define GI_ARMED(x) (((x) % 2) ? BIT(9) : BIT(8))
+#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? BIT(11) : BIT(10))
+#define GI_TC_ERROR(x) (((x) % 2) ? BIT(13) : BIT(12))
+#define GI_GATE_ERROR(x) (((x) % 2) ? BIT(15) : BIT(14))
#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
-#define GI_RESET(x) (1 << (2 + ((x) % 2)))
+#define GI_RESET(x) BIT(2 + ((x) % 2))
#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
-#define GI_OUTPUT(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_HW_SAVE(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_PERMANENT_STALE(x) (((x) % 2) ? (1 << 15) : (1 << 14))
+#define GI_OUTPUT(x) (((x) % 2) ? BIT(1) : BIT(0))
+#define GI_HW_SAVE(x) (((x) % 2) ? BIT(13) : BIT(12))
+#define GI_PERMANENT_STALE(x) (((x) % 2) ? BIT(15) : BIT(14))
#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
-#define GI_DMA_ENABLE (1 << 0)
-#define GI_DMA_WRITE (1 << 1)
-#define GI_DMA_INT_ENA (1 << 2)
-#define GI_DMA_RESET (1 << 3)
-#define GI_DMA_BANKSW_ERROR (1 << 4)
+#define GI_DMA_ENABLE BIT(0)
+#define GI_DMA_WRITE BIT(1)
+#define GI_DMA_INT_ENA BIT(2)
+#define GI_DMA_RESET BIT(3)
+#define GI_DMA_BANKSW_ERROR BIT(4)
#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
-#define GI_DMA_READBANK (1 << 13)
-#define GI_DRQ_ERROR (1 << 14)
-#define GI_DRQ_STATUS (1 << 15)
+#define GI_DMA_READBANK BIT(13)
+#define GI_DRQ_ERROR BIT(14)
+#define GI_DRQ_STATUS BIT(15)
#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
-#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 1) : (1 << 5))
-#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 2) : (1 << 6))
-#define GI_TC_INTERRUPT_ACK (1 << 14)
-#define GI_GATE_INTERRUPT_ACK (1 << 15)
+#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? BIT(1) : BIT(5))
+#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? BIT(2) : BIT(6))
+#define GI_TC_INTERRUPT_ACK BIT(14)
+#define GI_GATE_INTERRUPT_ACK BIT(15)
#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
-#define GI_GATE_INTERRUPT (1 << 2)
-#define GI_TC (1 << 3)
-#define GI_INTERRUPT (1 << 15)
+#define GI_GATE_INTERRUPT BIT(2)
+#define GI_TC BIT(3)
+#define GI_INTERRUPT BIT(15)
#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
-#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 9) : (1 << 6))
-#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 10) : (1 << 8))
-
-static inline void write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- counter->counter_dev->write_register(counter, bits, reg);
-}
-
-static inline unsigned read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- return counter->counter_dev->read_register(counter, reg);
-}
+#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(9) : BIT(6))
+#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(10) : BIT(8))
-static inline int ni_tio_counting_mode_registers_present(const struct
- ni_gpct_device
- *counter_dev)
-{
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- return 0;
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- return 1;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
- enum ni_gpct_register
- register_index, unsigned bit_mask,
- unsigned bit_values,
- unsigned transient_bit_values)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- counter_dev->regs[register_index] &= ~bit_mask;
- counter_dev->regs[register_index] |= (bit_values & bit_mask);
- write_register(counter,
- counter_dev->regs[register_index] | transient_bit_values,
- register_index);
- mmiowb();
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
-}
+void ni_tio_write(struct ni_gpct *, unsigned int value, enum ni_gpct_register);
+unsigned int ni_tio_read(struct ni_gpct *, enum ni_gpct_register);
-/* ni_tio_set_bits( ) is for safely writing to registers whose bits may be
- * twiddled in interrupt context, or whose software copy may be read in
- * interrupt context.
- */
-static inline void ni_tio_set_bits(struct ni_gpct *counter,
- enum ni_gpct_register register_index,
- unsigned bit_mask, unsigned bit_values)
+static inline bool
+ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
{
- ni_tio_set_bits_transient(counter, register_index, bit_mask, bit_values,
- 0x0);
+ /* m series and 660x variants have counting mode registers */
+ return counter_dev->variant != ni_gpct_variant_e_series;
}
-/* ni_tio_get_soft_copy( ) is for safely reading the software copy of a register
-whose bits might be modified in interrupt context, or whose software copy
-might need to be read in interrupt context.
-*/
-static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
- enum ni_gpct_register
- register_index)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
- unsigned value;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- value = counter_dev->regs[register_index];
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
- return value;
-}
+void ni_tio_set_bits(struct ni_gpct *, enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value);
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *,
+ enum ni_gpct_register reg);
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger);
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source);
+int ni_tio_arm(struct ni_gpct *, bool arm, unsigned int start_trigger);
+int ni_tio_set_gate_src(struct ni_gpct *, unsigned int gate, unsigned int src);
#endif /* _COMEDI_NI_TIO_INTERNAL_H */
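With ni_tio_set_bits() now declared here (moved out of line into
ni_tio.c) and the mode fields expressed through parameterized macros,
a field update remains a masked read-modify-write of the soft copy;
for example, selecting rising-edge gating without disturbing the other
mode bits ('counter' and 'cidx' as used throughout ni_tio.c):

	ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
			GI_GATING_MODE_MASK, GI_RISING_EDGE_GATING);

Because GI_GATING_MODE_MASK is defined as GI_GATING_MODE(3), the mask
and its values can no longer drift apart the way independently
open-coded (n << 0) constants could.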
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 823e47910..9007c5754 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -1,19 +1,18 @@
/*
- comedi/drivers/ni_tiocmd.c
- Command support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Command support for NI general purpose counters
+ *
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Module: ni_tiocmd
@@ -36,13 +35,10 @@
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
+ *
+ * TODO: Support use of both banks X and Y
*/
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
#include <linux/module.h>
#include "ni_tio_internal.h"
#include "mite.h"
@@ -51,9 +47,9 @@ static void ni_tio_configure_dma(struct ni_gpct *counter,
bool enable, bool read)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mask;
- unsigned bits;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mask;
+ unsigned int bits;
mask = GI_READ_ACKS_IRQ | GI_WRITE_ACKS_IRQ;
bits = 0;
@@ -103,7 +99,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
spin_unlock_irqrestore(&counter->lock, flags);
if (ret < 0)
return ret;
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+ ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
s->async->inttrig = NULL;
return ret;
@@ -113,7 +109,7 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int ret = 0;
@@ -129,9 +125,6 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
case ni_gpct_variant_e_series:
mite_prep_dma(counter->mite_chan, 16, 32);
break;
- default:
- BUG();
- break;
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
ni_tio_configure_dma(counter, true, true);
@@ -143,9 +136,9 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
mite_dma_arm(counter->mite_chan);
if (cmd->start_src == TRIG_NOW)
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+ ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
else if (cmd->start_src == TRIG_EXT)
- ret = ni_tio_arm(counter, 1, cmd->start_arg);
+ ret = ni_tio_arm(counter, true, cmd->start_arg);
}
return ret;
}
@@ -163,9 +156,9 @@ static int ni_tio_cmd_setup(struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
int set_gate_source = 0;
- unsigned gate_source;
+ unsigned int gate_source;
int retval = 0;
if (cmd->scan_begin_src == TRIG_EXT) {
@@ -289,10 +282,10 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
int ni_tio_cancel(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned long flags;
- ni_tio_arm(counter, 0, 0);
+ ni_tio_arm(counter, false, 0);
spin_lock_irqsave(&counter->lock, flags);
if (counter->mite_chan)
mite_dma_disarm(counter->mite_chan);
@@ -305,9 +298,6 @@ int ni_tio_cancel(struct ni_gpct *counter)
}
EXPORT_SYMBOL_GPL(ni_tio_cancel);
- /* During buffered input counter operation for e-series, the gate
- interrupt is acked automatically by the dma controller, due to the
- Gi_Read/Write_Acknowledges_IRQ bits in the input select register. */
static int should_ack_gate(struct ni_gpct *counter)
{
unsigned long flags;
@@ -315,12 +305,19 @@ static int should_ack_gate(struct ni_gpct *counter)
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
- /* not sure if 660x really supports gate
- interrupts (the bits are not listed
- in register-level manual) */
case ni_gpct_variant_660x:
+ /*
+ * not sure if the 660x really supports gate interrupts
+ * (the bits are not listed in the register-level manual)
+ */
return 1;
case ni_gpct_variant_e_series:
+ /*
+ * During buffered input counter operation for e-series,
+ * the gate interrupt is acked automatically by the dma
+ * controller, due to the Gi_Read/Write_Acknowledges_IRQ
+ * bits in the input select register.
+ */
spin_lock_irqsave(&counter->lock, flags);
{
if (!counter->mite_chan ||
@@ -338,15 +335,14 @@ static int should_ack_gate(struct ni_gpct *counter)
static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
int *gate_error,
int *tc_error,
- int *perm_stale_data,
- int *stale_data)
+ int *perm_stale_data)
{
- unsigned cidx = counter->counter_index;
- const unsigned short gxx_status = read_register(counter,
+ unsigned int cidx = counter->counter_index;
+ const unsigned short gxx_status = ni_tio_read(counter,
NITIO_SHARED_STATUS_REG(cidx));
- const unsigned short gi_status = read_register(counter,
+ const unsigned short gi_status = ni_tio_read(counter,
NITIO_STATUS_REG(cidx));
- unsigned ack = 0;
+ unsigned int ack = 0;
if (gate_error)
*gate_error = 0;
@@ -354,15 +350,15 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
*tc_error = 0;
if (perm_stale_data)
*perm_stale_data = 0;
- if (stale_data)
- *stale_data = 0;
if (gxx_status & GI_GATE_ERROR(cidx)) {
ack |= GI_GATE_ERROR_CONFIRM(cidx);
if (gate_error) {
- /*660x don't support automatic acknowledgment
- of gate interrupt via dma read/write
- and report bogus gate errors */
+ /*
+ * The 660x variants don't support automatic
+ * acknowledgment of the gate interrupt via dma
+ * read/write and report bogus gate errors.
+ */
if (counter->counter_dev->variant !=
ni_gpct_variant_660x)
*gate_error = 1;
@@ -380,14 +376,10 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
ack |= GI_GATE_INTERRUPT_ACK;
}
if (ack)
- write_register(counter, ack, NITIO_INT_ACK_REG(cidx));
+ ni_tio_write(counter, ack, NITIO_INT_ACK_REG(cidx));
if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
GI_LOADING_ON_GATE) {
- if (gxx_status & GI_STALE_DATA(cidx)) {
- if (stale_data)
- *stale_data = 1;
- }
- if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
+ if (ni_tio_read(counter, NITIO_STATUS2_REG(cidx)) &
GI_PERMANENT_STALE(cidx)) {
dev_info(counter->counter_dev->dev->class_dev,
"%s: Gi_Permanent_Stale_Data detected.\n",
@@ -400,22 +392,21 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
void ni_tio_acknowledge(struct ni_gpct *counter)
{
- ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
+ ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(ni_tio_acknowledge);
void ni_tio_handle_interrupt(struct ni_gpct *counter,
struct comedi_subdevice *s)
{
- unsigned cidx = counter->counter_index;
- unsigned gpct_mite_status;
+ unsigned int cidx = counter->counter_index;
unsigned long flags;
int gate_error;
int tc_error;
int perm_stale_data;
ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
- &perm_stale_data, NULL);
+ &perm_stale_data);
if (gate_error) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_Gate_Error detected.\n", __func__);
@@ -426,7 +417,7 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
- if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) &
+ if (ni_tio_read(counter, NITIO_DMA_STATUS_REG(cidx)) &
GI_DRQ_ERROR) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_DRQ_Error detected.\n", __func__);
@@ -437,16 +428,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
break;
}
spin_lock_irqsave(&counter->lock, flags);
- if (!counter->mite_chan) {
- spin_unlock_irqrestore(&counter->lock, flags);
- return;
- }
- gpct_mite_status = mite_get_status(counter->mite_chan);
- if (gpct_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- counter->mite_chan->mite->mite_io_addr +
- MITE_CHOR(counter->mite_chan->channel));
- mite_sync_input_dma(counter->mite_chan, s);
+ if (counter->mite_chan)
+ mite_ack_linkc(counter->mite_chan, s, true);
spin_unlock_irqrestore(&counter->lock, flags);
}
EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
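The counter's spinlock (documented in ni_tio.h as protecting
'mite_chan') means any DMA-channel access must re-check the pointer
under the lock, which is what the reworked interrupt handler now does
via mite_ack_linkc(). The same idiom appears in ni_tio_cancel() above:

	spin_lock_irqsave(&counter->lock, flags);
	if (counter->mite_chan)
		mite_dma_disarm(counter->mite_chan);
	spin_unlock_irqrestore(&counter->lock, flags);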
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
index fbcf25069..2892e6528 100644
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ b/drivers/staging/comedi/drivers/plx9052.h
@@ -1,22 +1,21 @@
/*
- comedi/drivers/plx9052.h
- Definitions for the PLX-9052 PCI interface chip
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Definitions for the PLX-9052 PCI interface chip
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _PLX9052_H_
#define _PLX9052_H_
@@ -25,55 +24,56 @@
* INTCSR - Interrupt Control/Status register
*/
#define PLX9052_INTCSR 0x4c
-#define PLX9052_INTCSR_LI1ENAB (1 << 0) /* LI1 enabled */
-#define PLX9052_INTCSR_LI1POL (1 << 1) /* LI1 active high */
-#define PLX9052_INTCSR_LI1STAT (1 << 2) /* LI1 active */
-#define PLX9052_INTCSR_LI2ENAB (1 << 3) /* LI2 enabled */
-#define PLX9052_INTCSR_LI2POL (1 << 4) /* LI2 active high */
-#define PLX9052_INTCSR_LI2STAT (1 << 5) /* LI2 active */
-#define PLX9052_INTCSR_PCIENAB (1 << 6) /* PCIINT enabled */
-#define PLX9052_INTCSR_SOFTINT (1 << 7) /* generate soft int */
-#define PLX9052_INTCSR_LI1SEL (1 << 8) /* LI1 edge */
-#define PLX9052_INTCSR_LI2SEL (1 << 9) /* LI2 edge */
-#define PLX9052_INTCSR_LI1CLRINT (1 << 10) /* LI1 clear int */
-#define PLX9052_INTCSR_LI2CLRINT (1 << 11) /* LI2 clear int */
-#define PLX9052_INTCSR_ISAMODE (1 << 12) /* ISA interface mode */
+#define PLX9052_INTCSR_LI1ENAB BIT(0) /* LI1 enabled */
+#define PLX9052_INTCSR_LI1POL BIT(1) /* LI1 active high */
+#define PLX9052_INTCSR_LI1STAT BIT(2) /* LI1 active */
+#define PLX9052_INTCSR_LI2ENAB BIT(3) /* LI2 enabled */
+#define PLX9052_INTCSR_LI2POL BIT(4) /* LI2 active high */
+#define PLX9052_INTCSR_LI2STAT BIT(5) /* LI2 active */
+#define PLX9052_INTCSR_PCIENAB BIT(6) /* PCIINT enabled */
+#define PLX9052_INTCSR_SOFTINT BIT(7) /* generate soft int */
+#define PLX9052_INTCSR_LI1SEL BIT(8) /* LI1 edge */
+#define PLX9052_INTCSR_LI2SEL BIT(9) /* LI2 edge */
+#define PLX9052_INTCSR_LI1CLRINT BIT(10) /* LI1 clear int */
+#define PLX9052_INTCSR_LI2CLRINT BIT(11) /* LI2 clear int */
+#define PLX9052_INTCSR_ISAMODE BIT(12) /* ISA interface mode */
/*
* CNTRL - User I/O, Direct Slave Response, Serial EEPROM, and
* Initialization Control register
*/
#define PLX9052_CNTRL 0x50
-#define PLX9052_CNTRL_WAITO (1 << 0) /* UIO0 or WAITO# select */
-#define PLX9052_CNTRL_UIO0_DIR (1 << 1) /* UIO0 direction */
-#define PLX9052_CNTRL_UIO0_DATA (1 << 2) /* UIO0 data */
-#define PLX9052_CNTRL_LLOCKO (1 << 3) /* UIO1 or LLOCKo# select */
-#define PLX9052_CNTRL_UIO1_DIR (1 << 4) /* UIO1 direction */
-#define PLX9052_CNTRL_UIO1_DATA (1 << 5) /* UIO1 data */
-#define PLX9052_CNTRL_CS2 (1 << 6) /* UIO2 or CS2# select */
-#define PLX9052_CNTRL_UIO2_DIR (1 << 7) /* UIO2 direction */
-#define PLX9052_CNTRL_UIO2_DATA (1 << 8) /* UIO2 data */
-#define PLX9052_CNTRL_CS3 (1 << 9) /* UIO3 or CS3# select */
-#define PLX9052_CNTRL_UIO3_DIR (1 << 10) /* UIO3 direction */
-#define PLX9052_CNTRL_UIO3_DATA (1 << 11) /* UIO3 data */
-#define PLX9052_CNTRL_PCIBAR01 (0 << 12) /* bar 0 (mem) and 1 (I/O) */
-#define PLX9052_CNTRL_PCIBAR0 (1 << 12) /* bar 0 (mem) only */
-#define PLX9052_CNTRL_PCIBAR1 (2 << 12) /* bar 1 (I/O) only */
-#define PLX9052_CNTRL_PCI2_1_FEATURES (1 << 14) /* PCI r2.1 features enabled */
-#define PLX9052_CNTRL_PCI_R_W_FLUSH (1 << 15) /* read w/write flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_FLUSH (1 << 16) /* read no flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_WRITE (1 << 17) /* read no write mode */
-#define PLX9052_CNTRL_PCI_W_RELEASE (1 << 18) /* write release bus mode */
-#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* slave retry clks */
-#define PLX9052_CNTRL_LOCK_ENAB (1 << 23) /* slave LOCK# enable */
+#define PLX9052_CNTRL_WAITO BIT(0) /* UIO0 or WAITO# select */
+#define PLX9052_CNTRL_UIO0_DIR BIT(1) /* UIO0 direction */
+#define PLX9052_CNTRL_UIO0_DATA BIT(2) /* UIO0 data */
+#define PLX9052_CNTRL_LLOCKO BIT(3) /* UIO1 or LLOCKo# select */
+#define PLX9052_CNTRL_UIO1_DIR BIT(4) /* UIO1 direction */
+#define PLX9052_CNTRL_UIO1_DATA BIT(5) /* UIO1 data */
+#define PLX9052_CNTRL_CS2 BIT(6) /* UIO2 or CS2# select */
+#define PLX9052_CNTRL_UIO2_DIR BIT(7) /* UIO2 direction */
+#define PLX9052_CNTRL_UIO2_DATA BIT(8) /* UIO2 data */
+#define PLX9052_CNTRL_CS3 BIT(9) /* UIO3 or CS3# select */
+#define PLX9052_CNTRL_UIO3_DIR BIT(10) /* UIO3 direction */
+#define PLX9052_CNTRL_UIO3_DATA BIT(11) /* UIO3 data */
+#define PLX9052_CNTRL_PCIBAR(x) (((x) & 0x3) << 12)
+#define PLX9052_CNTRL_PCIBAR01 PLX9052_CNTRL_PCIBAR(0) /* mem and IO */
+#define PLX9052_CNTRL_PCIBAR0 PLX9052_CNTRL_PCIBAR(1) /* mem only */
+#define PLX9052_CNTRL_PCIBAR1 PLX9052_CNTRL_PCIBAR(2) /* IO only */
+#define PLX9052_CNTRL_PCI2_1_FEATURES BIT(14) /* PCI v2.1 features enabled */
+#define PLX9052_CNTRL_PCI_R_W_FLUSH BIT(15) /* read w/write flush mode */
+#define PLX9052_CNTRL_PCI_R_NO_FLUSH BIT(16) /* read no flush mode */
+#define PLX9052_CNTRL_PCI_R_NO_WRITE BIT(17) /* read no write mode */
+#define PLX9052_CNTRL_PCI_W_RELEASE BIT(18) /* write release bus mode */
+#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* retry clks */
+#define PLX9052_CNTRL_LOCK_ENAB BIT(23) /* slave LOCK# enable */
#define PLX9052_CNTRL_EEPROM_MASK (0x1f << 24) /* EEPROM bits */
-#define PLX9052_CNTRL_EEPROM_CLK (1 << 24) /* EEPROM clock */
-#define PLX9052_CNTRL_EEPROM_CS (1 << 25) /* EEPROM chip select */
-#define PLX9052_CNTRL_EEPROM_DOUT (1 << 26) /* EEPROM write bit */
-#define PLX9052_CNTRL_EEPROM_DIN (1 << 27) /* EEPROM read bit */
-#define PLX9052_CNTRL_EEPROM_PRESENT (1 << 28) /* EEPROM present */
-#define PLX9052_CNTRL_RELOAD_CFG (1 << 29) /* reload configuration */
-#define PLX9052_CNTRL_PCI_RESET (1 << 30) /* PCI adapter reset */
-#define PLX9052_CNTRL_MASK_REV (1 << 31) /* mask revision */
+#define PLX9052_CNTRL_EEPROM_CLK BIT(24) /* EEPROM clock */
+#define PLX9052_CNTRL_EEPROM_CS BIT(25) /* EEPROM chip select */
+#define PLX9052_CNTRL_EEPROM_DOUT BIT(26) /* EEPROM write bit */
+#define PLX9052_CNTRL_EEPROM_DIN BIT(27) /* EEPROM read bit */
+#define PLX9052_CNTRL_EEPROM_PRESENT BIT(28) /* EEPROM present */
+#define PLX9052_CNTRL_RELOAD_CFG BIT(29) /* reload configuration */
+#define PLX9052_CNTRL_PCI_RESET BIT(30) /* PCI adapter reset */
+#define PLX9052_CNTRL_MASK_REV BIT(31) /* mask revision */
#endif /* _PLX9052_H_ */
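A sketch of typical INTCSR use in a shared interrupt handler, checking
whether either local interrupt source is active; 'plx_iobase' is an
assumed private-data field, not something this header defines:

	unsigned int intcsr = inl(devpriv->plx_iobase + PLX9052_INTCSR);

	if (!(intcsr & (PLX9052_INTCSR_LI1STAT | PLX9052_INTCSR_LI2STAT)))
		return IRQ_NONE;	/* interrupt is not ours */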
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index f5cd6d500..8d1aee00b 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -88,7 +88,7 @@ enum marb_bits {
/* direct slave LLOCKo# enable */
MARB_DS_LLOCK_ENABLE = 0x00400000,
MARB_PCI_REQUEST_MODE = 0x00800000,
- MARB_PCIv21_MODE = 0x01000000, /* pci specification v2.1 mode */
+ MARB_PCIV21_MODE = 0x01000000, /* pci specification v2.1 mode */
MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
/* gate local bus latency timer with BREQ */
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
index 7be53109c..47eadbf4d 100644
--- a/drivers/staging/comedi/drivers/z8536.h
+++ b/drivers/staging/comedi/drivers/z8536.h
@@ -24,11 +24,12 @@
#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */
#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */
#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */
-#define Z8536_CFG_CTRL_LC_INDEP (0 << 0)/* C/Ts Independent */
-#define Z8536_CFG_CTRL_LC_GATE (1 << 0)/* C/T 1 Out Gates C/T 2 */
-#define Z8536_CFG_CTRL_LC_TRIG (2 << 0)/* C/T 1 Out Triggers C/T 2 */
-#define Z8536_CFG_CTRL_LC_CLK (3 << 0)/* C/T 1 Out Clocks C/T 2 */
-#define Z8536_CFG_CTRL_LC_MASK (3 << 0)/* C/T Link Control mask */
+#define Z8536_CFG_CTRL_LC(x) (((x) & 0x3) << 0) /* Link Control */
+#define Z8536_CFG_CTRL_LC_INDEP Z8536_CFG_CTRL_LC(0)/* Independent */
+#define Z8536_CFG_CTRL_LC_GATE Z8536_CFG_CTRL_LC(1)/* 1 Gates 2 */
+#define Z8536_CFG_CTRL_LC_TRIG Z8536_CFG_CTRL_LC(2)/* 1 Triggers 2 */
+#define Z8536_CFG_CTRL_LC_CLK Z8536_CFG_CTRL_LC(3)/* 1 Clocks 2 */
+#define Z8536_CFG_CTRL_LC_MASK Z8536_CFG_CTRL_LC(3)
/* Interrupt Vector registers */
#define Z8536_PA_INT_VECT_REG 0x02
@@ -43,15 +44,16 @@
#define Z8536_CT2_CMDSTAT_REG 0x0b
#define Z8536_CT3_CMDSTAT_REG 0x0c
#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x))
-#define Z8536_CMD_NULL (0 << 5)/* Null Code */
-#define Z8536_CMD_CLR_IP_IUS (1 << 5)/* Clear IP & IUS */
-#define Z8536_CMD_SET_IUS (2 << 5)/* Set IUS */
-#define Z8536_CMD_CLR_IUS (3 << 5)/* Clear IUS */
-#define Z8536_CMD_SET_IP (4 << 5)/* Set IP */
-#define Z8536_CMD_CLR_IP (5 << 5)/* Clear IP */
-#define Z8536_CMD_SET_IE (6 << 5)/* Set IE */
-#define Z8536_CMD_CLR_IE (7 << 5)/* Clear IE */
-#define Z8536_CMD_MASK (7 << 5)
+#define Z8536_CMD(x) (((x) & 0x7) << 5)
+#define Z8536_CMD_NULL Z8536_CMD(0) /* Null Code */
+#define Z8536_CMD_CLR_IP_IUS Z8536_CMD(1) /* Clear IP & IUS */
+#define Z8536_CMD_SET_IUS Z8536_CMD(2) /* Set IUS */
+#define Z8536_CMD_CLR_IUS Z8536_CMD(3) /* Clear IUS */
+#define Z8536_CMD_SET_IP Z8536_CMD(4) /* Set IP */
+#define Z8536_CMD_CLR_IP Z8536_CMD(5) /* Clear IP */
+#define Z8536_CMD_SET_IE Z8536_CMD(6) /* Set IE */
+#define Z8536_CMD_CLR_IE Z8536_CMD(7) /* Clear IE */
+#define Z8536_CMD_MASK Z8536_CMD(7)
#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */
#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */
@@ -105,46 +107,51 @@
#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */
#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */
#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */
-#define Z8536_CT_MODE_DCS_PULSE (0 << 0)/* Duty Cycle - Pulse */
-#define Z8536_CT_MODE_DCS_ONESHOT (1 << 0)/* Duty Cycle - One-Shot */
-#define Z8536_CT_MODE_DCS_SQRWAVE (2 << 0)/* Duty Cycle - Square Wave */
-#define Z8536_CT_MODE_DCS_DO_NOT_USE (3 << 0)/* Duty Cycle - Do Not Use */
-#define Z8536_CT_MODE_DCS_MASK (3 << 0)/* Duty Cycle mask */
+#define Z8536_CT_MODE_DCS(x) (((x) & 0x3) << 0) /* Duty Cycle */
+#define Z8536_CT_MODE_DCS_PULSE Z8536_CT_MODE_DCS(0) /* Pulse */
+#define Z8536_CT_MODE_DCS_ONESHOT Z8536_CT_MODE_DCS(1) /* One-Shot */
+#define Z8536_CT_MODE_DCS_SQRWAVE Z8536_CT_MODE_DCS(2) /* Square Wave */
+#define Z8536_CT_MODE_DCS_DO_NOT_USE Z8536_CT_MODE_DCS(3) /* Do Not Use */
+#define Z8536_CT_MODE_DCS_MASK Z8536_CT_MODE_DCS(3)
/* Port A/B Mode Specification registers */
#define Z8536_PA_MODE_REG 0x20
#define Z8536_PB_MODE_REG 0x28
-#define Z8536_PAB_MODE_PTS_BIT (0 << 6)/* Bit Port */
-#define Z8536_PAB_MODE_PTS_INPUT (1 << 6)/* Input Port */
-#define Z8536_PAB_MODE_PTS_OUTPUT (2 << 6)/* Output Port */
-#define Z8536_PAB_MODE_PTS_BIDIR (3 << 6)/* Bidirectional Port */
-#define Z8536_PAB_MODE_PTS_MASK (3 << 6)/* Port Type Select mask */
+#define Z8536_PAB_MODE_PTS(x) (((x) & 0x3) << 6) /* Port type */
+#define Z8536_PAB_MODE_PTS_BIT Z8536_PAB_MODE_PTS(0)/* Bit */
+#define Z8536_PAB_MODE_PTS_INPUT Z8536_PAB_MODE_PTS(1)/* Input */
+#define Z8536_PAB_MODE_PTS_OUTPUT Z8536_PAB_MODE_PTS(2)/* Output */
+#define Z8536_PAB_MODE_PTS_BIDIR Z8536_PAB_MODE_PTS(3)/* Bidir */
+#define Z8536_PAB_MODE_PTS_MASK Z8536_PAB_MODE_PTS(3)
#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */
#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */
#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */
-#define Z8536_PAB_MODE_PMS_DISABLE (0 << 1)/* Disable Pattern Match */
-#define Z8536_PAB_MODE_PMS_AND (1 << 1)/* "AND" mode */
-#define Z8536_PAB_MODE_PMS_OR (2 << 1)/* "OR" mode */
-#define Z8536_PAB_MODE_PMS_OR_PEV (3 << 1)/* "OR-Priority" mode */
-#define Z8536_PAB_MODE_PMS_MASK (3 << 1)/* Pattern Mode mask */
+#define Z8536_PAB_MODE_PMS(x) (((x) & 0x3) << 1) /* Pattern Mode */
+#define Z8536_PAB_MODE_PMS_DISABLE Z8536_PAB_MODE_PMS(0)/* Disabled */
+#define Z8536_PAB_MODE_PMS_AND Z8536_PAB_MODE_PMS(1)/* "AND" */
+#define Z8536_PAB_MODE_PMS_OR Z8536_PAB_MODE_PMS(2)/* "OR" */
+#define Z8536_PAB_MODE_PMS_OR_PEV Z8536_PAB_MODE_PMS(3)/* "OR-Priority" */
+#define Z8536_PAB_MODE_PMS_MASK Z8536_PAB_MODE_PMS(3)
#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */
#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */
/* Port A/B Handshake Specification registers */
#define Z8536_PA_HANDSHAKE_REG 0x21
#define Z8536_PB_HANDSHAKE_REG 0x29
-#define Z8536_PAB_HANDSHAKE_HST_INTER (0 << 6)/* Interlocked Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_STROBED (1 << 6)/* Strobed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_PULSED (2 << 6)/* Pulsed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_3WIRE (3 << 6)/* Three-Wire Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_MASK (3 << 6)/* Handshake Type mask */
-#define Z8536_PAB_HANDSHAKE_RWS_DISABLE (0 << 3)/* Req/Wait Disabled */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT (1 << 3)/* Output Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_INWAIT (3 << 3)/* Input Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_SPREQ (4 << 3)/* Special Request */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ (5 << 4)/* Output Request */
-#define Z8536_PAB_HANDSHAKE_RWS_INREQ (7 << 3)/* Input Request */
-#define Z8536_PAB_HANDSHAKE_RWS_MASK (7 << 3)/* Req/Wait mask */
+#define Z8536_PAB_HANDSHAKE_HST(x) (((x) & 0x3) << 6) /* Handshake Type */
+#define Z8536_PAB_HANDSHAKE_HST_INTER Z8536_PAB_HANDSHAKE_HST(0)/*Interlock*/
+#define Z8536_PAB_HANDSHAKE_HST_STROBED Z8536_PAB_HANDSHAKE_HST(1)/* Strobed */
+#define Z8536_PAB_HANDSHAKE_HST_PULSED Z8536_PAB_HANDSHAKE_HST(2)/* Pulsed */
+#define Z8536_PAB_HANDSHAKE_HST_3WIRE Z8536_PAB_HANDSHAKE_HST(3)/* 3-Wire */
+#define Z8536_PAB_HANDSHAKE_HST_MASK Z8536_PAB_HANDSHAKE_HST(3)
+#define Z8536_PAB_HANDSHAKE_RWS(x) (((x) & 0x7) << 3) /* Req/Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_DISABLE Z8536_PAB_HANDSHAKE_RWS(0)/* Disabled */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT Z8536_PAB_HANDSHAKE_RWS(1)/* Out Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_INWAIT Z8536_PAB_HANDSHAKE_RWS(3)/* In Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_SPREQ Z8536_PAB_HANDSHAKE_RWS(4)/* Special */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ Z8536_PAB_HANDSHAKE_RWS(5)/* Out Req */
+#define Z8536_PAB_HANDSHAKE_RWS_INREQ Z8536_PAB_HANDSHAKE_RWS(7)/* In Req */
+#define Z8536_PAB_HANDSHAKE_RWS_MASK Z8536_PAB_HANDSHAKE_RWS(7)
#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */
#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */
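The Z8536_CMD() style helpers place the 3-bit command code in bits
7..5 of a counter/timer command/status register. A sketch of issuing
a "clear IP & IUS" to counter/timer 1, assuming a board-specific
z8536_write() accessor (the chip is reached indirectly through an
address/data port pair, so a plain writeb() does not apply):

	z8536_write(dev, Z8536_CMD_CLR_IP_IUS, Z8536_CT_CMDSTAT_REG(0));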
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 0ff3139e5..46c050cc7 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -1168,7 +1168,7 @@ static void cls_uart_init(struct channel_t *ch)
/* Clear out UART and FIFO */
readb(&ch->ch_cls_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
&ch->ch_cls_uart->isr_fcr);
udelay(10);
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 4eb410e09..af2e835ef 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -48,7 +48,7 @@ static void dgnc_do_remap(struct dgnc_board *brd);
/*
* File operations permitted on Control/Management major.
*/
-static const struct file_operations dgnc_BoardFops = {
+static const struct file_operations dgnc_board_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
.open = dgnc_mgmt_open,
@@ -58,11 +58,11 @@ static const struct file_operations dgnc_BoardFops = {
/*
* Globals
*/
-uint dgnc_NumBoards;
-struct dgnc_board *dgnc_Board[MAXBOARDS];
+uint dgnc_num_boards;
+struct dgnc_board *dgnc_board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
-uint dgnc_Major;
+uint dgnc_major;
int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
/*
@@ -92,7 +92,7 @@ struct board_id {
unsigned int is_pci_express;
};
-static struct board_id dgnc_Ids[] = {
+static struct board_id dgnc_ids[] = {
{ PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -140,14 +140,14 @@ static void cleanup(bool sysfiles)
if (sysfiles)
dgnc_remove_driver_sysfiles(&dgnc_driver);
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
class_destroy(dgnc_class);
- unregister_chrdev(dgnc_Major, "dgnc");
+ unregister_chrdev(dgnc_major, "dgnc");
- for (i = 0; i < dgnc_NumBoards; ++i) {
- dgnc_remove_ports_sysfiles(dgnc_Board[i]);
- dgnc_tty_uninit(dgnc_Board[i]);
- dgnc_cleanup_board(dgnc_Board[i]);
+ for (i = 0; i < dgnc_num_boards; ++i) {
+ dgnc_remove_ports_sysfiles(dgnc_board[i]);
+ dgnc_tty_uninit(dgnc_board[i]);
+ dgnc_cleanup_board(dgnc_board[i]);
}
dgnc_tty_post_uninit();
@@ -217,12 +217,12 @@ static int dgnc_start(void)
*
* Register management/dpa devices
*/
- rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
+ rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
if (rc < 0) {
pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
return rc;
}
- dgnc_Major = rc;
+ dgnc_major = rc;
dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
if (IS_ERR(dgnc_class)) {
@@ -232,7 +232,7 @@ static int dgnc_start(void)
}
dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_Major, 0),
+ MKDEV(dgnc_major, 0),
NULL, "dgnc_mgmt");
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
@@ -262,11 +262,11 @@ static int dgnc_start(void)
return 0;
failed_tty:
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
failed_device:
class_destroy(dgnc_class);
failed_class:
- unregister_chrdev(dgnc_Major, "dgnc");
+ unregister_chrdev(dgnc_major, "dgnc");
return rc;
}
@@ -283,7 +283,7 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = dgnc_found_board(pdev, ent->driver_data);
if (rc == 0)
- dgnc_NumBoards++;
+ dgnc_num_boards++;
return rc;
}
@@ -346,7 +346,7 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
}
}
- dgnc_Board[brd->boardnum] = NULL;
+ dgnc_board[brd->boardnum] = NULL;
kfree(brd);
}
@@ -365,8 +365,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
unsigned long flags;
/* get the board structure and prep it */
- dgnc_Board[dgnc_NumBoards] = kzalloc(sizeof(*brd), GFP_KERNEL);
- brd = dgnc_Board[dgnc_NumBoards];
+ dgnc_board[dgnc_num_boards] = kzalloc(sizeof(*brd), GFP_KERNEL);
+ brd = dgnc_board[dgnc_num_boards];
if (!brd)
return -ENOMEM;
@@ -382,15 +382,15 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
/* store the info for the board we've found */
brd->magic = DGNC_BOARD_MAGIC;
- brd->boardnum = dgnc_NumBoards;
+ brd->boardnum = dgnc_num_boards;
brd->vendor = dgnc_pci_tbl[id].vendor;
brd->device = dgnc_pci_tbl[id].device;
brd->pdev = pdev;
brd->pci_bus = pdev->bus->number;
brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgnc_Ids[id].name;
- brd->maxports = dgnc_Ids[id].maxports;
- if (dgnc_Ids[id].is_pci_express)
+ brd->name = dgnc_ids[id].name;
+ brd->maxports = dgnc_ids[id].maxports;
+ if (dgnc_ids[id].is_pci_express)
brd->bd_flags |= BD_IS_PCI_EXPRESS;
brd->dpastatus = BD_NOFEP;
init_waitqueue_head(&brd->state_wait);
@@ -642,8 +642,8 @@ static void dgnc_poll_handler(ulong dummy)
unsigned long new_time;
/* Go thru each board, kicking off a tasklet for each if needed */
- for (i = 0; i < dgnc_NumBoards; i++) {
- brd = dgnc_Board[i];
+ for (i = 0; i < dgnc_num_boards; i++) {
+ brd = dgnc_board[i];
spin_lock_irqsave(&brd->bd_lock, flags);
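dgnc_start() above follows the usual create-in-order,
unwind-in-reverse pattern for a management char device. A generic
sketch of that shape (names shortened, not the driver's exact code):

	major = register_chrdev(0, "foo", &foo_fops);
	if (major < 0)
		return major;
	cls = class_create(THIS_MODULE, "foo");
	if (IS_ERR(cls)) {
		rc = PTR_ERR(cls);
		goto fail_chrdev;
	}
	dev = device_create(cls, NULL, MKDEV(major, 0), NULL, "foo_mgmt");
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto fail_class;
	}
	return 0;

fail_class:
	class_destroy(cls);
fail_chrdev:
	unregister_chrdev(major, "foo");
	return rc;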
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index e4be81b66..95ec729fa 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -202,18 +202,13 @@ struct dgnc_board {
* to our channels.
*/
- struct tty_driver SerialDriver;
- char SerialName[200];
- struct tty_driver PrintDriver;
- char PrintName[200];
+ struct tty_driver *serial_driver;
+ char serial_name[200];
+ struct tty_driver *print_driver;
+ char print_name[200];
- bool dgnc_Major_Serial_Registered;
- bool dgnc_Major_TransparentPrint_Registered;
-
- uint dgnc_Serial_Major;
- uint dgnc_TransparentPrint_Major;
-
- uint TtyRefCnt;
+ bool dgnc_major_serial_registered;
+ bool dgnc_major_transparent_print_registered;
u16 dpatype; /* The board "type",
* as defined by DPA
@@ -399,12 +394,12 @@ struct channel_t {
/*
* Our Global Variables.
*/
-extern uint dgnc_Major; /* Our driver/mgmt major */
+extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
-extern uint dgnc_NumBoards; /* Total number of boards */
-extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board
+extern uint dgnc_num_boards; /* Total number of boards */
+extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
* structs
*/
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index ba29a8d91..683c09839 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -111,7 +111,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_lock_irqsave(&dgnc_global_lock, flags);
memset(&ddi, 0, sizeof(ddi));
- ddi.dinfo_nboards = dgnc_NumBoards;
+ ddi.dinfo_nboards = dgnc_num_boards;
sprintf(ddi.dinfo_version, "%s", DG_PART);
spin_unlock_irqrestore(&dgnc_global_lock, flags);
@@ -131,27 +131,27 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
- if (brd < 0 || brd >= dgnc_NumBoards)
+ if (brd < 0 || brd >= dgnc_num_boards)
return -ENODEV;
memset(&di, 0, sizeof(di));
di.info_bdnum = brd;
- spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags);
+ spin_lock_irqsave(&dgnc_board[brd]->bd_lock, flags);
- di.info_bdtype = dgnc_Board[brd]->dpatype;
- di.info_bdstate = dgnc_Board[brd]->dpastatus;
+ di.info_bdtype = dgnc_board[brd]->dpatype;
+ di.info_bdstate = dgnc_board[brd]->dpastatus;
di.info_ioport = 0;
- di.info_physaddr = (ulong)dgnc_Board[brd]->membase;
- di.info_physsize = (ulong)dgnc_Board[brd]->membase
- - dgnc_Board[brd]->membase_end;
- if (dgnc_Board[brd]->state != BOARD_FAILED)
- di.info_nports = dgnc_Board[brd]->nasync;
+ di.info_physaddr = (ulong)dgnc_board[brd]->membase;
+ di.info_physsize = (ulong)dgnc_board[brd]->membase
+ - dgnc_board[brd]->membase_end;
+ if (dgnc_board[brd]->state != BOARD_FAILED)
+ di.info_nports = dgnc_board[brd]->nasync;
else
di.info_nports = 0;
- spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags);
+ spin_unlock_irqrestore(&dgnc_board[brd]->bd_lock, flags);
if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
@@ -174,14 +174,14 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
channel = ni.channel;
/* Verify boundaries on board */
- if (board >= dgnc_NumBoards)
+ if (board >= dgnc_num_boards)
return -ENODEV;
/* Verify boundaries on channel */
- if (channel >= dgnc_Board[board]->nasync)
+ if (channel >= dgnc_board[board]->nasync)
return -ENODEV;
- ch = dgnc_Board[board]->channels[channel];
+ ch = dgnc_board[board]->channels[channel];
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENODEV;
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 31ac437cb..ba57e9546 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -77,8 +77,6 @@ struct board_ops dgnc_neo_ops = {
.send_immediate_char = neo_send_immediate_char
};
-static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-
/*
* This function allows calls to ensure that all outstanding
* PCI writes have been completed, by doing a PCI read against
@@ -116,7 +114,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
+ &ch->ch_neo_uart->fctr);
/* Feed the UART our trigger levels */
writeb(8, &ch->ch_neo_uart->tfifo);
@@ -150,7 +149,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
/* Turn on UART enhanced bits */
writeb(efr, &ch->ch_neo_uart->efr);
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 4;
writeb(32, &ch->ch_neo_uart->rfifo);
@@ -187,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
/* Turn on UART enhanced bits */
writeb(efr, &ch->ch_neo_uart->efr);
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 4;
writeb(32, &ch->ch_neo_uart->rfifo);
@@ -225,7 +226,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
writeb(8, &ch->ch_neo_uart->tfifo);
ch->ch_t_tlevel = 8;
@@ -265,7 +267,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 0;
@@ -302,7 +305,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 0;
@@ -321,7 +325,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
{
/* if hardware flow control is set, then skip this whole thing */
- if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS)
+ if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) ||
+ ch->ch_c_cflag & CRTSCTS)
return;
/* Tell UART what start/stop chars it should be looking for */
@@ -351,8 +356,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
- if (time_after_eq(jiffies, ch->ch_stop_sending_break)
- || force) {
+ if (force ||
+ time_after_eq(jiffies, ch->ch_stop_sending_break)) {
unsigned char temp = readb(&ch->ch_neo_uart->lcr);
writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
@@ -374,14 +379,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
unsigned char cause;
unsigned long flags;
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (port >= brd->maxports)
- return;
-
ch = brd->channels[port];
- if (ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
/* Here we try to figure out what caused the interrupt to happen */
@@ -393,7 +392,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
break;
/*
- * Yank off the upper 2 bits, which just show that the FIFO's are enabled.
+ * Yank off the upper 2 bits,
+ * which just show that the FIFOs are enabled.
*/
isr &= ~(UART_17158_IIR_FIFO_ENABLED);
@@ -666,7 +666,8 @@ static void neo_param(struct tty_struct *tty)
};
/* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGNC_PRINT))
baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
else
baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
@@ -679,7 +680,8 @@ static void neo_param(struct tty_struct *tty)
jindex = baud;
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16))
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
baud = bauds[iindex][jindex];
else
baud = 0;
@@ -787,7 +789,8 @@ static void neo_param(struct tty_struct *tty)
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
/* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_output_flow_control(ch);
else
neo_set_ixon_flow_control(ch);
@@ -799,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
/* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_input_flow_control(ch);
else
neo_set_ixoff_flow_control(ch);
@@ -910,9 +914,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
struct dgnc_board *brd = voidbrd;
struct channel_t *ch;
int port = 0;
- int type = 0;
- int current_port;
- u32 tmp;
+ int type;
u32 uart_poll;
unsigned long flags;
unsigned long flags2;
@@ -947,29 +949,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/* At this point, we have at least SOMETHING to service, dig further... */
- current_port = 0;
-
/* Loop on each port */
while ((uart_poll & 0xff) != 0) {
- tmp = uart_poll;
-
- /* Check current port to see if it has interrupt pending */
- if ((tmp & dgnc_offset_table[current_port]) != 0) {
- port = current_port;
- type = tmp >> (8 + (port * 3));
- type &= 0x7;
- } else {
- current_port++;
- continue;
- }
+ type = uart_poll >> (8 + (port * 3));
+ type &= 0x7;
- /* Remove this port + type from uart_poll */
- uart_poll &= ~(dgnc_offset_table[port]);
-
- if (!type) {
- /* If no type, just ignore it, and move onto next port */
- continue;
- }
+ uart_poll &= ~(0x01 << port);
/* Switch on type of interrupt we have */
switch (type) {
@@ -981,7 +966,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/* Verify the port is in range. */
if (port >= brd->nasync)
- continue;
+ break;
ch = brd->channels[port];
neo_copy_data_from_uart_to_queue(ch);
@@ -991,14 +976,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
dgnc_check_queue_flow_control(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags2);
- continue;
+ break;
case UART_17158_RX_LINE_STATUS:
/*
* RXRDY and RX LINE Status (logic OR of LSR[4:1])
*/
neo_parse_lsr(brd, port);
- continue;
+ break;
case UART_17158_TXRDY:
/*
@@ -1014,14 +999,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* it should be, I was getting things like RXDY too. Weird.
*/
neo_parse_isr(brd, port);
- continue;
+ break;
case UART_17158_MSR:
/*
* MSR or flow control was seen.
*/
neo_parse_isr(brd, port);
- continue;
+ break;
default:
/*
@@ -1030,8 +1015,10 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* these once and awhile.
* Its harmless, just ignore it and move on.
*/
- continue;
+ break;
}
+
+ port++;
}
/*
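[Editor's note: the neo_intr() rework above drops dgnc_offset_table[] and derives everything from the UART_POLL value directly. The sketch below is a minimal userspace model of the decode the new loop assumes (low byte carries one pending bit per port, and each port has a 3-bit interrupt-type field starting at bit 8 + port * 3); it is illustrative only, not driver code.]

	#include <stdint.h>
	#include <stdio.h>

	static void decode_uart_poll(uint32_t uart_poll)
	{
		unsigned int port = 0;

		while ((uart_poll & 0xff) != 0) {
			/* 3-bit interrupt type for this port */
			unsigned int type = (uart_poll >> (8 + (port * 3))) & 0x7;

			/* clear this port's pending bit, as the driver does */
			uart_poll &= ~(0x01u << port);

			if (type)
				printf("port %u: interrupt type %u\n", port, type);
			port++;
		}
	}

	int main(void)
	{
		decode_uart_poll(0x903);	/* example: ports 0 and 1 pending */
		return 0;
	}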
@@ -1172,7 +1159,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
linestatus = 0;
/* Copy data from uart to the queue */
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
+ memcpy_fromio(ch->ch_rqueue + head,
+ &ch->ch_neo_uart->txrxburst, n);
/*
* Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
@@ -1225,7 +1213,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
* we don't miss our TX FIFO emptys.
*/
if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
- linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
+ linestatus &= ~(UART_LSR_THRE |
+ UART_17158_TX_AND_FIFO_CLR);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
@@ -1255,7 +1244,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
qleft++;
}
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
+ memcpy_fromio(ch->ch_rqueue + head,
+ &ch->ch_neo_uart->txrxburst, 1);
ch->ch_equeue[head] = (unsigned char)linestatus;
/* Ditch any remaining linestatus value. */
@@ -1328,7 +1318,8 @@ static void neo_flush_uart_write(struct channel_t *ch)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_neo_uart->isr_fcr);
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
@@ -1356,7 +1347,8 @@ static void neo_flush_uart_read(struct channel_t *ch)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
+ &ch->ch_neo_uart->isr_fcr);
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
@@ -1427,7 +1419,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+ writeb(ch->ch_wqueue[ch->ch_w_tail],
+ &ch->ch_neo_uart->txrx);
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
ch->ch_txcount++;
@@ -1494,7 +1487,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
- memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+ memcpy_toio(&ch->ch_neo_uart->txrxburst,
+ ch->ch_wqueue + tail, s);
/* Add and flip queue if needed */
tail = (tail + s) & WQUEUEMASK;
@@ -1628,7 +1622,8 @@ static void neo_uart_init(struct channel_t *ch)
/* Clear out UART and FIFO */
readb(&ch->ch_neo_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ &ch->ch_neo_uart->isr_fcr);
readb(&ch->ch_neo_uart->lsr);
readb(&ch->ch_neo_uart->msr);
@@ -1725,7 +1720,8 @@ static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
neo_pci_posting_flush(ch->ch_bd);
}
-static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address)
+static unsigned int neo_read_eeprom(unsigned char __iomem *base,
+ unsigned int address)
{
unsigned int enable;
unsigned int bits;
@@ -1783,10 +1779,15 @@ static void neo_vpd(struct dgnc_board *brd)
brd->vpd[(i * 2) + 1] = (a >> 8) & 0xff;
}
- if (((brd->vpd[0x08] != 0x82) /* long resource name tag */
- && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/
- || (brd->vpd[0x7F] != 0x78)) { /* small resource end tag */
-
+ /*
+ * brd->vpd holds resource tags at the following offsets:
+ * 0x08 : long resource name tag
+ * 0x10 : long resource name tag (PCI-66 files)
+ * 0x7F : small resource end tag
+ */
+ if (((brd->vpd[0x08] != 0x82) &&
+ (brd->vpd[0x10] != 0x82)) ||
+ (brd->vpd[0x7F] != 0x78)) {
memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
} else {
/* Search for the serial number */
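[Editor's note: the VPD check rewritten above reduces to a small predicate. A standalone sketch, mirroring the tag values in the new comment rather than copied from the driver:]

	#include <stdbool.h>

	/* 0x82 = long resource name tag, 0x78 = small resource end tag */
	static bool vpd_image_looks_valid(const unsigned char *vpd)
	{
		if (vpd[0x7F] != 0x78)
			return false;

		return vpd[0x08] == 0x82 || vpd[0x10] == 0x82;
	}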
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 74a072599..b8d41c561 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -33,7 +33,7 @@ static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards);
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
}
static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
@@ -189,19 +189,21 @@ static ssize_t dgnc_ports_msignals_show(struct device *p,
DGNC_VERIFY_BOARD(p, bd);
for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count) {
+ struct channel_t *ch = bd->channels[i];
+
+ if (ch->ch_open_count) {
count += snprintf(buf + count, PAGE_SIZE - count,
"%d %s %s %s %s %s %s\n",
- bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ ch->ch_portnum,
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
} else {
count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
+ "%d\n", ch->ch_portnum);
}
}
return count;
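[Editor's note: the msignals rewrite above follows the usual sysfs accumulate-into-buf pattern. A generic sketch of that pattern, with invented names:]

	#include <linux/device.h>
	#include <linux/kernel.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		ssize_t count = 0;
		int i;

		/* append one line per item, never writing past PAGE_SIZE */
		for (i = 0; i < 4; i++)
			count += snprintf(buf + count, PAGE_SIZE - count,
					  "item %d\n", i);

		return count;
	}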
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index bcd2bdfb9..4eeecc992 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -176,57 +176,42 @@ int dgnc_tty_preinit(void)
*/
int dgnc_tty_register(struct dgnc_board *brd)
{
- int rc = 0;
-
- brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
+ int rc;
- snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
+ brd->serial_driver = tty_alloc_driver(brd->maxports,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
- brd->SerialDriver.name = brd->SerialName;
- brd->SerialDriver.name_base = 0;
- brd->SerialDriver.major = 0;
- brd->SerialDriver.minor_start = 0;
- brd->SerialDriver.num = brd->maxports;
- brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->SerialDriver.init_termios = DgncDefaultTermios;
- brd->SerialDriver.driver_name = DRVSTR;
- brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->serial_driver))
+ return PTR_ERR(brd->serial_driver);
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's.
- */
- brd->SerialDriver.ttys = kcalloc(brd->maxports,
- sizeof(*brd->SerialDriver.ttys),
- GFP_KERNEL);
- if (!brd->SerialDriver.ttys)
- return -ENOMEM;
+ snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
- kref_init(&brd->SerialDriver.kref);
- brd->SerialDriver.termios = kcalloc(brd->maxports,
- sizeof(*brd->SerialDriver.termios),
- GFP_KERNEL);
- if (!brd->SerialDriver.termios)
- return -ENOMEM;
+ brd->serial_driver->name = brd->serial_name;
+ brd->serial_driver->name_base = 0;
+ brd->serial_driver->major = 0;
+ brd->serial_driver->minor_start = 0;
+ brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->serial_driver->init_termios = DgncDefaultTermios;
+ brd->serial_driver->driver_name = DRVSTR;
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
*/
- tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
+ tty_set_operations(brd->serial_driver, &dgnc_tty_ops);
- if (!brd->dgnc_Major_Serial_Registered) {
+ if (!brd->dgnc_major_serial_registered) {
/* Register tty devices */
- rc = tty_register_driver(&brd->SerialDriver);
+ rc = tty_register_driver(brd->serial_driver);
if (rc < 0) {
dev_dbg(&brd->pdev->dev,
"Can't register tty device (%d)\n", rc);
- return rc;
+ goto free_serial_driver;
}
- brd->dgnc_Major_Serial_Registered = true;
+ brd->dgnc_major_serial_registered = true;
}
/*
@@ -234,60 +219,55 @@ int dgnc_tty_register(struct dgnc_board *brd)
* again, separately so we don't get the LD confused about what major
* we are when we get into the dgnc_tty_open() routine.
*/
- brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
- snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
-
- brd->PrintDriver.name = brd->PrintName;
- brd->PrintDriver.name_base = 0;
- brd->PrintDriver.major = brd->SerialDriver.major;
- brd->PrintDriver.minor_start = 0x80;
- brd->PrintDriver.num = brd->maxports;
- brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->PrintDriver.init_termios = DgncDefaultTermios;
- brd->PrintDriver.driver_name = DRVSTR;
- brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
+ brd->print_driver = tty_alloc_driver(brd->maxports,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ if (IS_ERR(brd->print_driver)) {
+ rc = PTR_ERR(brd->print_driver);
+ goto unregister_serial_driver;
+ }
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's. Must be separated from
- * the Serial Driver so we don't get confused
- */
- brd->PrintDriver.ttys = kcalloc(brd->maxports,
- sizeof(*brd->PrintDriver.ttys),
- GFP_KERNEL);
- if (!brd->PrintDriver.ttys)
- return -ENOMEM;
- kref_init(&brd->PrintDriver.kref);
- brd->PrintDriver.termios = kcalloc(brd->maxports,
- sizeof(*brd->PrintDriver.termios),
- GFP_KERNEL);
- if (!brd->PrintDriver.termios)
- return -ENOMEM;
+ snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
+
+ brd->print_driver->name = brd->print_name;
+ brd->print_driver->name_base = 0;
+ brd->print_driver->major = brd->serial_driver->major;
+ brd->print_driver->minor_start = 0x80;
+ brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->print_driver->init_termios = DgncDefaultTermios;
+ brd->print_driver->driver_name = DRVSTR;
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
*/
- tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
+ tty_set_operations(brd->print_driver, &dgnc_tty_ops);
- if (!brd->dgnc_Major_TransparentPrint_Registered) {
+ if (!brd->dgnc_major_transparent_print_registered) {
/* Register Transparent Print devices */
- rc = tty_register_driver(&brd->PrintDriver);
+ rc = tty_register_driver(brd->print_driver);
if (rc < 0) {
dev_dbg(&brd->pdev->dev,
"Can't register Transparent Print device(%d)\n",
rc);
- return rc;
+ goto free_print_driver;
}
- brd->dgnc_Major_TransparentPrint_Registered = true;
+ brd->dgnc_major_transparent_print_registered = true;
}
- dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
- brd->dgnc_Serial_Major = brd->SerialDriver.major;
- brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
+ dgnc_BoardsByMajor[brd->serial_driver->major] = brd;
+
+ return 0;
+
+free_print_driver:
+ put_tty_driver(brd->print_driver);
+unregister_serial_driver:
+ tty_unregister_driver(brd->serial_driver);
+free_serial_driver:
+ put_tty_driver(brd->serial_driver);
return rc;
}
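[Editor's note: the register rework above moves from statically embedded tty_driver structs to tty_alloc_driver() with goto-based error unwinding. A minimal sketch of that pattern; example_board and example_tty_ops are hypothetical, while tty_alloc_driver(), tty_set_operations(), tty_register_driver() and put_tty_driver() are the real kernel APIs the hunk uses.]

	#include <linux/err.h>
	#include <linux/tty.h>
	#include <linux/tty_driver.h>

	struct example_board {
		unsigned int maxports;
		struct tty_driver *drv;
	};

	static const struct tty_operations example_tty_ops;	/* assumed defined elsewhere */

	static int example_tty_register(struct example_board *brd)
	{
		int rc;

		brd->drv = tty_alloc_driver(brd->maxports,
					    TTY_DRIVER_REAL_RAW |
					    TTY_DRIVER_DYNAMIC_DEV);
		if (IS_ERR(brd->drv))
			return PTR_ERR(brd->drv);

		tty_set_operations(brd->drv, &example_tty_ops);

		rc = tty_register_driver(brd->drv);
		if (rc < 0)
			goto free_driver;

		return 0;

	free_driver:
		put_tty_driver(brd->drv);	/* frees the allocation on failure */
		return rc;
	}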
@@ -364,12 +344,12 @@ int dgnc_tty_init(struct dgnc_board *brd)
{
struct device *classp;
- classp = tty_register_device(&brd->SerialDriver, i,
+ classp = tty_register_device(brd->serial_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_tun.un_sysfs = classp;
dgnc_create_tty_sysfs(&ch->ch_tun, classp);
- classp = tty_register_device(&brd->PrintDriver, i,
+ classp = tty_register_device(brd->print_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_pun.un_sysfs = classp;
dgnc_create_tty_sysfs(&ch->ch_pun, classp);
@@ -407,40 +387,32 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
{
int i = 0;
- if (brd->dgnc_Major_Serial_Registered) {
- dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
- brd->dgnc_Serial_Major = 0;
+ if (brd->dgnc_major_serial_registered) {
+ dgnc_BoardsByMajor[brd->serial_driver->major] = NULL;
for (i = 0; i < brd->nasync; i++) {
if (brd->channels[i])
dgnc_remove_tty_sysfs(brd->channels[i]->
ch_tun.un_sysfs);
- tty_unregister_device(&brd->SerialDriver, i);
+ tty_unregister_device(brd->serial_driver, i);
}
- tty_unregister_driver(&brd->SerialDriver);
- brd->dgnc_Major_Serial_Registered = false;
+ tty_unregister_driver(brd->serial_driver);
+ brd->dgnc_major_serial_registered = false;
}
- if (brd->dgnc_Major_TransparentPrint_Registered) {
- dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
- brd->dgnc_TransparentPrint_Major = 0;
+ if (brd->dgnc_major_transparent_print_registered) {
+ dgnc_BoardsByMajor[brd->print_driver->major] = NULL;
for (i = 0; i < brd->nasync; i++) {
if (brd->channels[i])
dgnc_remove_tty_sysfs(brd->channels[i]->
ch_pun.un_sysfs);
- tty_unregister_device(&brd->PrintDriver, i);
+ tty_unregister_device(brd->print_driver, i);
}
- tty_unregister_driver(&brd->PrintDriver);
- brd->dgnc_Major_TransparentPrint_Registered = false;
+ tty_unregister_driver(brd->print_driver);
+ brd->dgnc_major_transparent_print_registered = false;
}
- kfree(brd->SerialDriver.ttys);
- brd->SerialDriver.ttys = NULL;
- kfree(brd->SerialDriver.termios);
- brd->SerialDriver.termios = NULL;
- kfree(brd->PrintDriver.ttys);
- brd->PrintDriver.ttys = NULL;
- kfree(brd->PrintDriver.termios);
- brd->PrintDriver.termios = NULL;
+ put_tty_driver(brd->serial_driver);
+ put_tty_driver(brd->print_driver);
}
/*
@@ -606,6 +578,8 @@ void dgnc_input(struct channel_t *ch)
* or the amount of data the card actually has pending...
*/
while (n) {
+ unsigned char *rq_pos = ch->ch_rqueue + tail;
+ unsigned char *eq_pos = ch->ch_equeue + tail;
+
s = ((head >= tail) ? head : RQUEUESIZE) - tail;
s = min(s, n);
@@ -620,29 +594,20 @@ void dgnc_input(struct channel_t *ch)
*/
if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
for (i = 0; i < s; i++) {
- if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_BREAK);
- else if (*(ch->ch_equeue + tail + i) &
- UART_LSR_PE)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_PARITY);
- else if (*(ch->ch_equeue + tail + i) &
- UART_LSR_FE)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_FRAME);
- else
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_NORMAL);
+ unsigned char c = *(rq_pos + i);
+ unsigned char err = *(eq_pos + i);
+ char flag = TTY_NORMAL;
+
+ if (err & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (err & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (err & UART_LSR_FE)
+ flag = TTY_FRAME;
+
+ tty_insert_flip_char(tp->port, c, flag);
}
} else {
- tty_insert_flip_string(tp->port,
- ch->ch_rqueue + tail,
- s);
+ tty_insert_flip_string(tp->port, rq_pos, s);
}
tail += s;
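[Editor's note: the refactor above keeps data bytes (ch_rqueue) separate from their per-byte LSR error flags (ch_equeue) and maps the flags onto tty flip-buffer flags. That mapping in isolation, as a sketch:]

	#include <linux/serial_reg.h>
	#include <linux/tty.h>

	/* Map a saved line-status byte onto the matching tty flip-buffer flag. */
	static char lsr_to_tty_flag(unsigned char lsr)
	{
		if (lsr & UART_LSR_BI)
			return TTY_BREAK;
		if (lsr & UART_LSR_PE)
			return TTY_PARITY;
		if (lsr & UART_LSR_FE)
			return TTY_FRAME;

		return TTY_NORMAL;
	}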
@@ -1117,6 +1082,14 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
if (!ch->ch_wqueue)
ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_rqueue || !ch->ch_equeue || !ch->ch_wqueue) {
+ kfree(ch->ch_rqueue);
+ kfree(ch->ch_equeue);
+ kfree(ch->ch_wqueue);
+ ch->ch_rqueue = NULL;
+ ch->ch_equeue = NULL;
+ ch->ch_wqueue = NULL;
+
+ return -ENOMEM;
+ }
+
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags &= ~(CH_OPENING);
@@ -1255,7 +1228,7 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
if (file->f_flags & O_NONBLOCK)
break;
- if (tty->flags & (1 << TTY_IO_ERROR)) {
+ if (tty_io_error(tty)) {
retval = -EIO;
break;
}
@@ -1539,19 +1512,8 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
*/
static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
-
- if (!tty)
- return bytes_available;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return bytes_available;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return bytes_available;
+ struct un_t *un = tty->driver_data;
+ struct channel_t *ch = un->un_ch;
/*
* If its not the Transparent print device, return
@@ -2058,17 +2020,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value)
{
- int result;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- result = dgnc_get_mstat(ch);
-
- if (result < 0)
- return -ENXIO;
-
- return put_user(result, value);
+ return put_user(dgnc_get_mstat(ch), value);
}
/*
@@ -2529,6 +2481,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct dgnc_board *bd;
+ struct board_ops *ch_bd_ops;
struct channel_t *ch;
struct un_t *un;
int rc;
@@ -2550,6 +2503,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return -ENODEV;
+ ch_bd_ops = bd->bd_ops;
+
spin_lock_irqsave(&ch->ch_lock, flags);
if (un->un_open_count <= 0) {
@@ -2574,7 +2529,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2582,7 +2537,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2599,13 +2554,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2617,13 +2572,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2652,7 +2607,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
@@ -2689,7 +2644,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
+ ch_bd_ops->flush_uart_read(ch);
/* Force queue flow control to be released, if needed */
dgnc_check_queue_flow_control(ch);
}
@@ -2697,9 +2652,9 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
if (!(un->un_type == DGNC_PRINT)) {
ch->ch_w_head = ch->ch_w_tail;
- ch->ch_bd->bd_ops->flush_uart_write(ch);
+ ch_bd_ops->flush_uart_write(ch);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
ch->ch_tun.un_flags &=
~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_tun.un_flags_wait);
@@ -2731,14 +2686,14 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* flush rx */
ch->ch_flags &= ~CH_STOP;
ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
+ ch_bd_ops->flush_uart_read(ch);
/* Force queue flow control to be released, if needed */
dgnc_check_queue_flow_control(ch);
}
/* now wait for all the output to drain */
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2748,7 +2703,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TCSETAW:
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2771,7 +2726,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* set information for ditty */
if (cmd == (DIGI_SETAW)) {
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2804,7 +2759,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
else
ch->ch_flags &= ~(CH_LOOPBACK);
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2824,7 +2779,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return rc;
spin_lock_irqsave(&ch->ch_lock, flags);
dgnc_set_custom_speed(ch, new_rate);
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2845,7 +2800,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_immediate_char(ch, c);
+ ch_bd_ops->send_immediate_char(ch, c);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2933,13 +2888,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* Is the UART empty? Add that value to whats in our TX queue.
*/
- count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
+ count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
/*
* Figure out how much data the RealPort Server believes should
* be in our TX queue.
*/
- tdist = (buf.tIn - buf.tOut) & 0xffff;
+ tdist = (buf.tx_in - buf.tx_out) & 0xffff;
/*
* If we have more data than the RealPort Server believes we
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 523a2d34f..5b983e6f5 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -109,8 +109,8 @@ struct digi_info {
struct digi_getbuffer /* Struct for holding buffer use counts */
{
- unsigned long tIn;
- unsigned long tOut;
+ unsigned long tx_in;
+ unsigned long tx_out;
unsigned long rxbuf;
unsigned long txbuf;
unsigned long txdone;
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index e8cacaecf..3bd91758b 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -418,9 +418,9 @@ static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
struct fc_regs *preg = udc->p_regs;
- _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum-1].EP_DCR1, DCR1_EPn_REQEN);
+ _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPn_REQEN);
mdelay(DMA_DISABLE_TIME); /* DCR1_EPn_REQEN Clear */
- _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum-1].EP_DMA_CTRL, EPn_DMA_EN);
+ _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPn_DMA_EN);
}
/*-------------------------------------------------------------------------*/
@@ -909,7 +909,7 @@ static int _nbu2ss_epn_out_pio(
/* Copy of every four bytes */
for (i = 0; i < iWordLength; i++) {
pBuf32->dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
+ _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
pBuf32++;
}
result = iWordLength * sizeof(u32);
@@ -919,7 +919,7 @@ static int _nbu2ss_epn_out_pio(
if (data > 0) {
/*---------------------------------------------------------*/
/* Copy of fraction byte */
- Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
+ Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
for (i = 0 ; i < data ; i++)
pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
result += data;
@@ -1128,7 +1128,7 @@ static int _nbu2ss_epn_in_pio(
if (iWordLength > 0) {
for (i = 0; i < iWordLength; i++) {
_nbu2ss_writel(
- &preg->EP_REGS[ep->epnum-1].EP_WRITE
+ &preg->EP_REGS[ep->epnum - 1].EP_WRITE
, pBuf32->dw
);
@@ -1290,7 +1290,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
if (ep->epnum > 0) {
length = _nbu2ss_readl(
- &ep->udc->p_regs->EP_REGS[ep->epnum-1].EP_LEN_DCNT);
+ &ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
length &= EPn_LDATA;
if (length < ep->ep.maxpacket)
@@ -1463,7 +1463,7 @@ static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
bit_data = EP0_STL;
} else {
- data = _nbu2ss_readl(&preg->EP_REGS[epnum-1].EP_CONTROL);
+ data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
if ((data & EPn_EN) == 0)
return -1;
@@ -1558,7 +1558,7 @@ static void _nbu2ss_epn_set_stall(
; limit_cnt++) {
regdata = _nbu2ss_readl(
- &preg->EP_REGS[ep->epnum-1].EP_STATUS);
+ &preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((regdata & EPn_IN_DATA) == 0)
break;
@@ -1983,7 +1983,7 @@ static inline void _nbu2ss_epn_in_int(
if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
status =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_STATUS);
+ _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((status & EPn_IN_FULL) == 0) {
/*-----------------------------------------*/
@@ -2894,7 +2894,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
} else {
- data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_LEN_DCNT)
+ data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
& EPn_LDATA;
}
@@ -3051,7 +3051,7 @@ static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
}
/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned mA)
+static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
{
struct nbu2ss_udc *udc;
unsigned long flags;
@@ -3101,7 +3101,7 @@ static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
/*-------------------------------------------------------------------------*/
static int nbu2ss_gad_ioctl(
struct usb_gadget *pgadget,
- unsigned code,
+ unsigned int code,
unsigned long param)
{
return 0;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 4a2cc38de..39769e3a8 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -97,7 +97,7 @@
#define BIT30 0x40000000
#define BIT31 0x80000000
-#define TEST_FORCE_ENABLE (BIT18+BIT16)
+#define TEST_FORCE_ENABLE (BIT18 + BIT16)
#define INT_SEL BIT10
#define CONSTFS BIT09
@@ -125,15 +125,15 @@
/*------- (0x0008) USB Address Register */
#define USB_ADDR 0x007F0000
#define SOF_STATUS BIT15
-#define UFRAME (BIT14+BIT13+BIT12)
+#define UFRAME (BIT14 + BIT13 + BIT12)
#define FRAME 0x000007FF
#define USB_ADRS_SHIFT 16
/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT07+BIT06+BIT05+BIT04)
+#define SQUSET (BIT07 + BIT06 + BIT05 + BIT04)
-#define USB_SQUSET (BIT06+BIT05+BIT04)
+#define USB_SQUSET (BIT06 + BIT05 + BIT04)
/*------- (0x0010) TEST Control Register */
#define FORCEHS BIT02
@@ -196,7 +196,7 @@
#define RSUM_EN BIT01
#define USB_INT_EN_BIT \
- (EP0_EN|SPEED_MODE_EN|USB_RST_EN|SPND_EN|RSUM_EN)
+ (EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
/*------- (0x0028) EP0 Control Register */
#define EP0_STGSEL BIT18
@@ -205,9 +205,9 @@
#define EP0_PIDCLR BIT09
#define EP0_BCLR BIT08
#define EP0_DEND BIT07
-#define EP0_DW (BIT06+BIT05)
+#define EP0_DW (BIT06 + BIT05)
#define EP0_DW4 0
-#define EP0_DW3 (BIT06+BIT05)
+#define EP0_DW3 (BIT06 + BIT05)
#define EP0_DW2 BIT06
#define EP0_DW1 BIT05
@@ -238,7 +238,7 @@
#define STG_START_INT BIT01
#define SETUP_INT BIT00
-#define EP0_STATUS_RW_BIT (BIT16|BIT15|BIT11|0xFF)
+#define EP0_STATUS_RW_BIT (BIT16 | BIT15 | BIT11 | 0xFF)
/*------- (0x0030) EP0 Interrupt Enable Register */
#define EP0_PERR_NAK_EN BIT16
@@ -256,7 +256,7 @@
#define SETUP_EN BIT00
#define EP0_INT_EN_BIT \
- (EP0_OUT_OR_EN|EP0_OUT_EN|EP0_IN_EN|STG_END_EN|SETUP_EN)
+ (EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
/*------- (0x0034) EP0 Length Register */
#define EP0_LDATA 0x0000007F
@@ -270,7 +270,7 @@
#define EPn_BUF_SINGLE BIT30
#define EPn_DIR0 BIT26
-#define EPn_MODE (BIT25+BIT24)
+#define EPn_MODE (BIT25 + BIT24)
#define EPn_BULK 0
#define EPn_INTERRUPT BIT24
#define EPn_ISO BIT25
@@ -283,9 +283,9 @@
#define EPn_BCLR BIT09
#define EPn_CBCLR BIT08
#define EPn_DEND BIT07
-#define EPn_DW (BIT06+BIT05)
+#define EPn_DW (BIT06 + BIT05)
#define EPn_DW4 0
-#define EPn_DW3 (BIT06+BIT05)
+#define EPn_DW3 (BIT06 + BIT05)
#define EPn_DW2 BIT06
#define EPn_DW1 BIT05
@@ -324,7 +324,7 @@
#define EPn_IN_EMPTY BIT00 /* R */
#define EPn_INT_EN \
- (EPn_OUT_END_INT|EPn_OUT_INT|EPn_IN_END_INT|EPn_IN_INT)
+ (EPn_OUT_END_INT | EPn_OUT_INT | EPn_IN_END_INT | EPn_IN_INT)
/*------- (0x0048:) EPn Interrupt Enable Register */
#define EPn_OUT_END_EN BIT23 /* RW */
@@ -368,7 +368,7 @@
#define ARBITER_CTR BIT31 /* RW */
#define MCYCLE_RST BIT12 /* RW */
-#define ENDIAN_CTR (BIT09+BIT08) /* RW */
+#define ENDIAN_CTR (BIT09 + BIT08) /* RW */
#define ENDIAN_BYTE_SWAP BIT09
#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
@@ -376,7 +376,7 @@
#define HTRANS_MODE BIT04 /* RW */
#define WBURST_TYPE BIT02 /* RW */
-#define BURST_TYPE (BIT01+BIT00) /* RW */
+#define BURST_TYPE (BIT01 + BIT00) /* RW */
#define BURST_MAX_16 0
#define BURST_MAX_8 BIT00
#define BURST_MAX_4 BIT01
@@ -412,7 +412,7 @@
#define EPC_RST BIT00 /* RW */
/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT09+BIT08) /* R */
+#define LINESTATE (BIT09 + BIT08) /* R */
#define DM_LEVEL BIT09 /* R */
#define DP_LEVEL BIT08 /* R */
@@ -485,7 +485,7 @@ struct fc_regs {
struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
- u8 Reserved220[0x1000-0x220]; /* (0x0220:0x0FFF) Reserved */
+ u8 Reserved220[0x1000 - 0x220]; /* (0x0220:0x0FFF) Reserved */
u32 AHBSCTR; /* (0x1000) AHBSCTR */
u32 AHBMCTR; /* (0x1004) AHBMCTR */
@@ -494,16 +494,16 @@ struct fc_regs {
u32 EPCTR; /* (0x1010) EPCTR */
u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
- u8 Reserved1018[0x20-0x18]; /* (0x1018:0x101F) Reserved */
+ u8 Reserved1018[0x20 - 0x18]; /* (0x1018:0x101F) Reserved */
u32 USBSSVER; /* (0x1020) USBSSVER */
u32 USBSSCONF; /* (0x1024) USBSSCONF */
- u8 Reserved1028[0x110-0x28]; /* (0x1028:0x110F) Reserved */
+ u8 Reserved1028[0x110 - 0x28]; /* (0x1028:0x110F) Reserved */
struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
- u8 Reserved1200[0x1000-0x200]; /* Reserved */
+ u8 Reserved1200[0x1000 - 0x200]; /* Reserved */
} __aligned(32);
#define EP0_PACKETSIZE 64
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index ba9fc444b..82b46cd27 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -414,7 +414,7 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
while (len--) {
u8 i, data;
- data = *(u8 *) buf++;
+ data = *(u8 *)buf++;
/* set data bus */
for (i = 0; i < 8; ++i)
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index a6f091fb9..4dcea2e0b 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -141,7 +141,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len--) {
- data = *(u8 *) buf;
+ data = *(u8 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -170,7 +170,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u8 *) buf;
+ prev_data = *(u8 *)buf;
#endif
buf++;
}
@@ -191,7 +191,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -220,7 +220,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 241d7c6be..e4a355aef 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -1254,7 +1254,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -1283,7 +1283,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
@@ -1436,7 +1436,7 @@ static int __init fbtft_device_init(void)
}
strncpy(fbtft_device_param_gpios[i].name, p_name,
FBTFT_GPIO_NAME_SIZE - 1);
- fbtft_device_param_gpios[i++].gpio = (int) val;
+ fbtft_device_param_gpios[i++].gpio = (int)val;
if (i == MAX_GPIOS) {
pr_err("gpios parameter: exceeded max array size: %d\n",
MAX_GPIOS);
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
index 8214102f1..179536a9b 100644
--- a/drivers/staging/fsl-mc/README.txt
+++ b/drivers/staging/fsl-mc/README.txt
@@ -11,11 +11,11 @@ Contents summary
-Overview of DPAA2 objects
-DPAA2 Linux driver architecture overview
-bus driver
- -dprc driver
+ -DPRC driver
-allocator
- -dpio driver
+ -DPIO driver
-Ethernet
- -mac
+ -MAC
DPAA2 Overview
--------------
@@ -37,6 +37,9 @@ interfaces, an L2 switch, or accelerator instances.
The MC provides memory-mapped I/O command interfaces (MC portals)
which DPAA2 software drivers use to operate on DPAA2 objects:
+The diagram below shows an overview of the DPAA2 resource management
+architecture:
+
+--------------------------------------+
| OS |
| DPAA2 drivers |
@@ -77,13 +80,13 @@ DPIO objects.
Overview of DPAA2 Objects
-------------------------
-The section provides a brief overview of some key objects
-in the DPAA2 hardware. A simple scenario is described illustrating
-the objects involved in creating a network interfaces.
+This section provides a brief overview of some key DPAA2 objects.
+A simple scenario is described illustrating the objects involved
+in creating a network interface.
-DPRC (Datapath Resource Container)
- A DPRC is an container object that holds all the other
+ A DPRC is a container object that holds all the other
types of DPAA2 objects. In the example diagram below there
are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
in the container.
@@ -101,23 +104,23 @@ the objects involved in creating a network interfaces.
| |
+---------------------------------------------------------+
- From the point of view of an OS, a DPRC is bus-like. Like
- a plug-and-play bus, such as PCI, DPRC commands can be used to
- enumerate the contents of the DPRC, discover the hardware
- objects present (including mappable regions and interrupts).
+ From the point of view of an OS, a DPRC behaves like a plug-and-play
+ bus, such as PCI. DPRC commands can be used to enumerate the contents
+ of the DPRC and discover the hardware objects present (including
+ mappable regions and interrupts).
- dprc.1 (bus)
+ DPRC.1 (bus)
|
+--+--------+-------+-------+-------+
| | | | |
- dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1
- dpmcp.2 dpio.2
- dpmcp.3
+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
+ DPMCP.2 DPIO.2
+ DPMCP.3
Hardware objects can be created and destroyed dynamically, providing
the ability to hot plug/unplug objects in and out of the DPRC.
- A DPRC has a mappable mmio region (an MC portal) that can be used
+ A DPRC has a mappable MMIO region (an MC portal) that can be used
to send MC commands. It has an interrupt for status events (like
hotplug).
@@ -137,10 +140,11 @@ the objects involved in creating a network interfaces.
A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
queuing mechanisms, configuration mechanisms, buffer management,
physical ports, and interrupts. DPAA2 uses a more granular approach
- utilizing multiple hardware objects. Each object has specialized
- functions, and are used together by software to provide Ethernet network
- interface functionality. This approach provides efficient use of finite
- hardware resources, flexibility, and performance advantages.
+ utilizing multiple hardware objects. Each object provides specialized
+ functions. Groups of these objects are used by software to provide
+ Ethernet network interface functionality. This approach provides
+ efficient use of finite hardware resources, flexibility, and
+ performance advantages.
The diagram below shows the objects needed for a simple
network interface configuration on a system with 2 CPUs.
@@ -168,46 +172,52 @@ the objects involved in creating a network interfaces.
Below the objects are described. For each object a brief description
is provided along with a summary of the kinds of operations the object
- supports and a summary of key resources of the object (mmio regions
- and irqs).
+ supports and a summary of key resources of the object (MMIO regions
+ and IRQs).
-DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
hardware device that connects to an Ethernet PHY and allows
physical transmission and reception of Ethernet frames.
- -mmio regions: none
- -irqs: dpni link change
+ -MMIO regions: none
+ -IRQs: DPNI link change
-commands: set link up/down, link config, get stats,
- irq config, enable, reset
+ IRQ config, enable, reset
-DPNI (Datapath Network Interface): contains TX/RX queues,
- network interface configuration, and rx buffer pool configuration
- mechanisms.
- -mmio regions: none
- -irqs: link state
+ network interface configuration, and RX buffer pool configuration
+ mechanisms. The TX/RX queues are in memory and are identified by
+ queue number.
+ -MMIO regions: none
+ -IRQs: link state
-commands: port config, offload config, queue config,
- parse/classify config, irq config, enable, reset
+ parse/classify config, IRQ config, enable, reset
-DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
- packets and do hardware buffer pool management operations. For
- optimum performance there is typically DPIO per CPU. This allows
- each CPU to perform simultaneous enqueue/dequeue operations.
- -mmio regions: queue operations, buffer mgmt
- -irqs: data availability, congestion notification, buffer
+ packets and do hardware buffer pool management operations. The DPAA2
+ architecture separates the mechanism to access queues (the DPIO object)
+ from the queues themselves. The DPIO provides an MMIO interface to
+ enqueue/dequeue packets. To enqueue a frame, a descriptor that
+ includes the target queue number is written to the DPIO MMIO region.
+ There will typically be one DPIO assigned to each CPU. This allows all
+ CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
+ expected to be shared by different DPAA2 drivers.
+ -MMIO regions: queue operations, buffer management
+ -IRQs: data availability, congestion notification, buffer
pool depletion
- -commands: irq config, enable, reset
+ -commands: IRQ config, enable, reset
-DPBP (Datapath Buffer Pool): represents a hardware buffer
pool.
- -mmio regions: none
- -irqs: none
+ -MMIO regions: none
+ -IRQs: none
-commands: enable, reset
-DPMCP (Datapath MC Portal): provides an MC command portal.
Used by drivers to send commands to the MC to manage
objects.
- -mmio regions: MC command portal
- -irqs: command completion
- -commands: irq config, enable, reset
+ -MMIO regions: MC command portal
+ -IRQs: command completion
+ -commands: IRQ config, enable, reset
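[Editor's note: a pseudo-C illustration of the enqueue model the DPIO description implies: software composes a descriptor naming the target queue and writes it into the DPIO portal's MMIO region. Every name below is invented for illustration; the real portal format is not documented here.]

	#include <linux/io.h>
	#include <linux/types.h>

	struct fake_enqueue_descriptor {	/* hypothetical layout */
		u32 target_queue;	/* queue number to enqueue onto */
		u32 frame_len;
		u64 frame_addr;		/* IOVA of the frame */
	};

	static void fake_dpio_enqueue(void __iomem *portal,
				      const struct fake_enqueue_descriptor *desc)
	{
		/* conceptually: write the descriptor into the portal's
		 * enqueue register block; hardware consumes it from there
		 */
		memcpy_toio(portal, desc, sizeof(*desc));
	}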
Object Connections
------------------
@@ -268,22 +278,22 @@ of each driver follows.
| Stack |
+------------+ +------------+
| Allocator |. . . . . . . | Ethernet |
- |(dpmcp,dpbp)| | (dpni) |
+ |(DPMCP,DPBP)| | (DPNI) |
+-.----------+ +---+---+----+
. . ^ |
. . <data avail, | |<enqueue,
. . tx confirm> | | dequeue>
+-------------+ . | |
| DPRC driver | . +---+---V----+ +---------+
- | (dprc) | . . . . . .| DPIO driver| | MAC |
- +----------+--+ | (dpio) | | (dpmac) |
+ | (DPRC) | . . . . . .| DPIO driver| | MAC |
+ +----------+--+ | (DPIO) | | (DPMAC) |
| +------+-----+ +-----+---+
|<dev add/remove> | |
| | |
+----+--------------+ | +--+---+
- | mc-bus driver | | | PHY |
+ | MC-bus driver | | | PHY |
| | | |driver|
- | /fsl-mc@80c000000 | | +--+---+
+ | /soc/fsl-mc | | +--+---+
+-------------------+ | |
| |
================================ HARDWARE =========|=================|======
@@ -298,25 +308,27 @@ of each driver follows.
A brief description of each driver is provided below.
- mc-bus driver
+ MC-bus driver
-------------
- The mc-bus driver is a platform driver and is probed from an
- "/fsl-mc@xxxx" node in the device tree passed in by boot firmware.
- It is responsible for bootstrapping the DPAA2 kernel infrastructure.
+ The MC-bus driver is a platform driver and is probed from a
+ node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
+ firmware. It is responsible for bootstrapping the DPAA2 kernel
+ infrastructure.
Key functions include:
-registering a new bus type named "fsl-mc" with the kernel,
and implementing bus call-backs (e.g. match/uevent/dev_groups)
- -implemeting APIs for DPAA2 driver registration and for device
+ -implementing APIs for DPAA2 driver registration and for device
add/remove
- -creates an MSI irq domain
- -do a device add of the 'root' DPRC device, which is needed
- to bootstrap things
+ -creating an MSI IRQ domain
+ -doing a 'device add' to expose the 'root' DPRC, in turn triggering
+ a bind of the root DPRC to the DPRC driver
DPRC driver
-----------
- The dprc-driver is bound DPRC objects and does runtime management
+ The DPRC driver is bound to DPRC objects and does runtime management
of a bus instance. It performs the initial bus scan of the DPRC
- and handles interrupts for container events such as hot plug.
+ and handles interrupts for container events such as hot plug by
+ re-scanning the DPRC.
Allocator
----------
@@ -334,14 +346,20 @@ A brief description of each driver is provided below.
DPIO driver
-----------
The DPIO driver is bound to DPIO objects and provides services that allow
- other drivers such as the Ethernet driver to receive and transmit data.
+ other drivers such as the Ethernet driver to enqueue and dequeue data for
+ their respective objects.
Key services include:
-data availability notifications
-hardware queuing operations (enqueue and dequeue of data)
-hardware buffer pool management
+ To transmit a packet, the Ethernet driver puts data on a queue and
+ invokes a DPIO API. For receive, the Ethernet driver registers
+ a data availability notification callback. To dequeue a packet,
+ a DPIO API is used.
+
There is typically one DPIO object per physical CPU for optimum
- performance, allowing each CPU to simultaneously enqueue
+ performance, allowing different CPUs to simultaneously enqueue
and dequeue data.
The DPIO driver operates on behalf of all DPAA2 drivers
@@ -362,3 +380,7 @@ A brief description of each driver is provided below.
by the appropriate PHY driver via an mdio bus. The MAC driver
plays a role of being a proxy between the PHY driver and the
MC. It does this proxy via the MC commands to a DPMAC object.
+ If the PHY driver signals a link change, the MAC driver notifies
+ the MC via a DPMAC command. If a network interface is brought
+ up or down, the MC notifies the DPMAC driver via an interrupt and
+ the driver can take appropriate action.
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
index 389436891..54a8bc692 100644
--- a/drivers/staging/fsl-mc/TODO
+++ b/drivers/staging/fsl-mc/TODO
@@ -1,21 +1,8 @@
-* Decide if multiple root fsl-mc buses will be supported per Linux instance,
- and if so add support for this.
-
* Add at least one device driver for a DPAA2 object (child device of the
fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
driver support, which depends on drivers for several objects: DPNI,
DPIO, DPMAC. Other pre-requisites include:
- * interrupt support. for meaningful driver support we need
- interrupts, and thus need message interrupt support by the bus
- driver.
- -Note: this has dependencies on generic MSI support work
- in process upstream, see [1] and [2].
-
- * Management Complex (MC) command serialization. locking mechanisms
- are needed by drivers to serialize commands sent to the MC, including
- from atomic context.
-
* MC firmware uprev. The MC firmware upon which the fsl-mc
bus driver and DPAA2 object drivers are based is continuing
to evolve, so minor updates are needed to keep in sync with binary
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 2d97173f8..c31fe1bca 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -293,7 +293,7 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(0, 8, irq_index);
cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -334,7 +334,7 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
}
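[Editor's note: the command marshalling above relies on mc_enc()/mc_dec() packing fields into the 64-bit cmd.params words. Their semantics, reconstructed from usage; the real helpers live in the fsl-mc headers, and this is an equivalent sketch, not a copy.]

	#include <linux/types.h>

	#define EX_UMASK64(width) \
		((width) < 64 ? ((u64)1 << (width)) - 1 : ~0ULL)

	/* pack `val` into a 64-bit word, `width` bits wide at bit `lsoffset` */
	static inline u64 ex_mc_enc(int lsoffset, int width, u64 val)
	{
		return (val & EX_UMASK64(width)) << lsoffset;
	}

	/* extract the same field again */
	static inline u64 ex_mc_dec(u64 val, int lsoffset, int width)
	{
		return (val >> lsoffset) & EX_UMASK64(width);
	}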
@@ -502,6 +502,7 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
+ cmd.params[0] |= mc_enc(0, 32, *status);
cmd.params[0] |= mc_enc(32, 8, irq_index);
/* send command to mc*/
@@ -580,3 +581,75 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
EXPORT_SYMBOL(dpbp_get_attributes);
+
+/**
+ * dpbp_set_notifications() - Set notifications towards software
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @cfg: notifications configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
+ cmd_flags,
+ token);
+
+ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
+ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
+ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
+ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
+ cmd.params[2] |= mc_enc(0, 16, cfg->options);
+ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
+ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_get_notifications() - Get the notifications configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @cfg: notifications configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
+ cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
+ cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
+ cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
+ cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
+ cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
+ cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
+
+ return 0;
+}
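
The two functions added above are a symmetric encode/decode pair over the same five-parameter command layout. As a usage sketch only — the helper name, threshold values, and the caller's token and IOVA are illustrative, not from this patch — a DPAA2 driver could arm depletion notifications like this:

static int my_dpbp_arm_notifications(struct fsl_mc_io *mc_io, u16 token,
                                     u64 bpscn_iova)
{
        struct dpbp_notification_cfg cfg = {
                .depletion_entry = 64,  /* "depleted" below 64 free buffers */
                .depletion_exit = 128,  /* leaves "depleted" at >= 128 */
                .surplus_entry = 0,     /* 0 disables surplus tracking */
                .surplus_exit = 0,
                .message_iova = bpscn_iova,     /* 16B-aligned, DMA-able */
                .message_ctx = (u64)token,      /* echoed back in the BPSCN */
                .options = DPBP_NOTIF_OPT_COHERENT_WRITE,
        };

        return dpbp_set_notifications(mc_io, 0, token, &cfg);
}
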
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index a87e9f84f..c9b52dd7b 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -32,9 +32,9 @@
#ifndef _FSL_DPMCP_CMD_H
#define _FSL_DPMCP_CMD_H
-/* DPMCP Version */
-#define DPMCP_VER_MAJOR 2
-#define DPMCP_VER_MINOR 1
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
/* Command IDs */
#define DPMCP_CMDID_CLOSE 0x800
@@ -52,6 +52,5 @@
#define DPMCP_CMDID_SET_IRQ_MASK 0x014
#define DPMCP_CMDID_GET_IRQ_MASK 0x015
#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
-#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017
#endif /* _FSL_DPMCP_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index b0248f574..fd6dd4e07 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -213,7 +213,7 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(0, 8, irq_index);
cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -254,7 +254,7 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
}
@@ -435,37 +435,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
}
/**
- * dpmcp_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @status: Bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
* dpmcp_get_attributes - Retrieve DPMCP attributes.
*
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index 6df351f0c..fe79d4d92 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -82,12 +82,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
* struct dpmcp_irq_cfg - IRQ configuration
* @paddr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dpmcp_irq_cfg {
uint64_t paddr;
uint32_t val;
- int user_irq_id;
+ int irq_num;
};
int dpmcp_set_irq(struct fsl_mc_io *mc_io,
@@ -133,12 +133,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
uint8_t irq_index,
uint32_t *status);
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t status);
-
/**
* struct dpmcp_attr - Structure representing DPMCP attributes
* @id: DPMCP object ID
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 6552c2034..9b854fa8e 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -40,9 +40,9 @@
#ifndef _FSL_DPRC_CMD_H
#define _FSL_DPRC_CMD_H
-/* DPRC Version */
-#define DPRC_VER_MAJOR 4
-#define DPRC_VER_MINOR 0
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 5
+#define DPRC_MIN_VER_MINOR 0
/* Command IDs */
#define DPRC_CMDID_CLOSE 0x800
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 31488a7b9..7fc47173c 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -312,6 +312,15 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
continue;
}
+ /*
+ * add a quirk for all versions of dpseci < 4.0: none
+ * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
irq_count += obj_desc->irq_count;
dev_dbg(&mc_bus_dev->dev,
"Discovered object: type %s, id %d\n",
@@ -423,6 +432,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
goto out;
+ status = 0;
error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
&status);
if (error < 0) {
@@ -692,6 +702,25 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_msi_domain;
}
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
+ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
+ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ mc_bus->dprc_attr.version.major,
+ mc_bus->dprc_attr.version.minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
mutex_init(&mc_bus->scan_mutex);
/*
@@ -779,9 +808,7 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
static const struct fsl_mc_device_match_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dprc",
- .ver_major = DPRC_VER_MAJOR,
- .ver_minor = DPRC_VER_MINOR},
+ .obj_type = "dprc"},
{.vendor = 0x0},
};
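
Two related changes land in dprc-driver.c: dprc_probe() now reads the container's attributes and refuses firmware older than DPRC_MIN_VER_MAJOR.DPRC_MIN_VER_MINOR, and the match table stops keying on exact object versions. The gate is the usual lexicographic (major, minor) comparison; condensed into a standalone predicate for illustration (the helper name is hypothetical):

/* true when major.minor >= min_major.min_minor */
static bool mc_version_ge(u16 major, u16 minor, u16 min_major, u16 min_minor)
{
        return major > min_major ||
               (major == min_major && minor >= min_minor);
}

With the 5.0 floor above, a 4.9 container fails probe with -ENOTSUPP while 5.1 and 6.0 both pass; the same shape guards the DPMCP version floor in mc-allocator.c further down.
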
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 381b9a96a..a2c47377c 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -265,7 +265,7 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
*type = mc_dec(cmd.params[2], 32, 32);
return 0;
@@ -296,7 +296,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(32, 8, irq_index);
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -466,6 +466,7 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
+ cmd.params[0] |= mc_enc(0, 32, *status);
cmd.params[0] |= mc_enc(32, 8, irq_index);
/* send command to mc*/
@@ -948,6 +949,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
obj_desc->state = mc_dec(cmd.params[1], 32, 32);
obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
@@ -1042,6 +1044,7 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
@@ -1108,7 +1111,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(32, 8, irq_index);
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
cmd.params[2] |= mc_enc(32, 32, obj_id);
cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
@@ -1189,7 +1192,7 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
@@ -1437,14 +1440,8 @@ EXPORT_SYMBOL(dprc_set_obj_label);
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Endpoint 2 configuration parameters
* @cfg: Connection configuration. The connection configuration is ignored for
- * connections made to DPMAC objects, where rate is set according to
- * MAC configuration.
- * The committed rate is the guaranteed rate for the connection.
- * The maximum rate is an upper limit allowed for the connection; it is
- * expected to be equal or higher than the committed rate.
- * When committed and maximum rates are both zero, the connection is set
- * to "best effort" mode, having lower priority compared to connections
- * with committed or maximum rates.
+ * connections made to DPMAC objects, where rate is retrieved from the
+ * MAC configuration.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -1555,7 +1552,10 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
* @token: Token of DPRC object
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Returned endpoint 2 configuration parameters
-* @state: Returned link state: 1 - link is up, 0 - link is down
+* @state: Returned link state:
+* 1 - link is up;
+* 0 - link is down;
+* -1 - no connection (endpoint2 information is irrelevant)
*
* Return: '0' on Success; -ENAVAIL if connection does not exist.
*/
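
Note the semantic change to dprc_get_irq_status() (and its DPBP twin earlier in this patch): *status is now encoded into the request, turning it into an in/out parameter, which is why the IRQ handler in dprc-driver.c gained an explicit status = 0 before the call. A minimal caller sketch, assuming the pre-existing dprc_clear_irq_status() from this file and an illustrative wrapper name:

static int my_handle_dprc_irq(struct fsl_mc_io *mc_io, u16 token)
{
        u32 status = 0; /* must be initialized: the value is sent to the MC */
        int error;

        error = dprc_get_irq_status(mc_io, 0, token, 0, &status);
        if (error < 0)
                return error;
        if (status) /* acknowledge the observed causes (write-1-to-clear) */
                error = dprc_clear_irq_status(mc_io, 0, token, 0, status);
        return error;
}
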
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index 86f8543c2..fb08f22a7 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -39,7 +39,6 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- bool mutex_locked = false;
if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
goto out;
@@ -55,13 +54,12 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->max_count < 0))
- goto out;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count < 0 ||
res_pool->free_count > res_pool->max_count))
- goto out;
+ goto out_unlock;
resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
GFP_KERNEL);
@@ -69,7 +67,7 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
error = -ENOMEM;
dev_err(&mc_bus_dev->dev,
"Failed to allocate memory for fsl_mc_resource\n");
- goto out;
+ goto out_unlock;
}
resource->type = pool_type;
@@ -82,10 +80,9 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
res_pool->free_count++;
res_pool->max_count++;
error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
return error;
}
@@ -106,7 +103,6 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
struct fsl_mc_resource_pool *res_pool;
struct fsl_mc_resource *resource;
int error = -EINVAL;
- bool mutex_locked = false;
if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
goto out;
@@ -122,13 +118,12 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->max_count <= 0))
- goto out;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count))
- goto out;
+ goto out_unlock;
/*
* If the device is currently allocated, its resource is not
@@ -139,7 +134,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
dev_err(&mc_bus_dev->dev,
"Device %s cannot be removed from resource pool\n",
dev_name(&mc_dev->dev));
- goto out;
+ goto out_unlock;
}
list_del(&resource->node);
@@ -150,10 +145,9 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
devm_kfree(&mc_bus_dev->dev, resource);
mc_dev->resource = NULL;
error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
return error;
}
@@ -188,21 +182,19 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- bool mutex_locked = false;
BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
FSL_MC_NUM_POOL_TYPES);
*new_resource = NULL;
if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
- goto error;
+ goto out;
res_pool = &mc_bus->resource_pools[pool_type];
if (WARN_ON(res_pool->mc_bus != mc_bus))
- goto error;
+ goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
resource = list_first_entry_or_null(&res_pool->free_list,
struct fsl_mc_resource, node);
@@ -212,28 +204,26 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
dev_err(&mc_bus_dev->dev,
"No more resources of type %s left\n",
fsl_mc_pool_type_strings[pool_type]);
- goto error;
+ goto out_unlock;
}
if (WARN_ON(resource->type != pool_type))
- goto error;
+ goto out_unlock;
if (WARN_ON(resource->parent_pool != res_pool))
- goto error;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count))
- goto error;
+ goto out_unlock;
list_del(&resource->node);
INIT_LIST_HEAD(&resource->node);
res_pool->free_count--;
+ error = 0;
+out_unlock:
mutex_unlock(&res_pool->mutex);
*new_resource = resource;
- return 0;
-error:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
+out:
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
@@ -241,26 +231,23 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
void fsl_mc_resource_free(struct fsl_mc_resource *resource)
{
struct fsl_mc_resource_pool *res_pool;
- bool mutex_locked = false;
res_pool = resource->parent_pool;
if (WARN_ON(resource->type != res_pool->type))
- goto out;
+ return;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->free_count < 0 ||
res_pool->free_count >= res_pool->max_count))
- goto out;
+ goto out_unlock;
if (WARN_ON(!list_empty(&resource->node)))
- goto out;
+ goto out_unlock;
list_add_tail(&resource->node, &res_pool->free_list);
res_pool->free_count++;
-out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
@@ -306,10 +293,22 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
return error;
+ error = -EINVAL;
dpmcp_dev = resource->data;
if (WARN_ON(!dpmcp_dev))
goto error_cleanup_resource;
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
goto error_cleanup_resource;
@@ -722,20 +721,14 @@ static const struct fsl_mc_device_match_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpbp",
- .ver_major = DPBP_VER_MAJOR,
- .ver_minor = DPBP_VER_MINOR
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpmcp",
- .ver_major = DPMCP_VER_MAJOR,
- .ver_minor = DPMCP_VER_MINOR
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpcon",
- .ver_major = DPCON_VER_MAJOR,
- .ver_minor = DPCON_VER_MINOR
},
{.vendor = 0x0},
};
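
The mc-allocator.c rework trades the mutex_locked bookkeeping flag for the kernel's standard layered-label error path: failures before the lock jump to out, failures after it jump to out_unlock, and the mutex is released exactly once on the way out. The shape, condensed with illustrative names:

#include <linux/mutex.h>

struct my_pool {                        /* illustrative stand-in */
        struct mutex mutex;
        int free_count;
};

static int my_pool_take(struct my_pool *p)
{
        int error = -EINVAL;

        if (!p)                         /* nothing held yet: plain 'out' */
                goto out;

        mutex_lock(&p->mutex);
        if (p->free_count <= 0)         /* lock held: 'out_unlock' */
                goto out_unlock;

        p->free_count--;
        error = 0;
out_unlock:
        mutex_unlock(&p->mutex);
out:
        return error;
}
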
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index b59455661..405364307 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -40,8 +40,6 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
- bool major_version_mismatch = false;
- bool minor_version_mismatch = false;
if (WARN_ON(!fsl_mc_bus_exists()))
goto out;
@@ -64,32 +62,12 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
if (id->vendor == mc_dev->obj_desc.vendor &&
strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
- if (id->ver_major == mc_dev->obj_desc.ver_major) {
- found = true;
- if (id->ver_minor != mc_dev->obj_desc.ver_minor)
- minor_version_mismatch = true;
- } else {
- major_version_mismatch = true;
- }
+ found = true;
break;
}
}
- if (major_version_mismatch) {
- dev_warn(dev,
- "Major version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- } else if (minor_version_mismatch) {
- dev_warn(dev,
- "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- }
-
out:
dev_dbg(dev, "%smatched\n", found ? "" : "not ");
return found;
@@ -251,11 +229,10 @@ static bool fsl_mc_is_root_dprc(struct device *dev)
return dev == root_dprc_dev;
}
-static int get_dprc_icid(struct fsl_mc_io *mc_io,
- int container_id, u16 *icid)
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
{
u16 dprc_handle;
- struct dprc_attributes attr;
int error;
error = dprc_open(mc_io, 0, container_id, &dprc_handle);
@@ -264,15 +241,14 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
return error;
}
- memset(&attr, 0, sizeof(attr));
- error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
if (error < 0) {
dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
error);
goto common_cleanup;
}
- *icid = attr.icid;
error = 0;
common_cleanup:
@@ -280,6 +256,34 @@ common_cleanup:
return error;
}
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u16 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int get_dprc_version(struct fsl_mc_io *mc_io,
+ int container_id, u16 *major, u16 *minor)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0) {
+ *major = attr.version.major;
+ *minor = attr.version.minor;
+ }
+
+ return error;
+}
+
static int translate_mc_addr(struct fsl_mc_device *mc_dev,
enum dprc_region_type mc_region_type,
u64 mc_offset, phys_addr_t *phys_addr)
@@ -376,6 +380,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].end = regions[i].start + region_desc.size - 1;
regions[i].name = "fsl-mc object MMIO region";
regions[i].flags = IORESOURCE_IO;
+ if (region_desc.flags & DPRC_REGION_CACHEABLE)
+ regions[i].flags |= IORESOURCE_CACHEABLE;
}
mc_dev->regions = regions;
@@ -491,6 +497,10 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
goto error_cleanup_dev;
}
+ /* Objects are coherent, unless the 'no shareability' flag is set. */
+ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
+
/*
* The device-specific probe callback will get invoked by device_add()
*/
@@ -722,20 +732,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
"Freescale Management Complex Firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
- if (mc_version.major < MC_VER_MAJOR) {
- dev_err(&pdev->dev,
- "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- error = -ENOTSUPP;
- goto error_cleanup_mc_io;
- }
-
- if (mc_version.major > MC_VER_MAJOR) {
- dev_warn(&pdev->dev,
- "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- }
-
error = get_mc_addr_translation_ranges(&pdev->dev,
&mc->translation_ranges,
&mc->num_translation_ranges);
@@ -749,11 +745,15 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
}
+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+ error = get_dprc_version(mc_io, container_id,
+ &obj_desc.ver_major, &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
strcpy(obj_desc.type, "dprc");
obj_desc.id = container_id;
- obj_desc.ver_major = DPRC_VER_MAJOR;
- obj_desc.ver_minor = DPRC_VER_MINOR;
obj_desc.irq_count = 1;
obj_desc.region_count = 0;
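
Taken together, the mc-bus.c hunks mean the bus no longer hard-fails on an MC firmware major-version mismatch, the root DPRC's version is read from the hardware instead of assumed from driver headers, and each object gets coherent DMA set up unless its container flagged it otherwise (which the dpseci-before-4.0 quirk in dprc-driver.c does). For reference, the arch_setup_dma_ops() parameters on this kernel are (dev, dma_base, size, iommu_ops, coherent), so the call above reads as:

if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
        arch_setup_dma_ops(&mc_dev->dev,
                           0,           /* dma_base: no offset */
                           0,           /* size: no windowing */
                           NULL,        /* iommu_ops: none wired up here */
                           true);       /* mark the device DMA-coherent */
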
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index 3a8258ff4..e202b2b88 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -37,10 +37,8 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
/*
* set_desc should not be set by the caller
*/
- if (WARN_ON(ops->set_desc))
- return;
-
- ops->set_desc = fsl_mc_msi_set_desc;
+ if (ops->set_desc == NULL)
+ ops->set_desc = fsl_mc_msi_set_desc;
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
@@ -65,7 +63,7 @@ static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
msi_desc->msg.address_lo;
irq_cfg.val = msi_desc->msg.data;
- irq_cfg.user_irq_id = msi_desc->irq;
+ irq_cfg.irq_num = msi_desc->irq;
if (owner_mc_dev == mc_bus_dev) {
/*
@@ -129,10 +127,8 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
/*
* irq_write_msi_msg should not be set by the caller
*/
- if (WARN_ON(chip->irq_write_msi_msg))
- return;
-
- chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+ if (chip->irq_write_msi_msg == NULL)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
}
/**
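
Both mc-msi.c hunks move from "WARN and bail if the caller pre-set a callback" to "treat a pre-set callback as an override and only install the bus default when the slot is empty". Condensed into a single illustrative helper (the combined function is not in the patch):

static void my_msi_fill_defaults(struct msi_domain_info *info)
{
        if (!info->ops->set_desc)
                info->ops->set_desc = fsl_mc_msi_set_desc;
        if (!info->chip->irq_write_msi_msg)
                info->chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
}
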
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index efa9bf33c..c57b454a2 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -34,7 +34,7 @@
/* DPBP Version */
#define DPBP_VER_MAJOR 2
-#define DPBP_VER_MINOR 1
+#define DPBP_VER_MINOR 2
/* Command IDs */
#define DPBP_CMDID_CLOSE 0x800
@@ -57,4 +57,6 @@
#define DPBP_CMDID_GET_IRQ_STATUS 0x016
#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index 37ed95143..e14e85a5d 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -85,12 +85,12 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
* struct dpbp_irq_cfg - IRQ configuration
* @addr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dpbp_irq_cfg {
u64 addr;
u32 val;
- int user_irq_id;
+ int irq_num;
};
int dpbp_set_irq(struct fsl_mc_io *mc_io,
@@ -168,6 +168,53 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpbp_attr *attr);
+/**
+ * DPBP notifications options
+ */
+
+/**
+ * BPSCN write will attempt to allocate into a cache (coherent write)
+ */
+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+
+/**
+ * struct dpbp_notification_cfg - Structure representing DPBP notifications
+ * towards software
+ * @depletion_entry: below this threshold the pool is "depleted";
+ * set it to '0' to disable it
+ * @depletion_exit: greater than or equal to this threshold the pool exits its
+ * "depleted" state
+ * @surplus_entry: above this threshold the pool is in "surplus" state;
+ * set it to '0' to disable it
+ * @surplus_exit: less than or equal to this threshold the pool exits its
+ * "surplus" state
+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned.
+ * @message_ctx: The context that will be part of the BPSCN message and will
+ * be written to 'message_iova'
+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+ */
+struct dpbp_notification_cfg {
+ u32 depletion_entry;
+ u32 depletion_exit;
+ u32 surplus_entry;
+ u32 surplus_exit;
+ u64 message_iova;
+ u64 message_ctx;
+ u16 options;
+};
+
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
/** @} */
#endif /* __FSL_DPBP_H */
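
The entry/exit threshold pairs documented above form hysteresis bands: a pool that drops below depletion_entry free buffers enters the "depleted" state and stays there until it climbs back to depletion_exit, so a pool hovering around a single threshold cannot generate a notification storm. An illustrative model of that semantics (not driver code):

static bool update_depleted(bool depleted, u32 free_bufs,
                            const struct dpbp_notification_cfg *cfg)
{
        if (!depleted && free_bufs < cfg->depletion_entry)
                return true;            /* enter "depleted" */
        if (depleted && free_bufs >= cfg->depletion_exit)
                return false;           /* exit "depleted" */
        return depleted;                /* inside the hysteresis band */
}
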
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 94c492706..593b2bbe7 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -94,11 +94,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
*/
#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
-/* IOMMU bypass - indicates whether objects of this container are permitted
- * to bypass the IOMMU.
- */
-#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
-
/* AIOP - Indicates that container belongs to AIOP. */
#define DPRC_CFG_OPT_AIOP 0x00000020
@@ -173,12 +168,12 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
* struct dprc_irq_cfg - IRQ configuration
* @paddr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dprc_irq_cfg {
phys_addr_t paddr;
u32 val;
- int user_irq_id;
+ int irq_num;
};
int dprc_set_irq(struct fsl_mc_io *mc_io,
@@ -353,6 +348,14 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
#define DPRC_OBJ_STATE_PLUGGED 0x00000002
/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * The object generates memory accesses that are non-coherent with other
+ * masters; the user is responsible for proper memory handling through
+ * IOMMU configuration.
+ */
+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+/**
* struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
* @type: Type of object: NULL terminated string
* @id: ID of logical object resource
@@ -363,6 +366,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
* @region_count: Number of mappable regions supported by the object
* @state: Object state: combination of DPRC_OBJ_STATE_ states
* @label: Object label
+ * @flags: Object's flags
*/
struct dprc_obj_desc {
char type[16];
@@ -374,6 +378,7 @@ struct dprc_obj_desc {
u8 region_count;
u32 state;
char label[16];
+ u16 flags;
};
int dprc_get_obj(struct fsl_mc_io *mc_io,
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
index ee5f1d2bf..cab1ae90f 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-private.h
@@ -94,12 +94,14 @@ struct fsl_mc_resource_pool {
* from the physical DPRC.
* @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
};
#define to_fsl_mc_bus(_mc_dev) \
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 4cd3ed3ee..8b23a553f 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -35,7 +35,7 @@
/*
* private helper fn to determine if check is in open interval (lo,hi)
*/
-static bool addr_check(unsigned check, unsigned lo, unsigned hi)
+static bool addr_check(unsigned int check, unsigned int lo, unsigned int hi)
{
return check - (lo + 1) < (hi - 1) - lo;
}
@@ -64,7 +64,7 @@ void dma_fifo_init(struct dma_fifo *fifo)
* The 'apparent' size will be rounded up to next greater aligned size.
* Returns 0 if no error, otherwise an error code
*/
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
+int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
int tx_limit, int open_limit, gfp_t gfp_mask)
{
int capacity;
@@ -190,7 +190,7 @@ int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
*/
int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
{
- unsigned len, n, ofs, l, limit;
+ unsigned int len, n, ofs, l, limit;
if (!fifo->data)
return -ENOENT;
@@ -210,7 +210,7 @@ int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
n = len;
ofs = fifo->out % fifo->capacity;
l = fifo->capacity - ofs;
- limit = min_t(unsigned, l, fifo->tx_limit);
+ limit = min_t(unsigned int, l, fifo->tx_limit);
if (n > limit) {
n = limit;
fifo->out += limit;
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
index 410988224..37a91c6a1 100644
--- a/drivers/staging/fwserial/dma_fifo.h
+++ b/drivers/staging/fwserial/dma_fifo.h
@@ -45,9 +45,9 @@
#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
struct dma_fifo {
- unsigned in;
- unsigned out; /* updated when dma is pended */
- unsigned done; /* updated upon dma completion */
+ unsigned int in;
+ unsigned int out; /* updated when dma is pended */
+ unsigned int done; /* updated upon dma completion */
struct {
unsigned corrupt:1;
};
@@ -55,7 +55,7 @@ struct dma_fifo {
int guard; /* ofs of guard area */
int capacity; /* size + reserved */
int avail; /* # of unused bytes in fifo */
- unsigned align; /* must be power of 2 */
+ unsigned int align; /* must be power of 2 */
int tx_limit; /* max # of bytes per dma transaction */
int open_limit; /* max # of outstanding allowed */
int open; /* # of outstanding dma transactions */
@@ -66,9 +66,9 @@ struct dma_fifo {
struct dma_pending {
struct list_head link;
void *data;
- unsigned len;
- unsigned next;
- unsigned out;
+ unsigned int len;
+ unsigned int next;
+ unsigned int out;
};
static inline void dp_mark_completed(struct dma_pending *dp)
@@ -82,7 +82,7 @@ static inline bool dp_is_completed(struct dma_pending *dp)
}
void dma_fifo_init(struct dma_fifo *fifo);
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
+int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
int tx_limit, int open_limit, gfp_t gfp_mask);
void dma_fifo_free(struct dma_fifo *fifo);
void dma_fifo_reset(struct dma_fifo *fifo);
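
The dma_fifo changes are the checkpatch-preferred spelling of bare unsigned, but they are worth reading next to addr_check(): the in/out/done counters are free-running unsigned ints, and the open-interval test relies on modulo-2^32 arithmetic collapsing to a single unsigned comparison even when the interval straddles the wrap. A standalone illustration (not driver code):

/* true when 'check' lies strictly inside (lo, hi), modulo 2^32 */
static bool in_open_interval(unsigned int check, unsigned int lo,
                             unsigned int hi)
{
        return check - (lo + 1) < (hi - 1) - lo;
}

/*
 * With lo = 0xfffffff0 and hi = 0x10, an interval across the wrap:
 *   in_open_interval(0xfffffff8, lo, hi) -> true
 *   in_open_interval(0x00000008, lo, hi) -> true
 *   in_open_interval(0x00000020, lo, hi) -> false
 */
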
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 9b23b5c95..c241c0ae3 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -132,7 +132,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
#ifdef FWTTY_PROFILING
-static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
+static void fwtty_profile_fifo(struct fwtty_port *port, unsigned int *stat)
{
spin_lock_bh(&port->lock);
fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
@@ -143,7 +143,7 @@ static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
{
/* for each stat, print sum of 0 to 2^k, then individually */
int k = 4;
- unsigned sum;
+ unsigned int sum;
int j;
char t[10];
@@ -303,9 +303,10 @@ static void fwtty_restart_tx(struct fwtty_port *port)
* Note: in loopback, the port->lock is being held. Only use functions that
* don't attempt to reclaim the port->lock.
*/
-static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
+static void fwtty_update_port_status(struct fwtty_port *port,
+ unsigned int status)
{
- unsigned delta;
+ unsigned int delta;
struct tty_struct *tty;
/* simulated LSR/MSR status from remote */
@@ -396,9 +397,9 @@ static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
*
* Note: caller must be holding port lock
*/
-static unsigned __fwtty_port_line_status(struct fwtty_port *port)
+static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
{
- unsigned status = 0;
+ unsigned int status = 0;
/* TODO: add module param to tie RNG to DTR as well */
@@ -424,7 +425,7 @@ static int __fwtty_write_port_status(struct fwtty_port *port)
{
struct fwtty_peer *peer;
int err = -ENOENT;
- unsigned status = __fwtty_port_line_status(port);
+ unsigned int status = __fwtty_port_line_status(port);
rcu_read_lock();
peer = rcu_dereference(port->peer);
@@ -454,7 +455,7 @@ static int fwtty_write_port_status(struct fwtty_port *port)
static void fwtty_throttle_port(struct fwtty_port *port)
{
struct tty_struct *tty;
- unsigned old;
+ unsigned int old;
tty = tty_port_tty_get(&port->port);
if (!tty)
@@ -540,7 +541,7 @@ static void fwtty_emit_breaks(struct work_struct *work)
static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
{
int c, n = len;
- unsigned lsr;
+ unsigned int lsr;
int err = 0;
fwtty_dbg(port, "%d\n", n);
@@ -635,7 +636,7 @@ static void fwtty_port_handler(struct fw_card *card,
if (addr != port->rx_handler.offset || len != 4) {
rcode = RCODE_ADDRESS_ERROR;
} else {
- fwtty_update_port_status(port, *(unsigned *)data);
+ fwtty_update_port_status(port, *(unsigned int *)data);
rcode = RCODE_COMPLETE;
}
break;
@@ -828,7 +829,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch)
rcu_read_unlock();
}
-static struct fwtty_port *fwtty_port_get(unsigned index)
+static struct fwtty_port *fwtty_port_get(unsigned int index)
{
struct fwtty_port *port;
@@ -934,9 +935,9 @@ static int fwtty_port_carrier_raised(struct tty_port *tty_port)
return rc;
}
-static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty)
+static unsigned int set_termios(struct fwtty_port *port, struct tty_struct *tty)
{
- unsigned baud, frame;
+ unsigned int baud, frame;
baud = tty_termios_baud_rate(&tty->termios);
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
@@ -988,7 +989,7 @@ static int fwtty_port_activate(struct tty_port *tty_port,
struct tty_struct *tty)
{
struct fwtty_port *port = to_port(tty_port, port);
- unsigned baud;
+ unsigned int baud;
int err;
set_bit(TTY_IO_ERROR, &tty->flags);
@@ -1264,7 +1265,7 @@ static int set_serial_info(struct fwtty_port *port,
return 0;
}
-static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
+static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct fwtty_port *port = tty->driver_data;
@@ -1297,7 +1298,7 @@ static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct fwtty_port *port = tty->driver_data;
- unsigned baud;
+ unsigned int baud;
spin_lock_bh(&port->lock);
baud = set_termios(port, tty);
@@ -1305,7 +1306,7 @@ static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
if ((baud == 0) && (old->c_cflag & CBAUD)) {
port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
} else if ((baud != 0) && !(old->c_cflag & CBAUD)) {
- if (C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (C_CRTSCTS(tty) || !tty_throttled(tty))
port->mctrl |= TIOCM_DTR | TIOCM_RTS;
else
port->mctrl |= TIOCM_DTR;
@@ -1369,7 +1370,7 @@ static int fwtty_break_ctl(struct tty_struct *tty, int state)
static int fwtty_tiocmget(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
- unsigned tiocm;
+ unsigned int tiocm;
spin_lock_bh(&port->lock);
tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
@@ -1380,7 +1381,8 @@ static int fwtty_tiocmget(struct tty_struct *tty)
return tiocm;
}
-static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear)
+static int fwtty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
{
struct fwtty_port *port = tty->driver_data;
@@ -1699,7 +1701,7 @@ static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
spin_unlock_bh(&peer->port->lock);
- if (port->port.console && port->fwcon_ops->notify != NULL)
+ if (port->port.console && port->fwcon_ops->notify)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
@@ -1806,7 +1808,7 @@ static void fwserial_release_port(struct fwtty_port *port, bool reset)
RCU_INIT_POINTER(port->peer, NULL);
spin_unlock_bh(&port->lock);
- if (port->port.console && port->fwcon_ops->notify != NULL)
+ if (port->port.console && port->fwcon_ops->notify)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
}
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 6fa936501..30b2481fe 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -22,7 +22,7 @@
#ifdef FWTTY_PROFILING
#define DISTRIBUTION_MAX_SIZE 8192
#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void fwtty_profile_data(unsigned stat[], unsigned val)
+static inline void fwtty_profile_data(unsigned int stat[], unsigned int val)
{
int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
++stat[n];
@@ -78,7 +78,7 @@ struct fwtty_peer {
u64 guid;
int generation;
int node_id;
- unsigned speed;
+ unsigned int speed;
int max_payload;
u64 mgmt_addr;
@@ -160,17 +160,17 @@ struct fwserial_mgmt_pkt {
#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
struct stats {
- unsigned xchars;
- unsigned dropped;
- unsigned tx_stall;
- unsigned fifo_errs;
- unsigned sent;
- unsigned lost;
- unsigned throttled;
- unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
- unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
- unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
- unsigned unthrottle[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int xchars;
+ unsigned int dropped;
+ unsigned int tx_stall;
+ unsigned int fifo_errs;
+ unsigned int sent;
+ unsigned int lost;
+ unsigned int throttled;
+ unsigned int reads[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int writes[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int txns[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int unthrottle[DISTRIBUTION_MAX_INDEX + 1];
};
struct fwconsole_ops {
@@ -237,7 +237,7 @@ struct fwconsole_ops {
struct fwtty_port {
struct tty_port port;
struct device *device;
- unsigned index;
+ unsigned int index;
struct fw_serial *serial;
struct fw_address_handler rx_handler;
@@ -246,21 +246,21 @@ struct fwtty_port {
wait_queue_head_t wait_tx;
struct delayed_work emit_breaks;
- unsigned cps;
+ unsigned int cps;
unsigned long break_last;
struct work_struct hangup;
- unsigned mstatus;
+ unsigned int mstatus;
spinlock_t lock;
- unsigned mctrl;
+ unsigned int mctrl;
struct delayed_work drain;
struct dma_fifo tx_fifo;
int max_payload;
- unsigned status_mask;
- unsigned ignore_mask;
- unsigned break_ctl:1,
+ unsigned int status_mask;
+ unsigned int ignore_mask;
+ unsigned int break_ctl:1,
write_only:1,
overrun:1,
loopback:1;
@@ -349,7 +349,7 @@ extern struct tty_driver *fwtty_driver;
* being used for isochronous traffic)
* 2) isochronous arbitration always wins.
*/
-static inline int link_speed_to_max_payload(unsigned speed)
+static inline int link_speed_to_max_payload(unsigned int speed)
{
/* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
return min(512 << speed, 4096);
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 6bedd6683..400969170 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -278,8 +278,9 @@ static void gdm_mux_rcv_complete(struct urb *urb)
}
}
-static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
- int tty_index, struct tty_dev *tty_dev, int complete))
+static int gdm_mux_recv(void *priv_dev,
+ int (*cb)(void *data, int len, int tty_index,
+ struct tty_dev *tty_dev, int complete))
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 9db9b903f..d650d7720 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -708,7 +708,7 @@ static void do_tx(struct work_struct *work)
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
+ unsigned int dft_eps_ID, unsigned int eps_ID,
void (*cb)(void *data), void *cb_data,
int dev_idx, int nic_type)
{
@@ -746,8 +746,8 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
}
sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
- sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
- sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
+ sdu->dft_eps_ID = gdm_cpu_to_dev32(&udev->gdm_ed, dft_eps_ID);
+ sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, eps_ID);
sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
t_sdu->len = send_len + HCI_HEADER_SIZE;
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 7fba8a687..dbc4446cf 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -58,7 +58,7 @@ struct sdu_header {
struct sdu {
u16 cmd_evt;
u16 len;
- u32 dftEpsId;
+ u32 dft_eps_ID;
u32 bearer_ID;
u32 nic_type;
u8 data[0];
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 9d8347769..a0232e8ae 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -88,7 +88,8 @@ static void netlink_rcv(struct sk_buff *skb)
}
struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
+ void (*cb)(struct net_device *dev, u16 type,
+ void *msg, int len))
{
struct sock *sock;
struct netlink_kernel_cfg cfg = {
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 7b7c9786c..a221f261c 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -93,7 +93,6 @@ static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
return 0;
}
-
/*
* read first 13 bytes to check bitstream magic number
*/
@@ -201,7 +200,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
#endif /* DEBUG_FPGA */
if (!xl_supported_prog_bus_width(bus_bytes)) {
pr_err("unsupported program bus width %d\n",
- bus_bytes);
+ bus_bytes);
return -1;
}
@@ -222,7 +221,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
pr_info("device init done\n");
for (i = 0; i < size; i += bus_bytes)
- xl_shift_bytes_out(bus_bytes, bitdata+i);
+ xl_shift_bytes_out(bus_bytes, bitdata + i);
pr_info("program done\n");
@@ -277,7 +276,7 @@ static int gs_set_download_method(struct fpgaimage *fimage)
static int init_driver(void)
{
firmware_pdev = platform_device_register_simple("fpgaboot", -1,
- NULL, 0);
+ NULL, 0);
return PTR_ERR_OR_ZERO(firmware_pdev);
}
@@ -331,7 +330,6 @@ err_out1:
kfree(fimage);
return -1;
-
}
static int __init gs_fpgaboot_init(void)
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
index f41f4cc79..8cc32555d 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -51,6 +51,6 @@ struct fpgaimage {
char part[MAX_STR];
char date[MAX_STR];
char time[MAX_STR];
- int32_t lendata;
+ int lendata;
char *fpgadata;
};
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index 819db53da..c9391198f 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -35,7 +35,6 @@ static inline void byte0_out(unsigned char data);
static inline void byte1_out(unsigned char data);
static inline void xl_cclk_b(int32_t i);
-
/* Assert and Deassert CCLK */
void xl_shift_cclk(int count)
{
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index b5fad29a9..f0eb8441d 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -31,7 +31,8 @@ act2000_isa_reset(unsigned short portbase)
int serial = 0;
found = 0;
- if ((reg = inb(portbase + ISA_COR)) != 0xff) {
+ reg = inb(portbase + ISA_COR);
+ if (reg != 0xff) {
outb(reg | ISA_COR_RESET, portbase + ISA_COR);
mdelay(10);
outb(reg, portbase + ISA_COR);
@@ -232,7 +233,7 @@ act2000_isa_receive(act2000_card *card)
{
u_char c;
- if (test_and_set_bit(ACT2000_LOCK_RX, (void *) &card->ilock) != 0)
+ if (test_and_set_bit(ACT2000_LOCK_RX, (void *)&card->ilock) != 0)
return;
while (!act2000_isa_readb(card, &c)) {
if (card->idat.isa.rcvidx < 8) {
@@ -247,7 +248,7 @@ act2000_isa_receive(act2000_card *card)
card->idat.isa.rcvignore = 1;
printk(KERN_WARNING
"act2000_isa_receive: no memory\n");
- test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
return;
}
memcpy(skb_put(card->idat.isa.rcvskb, 8), card->idat.isa.rcvhdr, 8);
@@ -287,7 +288,7 @@ act2000_isa_receive(act2000_card *card)
(card->idat.isa.rcvidx < card->idat.isa.rcvlen)))
act2000_schedule_poll(card);
}
- test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
}
void
@@ -298,12 +299,13 @@ act2000_isa_send(act2000_card *card)
actcapi_msg *msg;
int l;
- if (test_and_set_bit(ACT2000_LOCK_TX, (void *) &card->ilock) != 0)
+ if (test_and_set_bit(ACT2000_LOCK_TX, (void *)&card->ilock) != 0)
return;
while (1) {
spin_lock_irqsave(&card->lock, flags);
if (!(card->sbuf)) {
- if ((card->sbuf = skb_dequeue(&card->sndq))) {
+ card->sbuf = skb_dequeue(&card->sndq);
+ if (card->sbuf) {
card->ack_msg = card->sbuf->data;
msg = (actcapi_msg *)card->sbuf->data;
if ((msg->hdr.cmd.cmd == 0x86) &&
@@ -317,7 +319,7 @@ act2000_isa_send(act2000_card *card)
spin_unlock_irqrestore(&card->lock, flags);
if (!(card->sbuf)) {
/* No more data to send */
- test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
return;
}
skb = card->sbuf;
@@ -325,7 +327,7 @@ act2000_isa_send(act2000_card *card)
while (skb->len) {
if (act2000_isa_writeb(card, *(skb->data))) {
/* Fifo is full, but more data to send */
- test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
/* Schedule myself */
act2000_schedule_tx(card);
return;
@@ -356,7 +358,6 @@ act2000_isa_send(act2000_card *card)
static int
act2000_isa_getid(act2000_card *card)
{
-
act2000_fwid fid;
u_char *p = (u_char *)&fid;
int count = 0;
@@ -378,7 +379,8 @@ act2000_isa_getid(act2000_card *card)
printk(KERN_WARNING "act2000: Wrong Firmware-ID!\n");
return -EPROTO;
}
- if ((p = strchr(fid.revision, '\n')))
+ p = strchr(fid.revision, '\n');
+ if (p)
*p = '\0';
printk(KERN_INFO "act2000: Firmware-ID: %s\n", fid.revision);
if (card->flags & ACT2000_FLAGS_IVALID) {
@@ -439,5 +441,5 @@ act2000_isa_download(act2000_card *card, act2000_ddef __user *cb)
}
kfree(buf);
msleep_interruptible(500);
- return (act2000_isa_getid(card));
+ return act2000_isa_getid(card);
}
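
Most of the act2000_isa.c churn is one mechanical transformation: hoist the assignment out of the if condition so the side effect and the test read separately (checkpatch's "do not use assignment in if condition"). In miniature, with an illustrative wrapper around the first hunk:

static int my_probe_cor(unsigned short portbase)
{
        u_char reg;

        /* was: if ((reg = inb(portbase + ISA_COR)) != 0xff) ... */
        reg = inb(portbase + ISA_COR);
        return reg != 0xff;     /* something responded at this port */
}
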
diff --git a/drivers/staging/i4l/pcbit/capi.h b/drivers/staging/i4l/pcbit/capi.h
index 635f63476..6f6f4dd07 100644
--- a/drivers/staging/i4l/pcbit/capi.h
+++ b/drivers/staging/i4l/pcbit/capi.h
@@ -17,7 +17,7 @@
#define REQ_DISPLAY 0x04
#define REQ_USER_TO_USER 0x08
-#define AppInfoMask REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER
+#define AppInfoMask (REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER)
/* Connection Setup */
extern int capi_conn_req(const char *calledPN, struct sk_buff **buf,
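
The AppInfoMask fix is the classic expression-macro bug: & binds tighter than |, so without the parentheses an expression like info & AppInfoMask silently became (info & REQ_CAUSE) | REQ_DISPLAY | REQ_USER_TO_USER. A standalone illustration — REQ_CAUSE's value (0x01 in this header) is assumed for the arithmetic; the SET_RUN_TIMEOUT change in layer2.h below fixes the same class of bug:

#define REQ_CAUSE               0x01
#define REQ_DISPLAY             0x04
#define REQ_USER_TO_USER        0x08

#define BAD_MASK        REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER
#define GOOD_MASK       (REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER)

/*
 * For info = 0x00:
 *   info & BAD_MASK  == ((0x00 & 0x01) | 0x04 | 0x08) == 0x0c   wrong
 *   info & GOOD_MASK == 0x00                                    right
 */
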
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index 4172e22ae..c5270e229 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -284,7 +284,7 @@ static int pcbit_command(isdn_ctrl *ctl)
default:
printk(KERN_DEBUG "pcbit_command: unknown command\n");
break;
- };
+ }
return 0;
}
@@ -699,8 +699,8 @@ void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
*/
static char statbuf[STATBUF_LEN];
-static int stat_st = 0;
-static int stat_end = 0;
+static int stat_st;
+static int stat_end;
static int pcbit_stat(u_char __user *buf, int len, int driver, int channel)
{
@@ -968,7 +968,7 @@ static int pcbit_ioctl(isdn_ctrl *ctl)
default:
printk("error: unknown ioctl\n");
break;
- };
+ }
return 0;
}
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index b2262ba6f..e72c16420 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -254,7 +254,7 @@ static void pcbit_fsm_timer(unsigned long data)
dev = chan2dev(chan);
- if (dev == NULL) {
+ if (!dev) {
printk(KERN_WARNING "pcbit: timer for unknown device\n");
return;
}
diff --git a/drivers/staging/i4l/pcbit/layer2.h b/drivers/staging/i4l/pcbit/layer2.h
index be1327bc1..6b9063e38 100644
--- a/drivers/staging/i4l/pcbit/layer2.h
+++ b/drivers/staging/i4l/pcbit/layer2.h
@@ -109,7 +109,7 @@
#define SCHED_READ 0x01
#define SCHED_WRITE 0x02
-#define SET_RUN_TIMEOUT 2 * HZ /* 2 seconds */
+#define SET_RUN_TIMEOUT (2 * HZ) /* 2 seconds */
struct frame_buf {
ulong msg;
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index fa67da940..f066aa30f 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -27,18 +27,6 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16204
- tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16204 Programmable
- High-g Digital Impact Sensor and Recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16204.
-
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
@@ -51,17 +39,6 @@ config ADIS16209
To compile this driver as a module, say M here: the module will be
called adis16209.
-config ADIS16220
- tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
- depends on SPI
- select IIO_ADIS_LIB
- help
- Say Y here to build support for Analog Devices adis16220 programmable
- digital vibration sensor.
-
- To compile this driver as a module, say M here: the module will be
- called adis16220.
-
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1ed137f1a..415329c96 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -8,15 +8,9 @@ obj-$(CONFIG_ADIS16201) += adis16201.o
adis16203-y := adis16203_core.o
obj-$(CONFIG_ADIS16203) += adis16203.o
-adis16204-y := adis16204_core.o
-obj-$(CONFIG_ADIS16204) += adis16204.o
-
adis16209-y := adis16209_core.o
obj-$(CONFIG_ADIS16209) += adis16209.o
-adis16220-y := adis16220_core.o
-obj-$(CONFIG_ADIS16220) += adis16220.o
-
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index e6b8c9af6..64844adca 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -3,51 +3,129 @@
#define ADIS16201_STARTUP_DELAY 220 /* ms */
-#define ADIS16201_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16201_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16201_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16201_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16201_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16201_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16201_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16201_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16201_XACCL_OFFS 0x10 /* Calibration, x-axis acceleration offset */
-#define ADIS16201_YACCL_OFFS 0x12 /* Calibration, y-axis acceleration offset */
-#define ADIS16201_XACCL_SCALE 0x14 /* x-axis acceleration scale factor */
-#define ADIS16201_YACCL_SCALE 0x16 /* y-axis acceleration scale factor */
-#define ADIS16201_XINCL_OFFS 0x18 /* Calibration, x-axis inclination offset */
-#define ADIS16201_YINCL_OFFS 0x1A /* Calibration, y-axis inclination offset */
-#define ADIS16201_XINCL_SCALE 0x1C /* x-axis inclination scale factor */
-#define ADIS16201_YINCL_SCALE 0x1E /* y-axis inclination scale factor */
-#define ADIS16201_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16201_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16201_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16201_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16201_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16201_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16201_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16201_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16201_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16201_GLOB_CMD 0x3E /* Operation, system command register */
+/* Flash memory write count */
+#define ADIS16201_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16201_SUPPLY_OUT 0x02
+
+/* Output, x-axis accelerometer */
+#define ADIS16201_XACCL_OUT 0x04
+
+/* Output, y-axis accelerometer */
+#define ADIS16201_YACCL_OUT 0x06
+
+/* Output, auxiliary ADC input */
+#define ADIS16201_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16201_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16201_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16201_YINCL_OUT 0x0E
+
+/* Calibration, x-axis acceleration offset */
+#define ADIS16201_XACCL_OFFS 0x10
+
+/* Calibration, y-axis acceleration offset */
+#define ADIS16201_YACCL_OFFS 0x12
+
+/* x-axis acceleration scale factor */
+#define ADIS16201_XACCL_SCALE 0x14
+
+/* y-axis acceleration scale factor */
+#define ADIS16201_YACCL_SCALE 0x16
+
+/* Calibration, x-axis inclination offset */
+#define ADIS16201_XINCL_OFFS 0x18
+
+/* Calibration, y-axis inclination offset */
+#define ADIS16201_YINCL_OFFS 0x1A
+
+/* x-axis inclination scale factor */
+#define ADIS16201_XINCL_SCALE 0x1C
+
+/* y-axis inclination scale factor */
+#define ADIS16201_YINCL_SCALE 0x1E
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16201_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16201_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16201_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16201_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16201_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16201_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16201_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16201_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16201_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16201_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16201_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16201_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16201_GLOB_CMD 0x3E
/* MSC_CTRL */
-#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* Self-test enable */
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
/* DIAG_STAT */
-#define ADIS16201_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
+
+/* SPI communications failure */
+#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 06c0b75ed..6f3f8ff2a 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -167,6 +167,7 @@ static const struct adis_data adis16201_data = {
.diag_stat_reg = ADIS16201_DIAG_STAT,
.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16201_STARTUP_DELAY,
.status_error_msgs = adis16201_status_error_msgs,
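
The .self_test_no_autoclear = true entries added here and in the adis16203/16209/16240 hunks below are one fix applied four times: on these parts the self-test bit in MSC_CTRL stays set after the test completes instead of clearing itself, so the shared adis library has to clear it by hand. A minimal sketch of the consuming side, assuming the adis core's register helpers; the exact body of the in-kernel routine may differ:

/*
 * Sketch only: how a self-test helper can honor self_test_no_autoclear.
 * adis_write_reg_16() and adis_check_status() are the real adis helpers;
 * the control flow here is an illustration, not the upstream body.
 */
static int adis_self_test_sketch(struct adis *adis)
{
	int ret;

	/* Kick off the self test via the driver-specific MSC_CTRL bit. */
	ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
				adis->data->self_test_mask);
	if (ret)
		return ret;

	msleep(adis->data->startup_delay);

	/* DIAG_STAT now reflects the result. */
	ret = adis_check_status(adis);

	/* Parts whose self-test bit latches need it cleared manually. */
	if (adis->data->self_test_no_autoclear)
		adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0);

	return ret;
}
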
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 6426e38bf..b483e4e64 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -3,45 +3,111 @@
#define ADIS16203_STARTUP_DELAY 220 /* ms */
-#define ADIS16203_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16203_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16203_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16203_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16203_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16203_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16203_INCL_NULL 0x18 /* Incline null calibration */
-#define ADIS16203_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16203_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16203_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16203_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16203_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16203_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16203_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16203_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16203_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16203_GLOB_CMD 0x3E /* Operation, system command register */
+/* Flash memory write count */
+#define ADIS16203_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16203_SUPPLY_OUT 0x02
+
+/* Output, auxiliary ADC input */
+#define ADIS16203_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16203_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16203_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16203_YINCL_OUT 0x0E
+
+/* Incline null calibration */
+#define ADIS16203_INCL_NULL 0x18
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16203_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16203_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16203_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16203_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16203_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16203_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16203_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16203_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16203_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16203_GLOB_CMD 0x3E
/* MSC_CTRL */
-#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9) /* Reverses rotation of both inclination outputs */
-#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
+/* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
+
+/* Self-test enable */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
/* DIAG_STAT */
-#define ADIS16203_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag */
-#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
+
+/* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
+/* SPI communications failure */
+#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index de5b84ac8..c70671778 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -134,6 +134,7 @@ static const struct adis_data adis16203_data = {
.diag_stat_reg = ADIS16203_DIAG_STAT,
.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16203_STARTUP_DELAY,
.status_error_msgs = adis16203_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
deleted file mode 100644
index 0b23f0b5c..000000000
--- a/drivers/staging/iio/accel/adis16204.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef SPI_ADIS16204_H_
-#define SPI_ADIS16204_H_
-
-#define ADIS16204_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16204_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16204_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16204_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16204_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16204_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16204_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16204_X_PEAK_OUT 0x0C /* Twos complement */
-#define ADIS16204_Y_PEAK_OUT 0x0E /* Twos complement */
-#define ADIS16204_XACCL_NULL 0x10 /* Calibration, x-axis acceleration offset null */
-#define ADIS16204_YACCL_NULL 0x12 /* Calibration, y-axis acceleration offset null */
-#define ADIS16204_XACCL_SCALE 0x14 /* X-axis scale factor calibration register */
-#define ADIS16204_YACCL_SCALE 0x16 /* Y-axis scale factor calibration register */
-#define ADIS16204_XY_RSS_OUT 0x18 /* XY combined acceleration (RSS) */
-#define ADIS16204_XY_PEAK_OUT 0x1A /* Peak, XY combined output (RSS) */
-#define ADIS16204_CAP_BUF_1 0x1C /* Capture buffer output register 1 */
-#define ADIS16204_CAP_BUF_2 0x1E /* Capture buffer output register 2 */
-#define ADIS16204_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16204_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16204_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16204_CAPT_PNTR 0x2A /* Capture register address pointer */
-#define ADIS16204_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16204_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16204_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16204_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16204_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16204_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16204_GLOB_CMD 0x3E /* Operation, system command register */
-
-/* MSC_CTRL */
-#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16204_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16204_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16204_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2 BIT(0) /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-
-/* DIAG_STAT */
-#define ADIS16204_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag: 1 = error condition,
- 0 = normal operation */
-#define ADIS16204_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16204_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16204_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16204_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 2.975 V */
-
-/* GLOB_CMD */
-#define ADIS16204_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16204_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16204_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16204_ERROR_ACTIVE BIT(14)
-
-enum adis16204_scan {
- ADIS16204_SCAN_ACC_X,
- ADIS16204_SCAN_ACC_Y,
- ADIS16204_SCAN_ACC_XY,
- ADIS16204_SCAN_SUPPLY,
- ADIS16204_SCAN_AUX_ADC,
- ADIS16204_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
deleted file mode 100644
index 20a9df64f..000000000
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16204.h"
-
-/* Unique to this driver currently */
-
-static const u8 adis16204_addresses[][2] = {
- [ADIS16204_SCAN_ACC_X] = { ADIS16204_XACCL_NULL, ADIS16204_X_PEAK_OUT },
- [ADIS16204_SCAN_ACC_Y] = { ADIS16204_YACCL_NULL, ADIS16204_Y_PEAK_OUT },
- [ADIS16204_SCAN_ACC_XY] = { 0, ADIS16204_XY_PEAK_OUT },
-};
-
-static int adis16204_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
- int addrind;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16204_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.61 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- switch (chan->channel2) {
- case IIO_MOD_X:
- case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
- *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
- break;
- case IIO_MOD_Y:
- case IIO_MOD_Z:
- *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
- break;
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- case IIO_CHAN_INFO_PEAK:
- if (mask == IIO_CHAN_INFO_CALIBBIAS) {
- bits = 12;
- addrind = 0;
- } else { /* PEAK_SEPARATE */
- bits = 14;
- addrind = 1;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16204_addresses[chan->scan_index][addrind];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16204_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16204_addresses[chan->scan_index][1];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16204_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16204_SUPPLY_OUT, ADIS16204_SCAN_SUPPLY, 0, 12),
- ADIS_AUX_ADC_CHAN(ADIS16204_AUX_ADC, ADIS16204_SCAN_AUX_ADC, 0, 12),
- ADIS_TEMP_CHAN(ADIS16204_TEMP_OUT, ADIS16204_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16204_XACCL_OUT, ADIS16204_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16204_YACCL_OUT, ADIS16204_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(ROOT_SUM_SQUARED_X_Y, ADIS16204_XY_RSS_OUT,
- ADIS16204_SCAN_ACC_XY, BIT(IIO_CHAN_INFO_PEAK), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(5),
-};
-
-static const struct iio_info adis16204_info = {
- .read_raw = &adis16204_read_raw,
- .write_raw = &adis16204_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16204_status_error_msgs[] = {
- [ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16204_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16204_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16204_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16204_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
-};
-
-static const struct adis_data adis16204_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16204_MSC_CTRL,
- .glob_cmd_reg = ADIS16204_GLOB_CMD,
- .diag_stat_reg = ADIS16204_DIAG_STAT,
-
- .self_test_mask = ADIS16204_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16204_STARTUP_DELAY,
-
- .status_error_msgs = adis16204_status_error_msgs,
- .status_error_mask = BIT(ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16204_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16204_info;
- indio_dev->channels = adis16204_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16204_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16204_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16204_driver = {
- .driver = {
- .name = "adis16204",
- },
- .probe = adis16204_probe,
- .remove = adis16204_remove,
-};
-module_spi_driver(adis16204_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 813698d18..315f1c0c4 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -5,88 +5,127 @@
/* Flash memory write count */
#define ADIS16209_FLASH_CNT 0x00
+
/* Output, power supply */
#define ADIS16209_SUPPLY_OUT 0x02
+
/* Output, x-axis accelerometer */
#define ADIS16209_XACCL_OUT 0x04
+
/* Output, y-axis accelerometer */
#define ADIS16209_YACCL_OUT 0x06
+
/* Output, auxiliary ADC input */
#define ADIS16209_AUX_ADC 0x08
+
/* Output, temperature */
#define ADIS16209_TEMP_OUT 0x0A
+
/* Output, x-axis inclination */
#define ADIS16209_XINCL_OUT 0x0C
+
/* Output, y-axis inclination */
#define ADIS16209_YINCL_OUT 0x0E
+
/* Output, +/-180 vertical rotational position */
#define ADIS16209_ROT_OUT 0x10
+
/* Calibration, x-axis acceleration offset null */
#define ADIS16209_XACCL_NULL 0x12
+
/* Calibration, y-axis acceleration offset null */
#define ADIS16209_YACCL_NULL 0x14
+
/* Calibration, x-axis inclination offset null */
#define ADIS16209_XINCL_NULL 0x16
+
/* Calibration, y-axis inclination offset null */
#define ADIS16209_YINCL_NULL 0x18
+
/* Calibration, vertical rotation offset null */
#define ADIS16209_ROT_NULL 0x1A
+
/* Alarm 1 amplitude threshold */
#define ADIS16209_ALM_MAG1 0x20
+
/* Alarm 2 amplitude threshold */
#define ADIS16209_ALM_MAG2 0x22
+
/* Alarm 1, sample period */
#define ADIS16209_ALM_SMPL1 0x24
+
/* Alarm 2, sample period */
#define ADIS16209_ALM_SMPL2 0x26
+
/* Alarm control */
#define ADIS16209_ALM_CTRL 0x28
+
/* Auxiliary DAC data */
#define ADIS16209_AUX_DAC 0x30
+
/* General-purpose digital input/output control */
#define ADIS16209_GPIO_CTRL 0x32
+
/* Miscellaneous control */
#define ADIS16209_MSC_CTRL 0x34
+
/* Internal sample period (rate) control */
#define ADIS16209_SMPL_PRD 0x36
+
/* Operation, filter configuration */
#define ADIS16209_AVG_CNT 0x38
+
/* Operation, sleep mode control */
#define ADIS16209_SLP_CNT 0x3A
+
/* Diagnostics, system status register */
#define ADIS16209_DIAG_STAT 0x3C
+
/* Operation, system command register */
#define ADIS16209_GLOB_CMD 0x3E
/* MSC_CTRL */
+
/* Self-test at power-on: 1 = disabled, 0 = enabled */
#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
/* Self-test enable */
#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
+
/* Data-ready enable: 1 = enabled, 0 = disabled */
#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
+
/* Data-ready polarity: 1 = active high, 0 = active low */
#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
/* DIAG_STAT */
+
/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
+
/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
+
/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
/* SPI communications failure */
#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
+
/* Flash update failure */
#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
+
/* Power supply above 3.625 V */
#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
+
/* Power supply below 3.15 V */
#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 8b42bf8c3..8dbad5862 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -168,6 +168,7 @@ static const struct adis_data adis16209_data = {
.diag_stat_reg = ADIS16209_DIAG_STAT,
.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16209_STARTUP_DELAY,
.status_error_msgs = adis16209_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
deleted file mode 100644
index eab863311..000000000
--- a/drivers/staging/iio/accel/adis16220.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef SPI_ADIS16220_H_
-#define SPI_ADIS16220_H_
-
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16220_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16220_FLASH_CNT 0x00
-/* Control, acceleration offset adjustment control */
-#define ADIS16220_ACCL_NULL 0x02
-/* Control, AIN1 offset adjustment control */
-#define ADIS16220_AIN1_NULL 0x04
-/* Control, AIN2 offset adjustment control */
-#define ADIS16220_AIN2_NULL 0x06
-/* Output, power supply during capture */
-#define ADIS16220_CAPT_SUPPLY 0x0A
-/* Output, temperature during capture */
-#define ADIS16220_CAPT_TEMP 0x0C
-/* Output, peak acceleration during capture */
-#define ADIS16220_CAPT_PEAKA 0x0E
-/* Output, peak AIN1 level during capture */
-#define ADIS16220_CAPT_PEAK1 0x10
-/* Output, peak AIN2 level during capture */
-#define ADIS16220_CAPT_PEAK2 0x12
-/* Output, capture buffer for acceleration */
-#define ADIS16220_CAPT_BUFA 0x14
-/* Output, capture buffer for AIN1 */
-#define ADIS16220_CAPT_BUF1 0x16
-/* Output, capture buffer for AIN2 */
-#define ADIS16220_CAPT_BUF2 0x18
-/* Control, capture buffer address pointer */
-#define ADIS16220_CAPT_PNTR 0x1A
-/* Control, capture control register */
-#define ADIS16220_CAPT_CTRL 0x1C
-/* Control, capture period (automatic mode) */
-#define ADIS16220_CAPT_PRD 0x1E
-/* Control, Alarm A, acceleration peak threshold */
-#define ADIS16220_ALM_MAGA 0x20
-/* Control, Alarm 1, AIN1 peak threshold */
-#define ADIS16220_ALM_MAG1 0x22
-/* Control, Alarm 2, AIN2 peak threshold */
-#define ADIS16220_ALM_MAG2 0x24
-/* Control, Alarm S, peak threshold */
-#define ADIS16220_ALM_MAGS 0x26
-/* Control, alarm configuration register */
-#define ADIS16220_ALM_CTRL 0x28
-/* Control, general I/O configuration */
-#define ADIS16220_GPIO_CTRL 0x32
-/* Control, self-test control, AIN configuration */
-#define ADIS16220_MSC_CTRL 0x34
-/* Control, digital I/O configuration */
-#define ADIS16220_DIO_CTRL 0x36
-/* Control, filter configuration */
-#define ADIS16220_AVG_CNT 0x38
-/* Status, system status */
-#define ADIS16220_DIAG_STAT 0x3C
-/* Control, system commands */
-#define ADIS16220_GLOB_CMD 0x3E
-/* Status, self-test response */
-#define ADIS16220_ST_DELTA 0x40
-/* Lot Identification Code 1 */
-#define ADIS16220_LOT_ID1 0x52
-/* Lot Identification Code 2 */
-#define ADIS16220_LOT_ID2 0x54
-/* Product identifier; convert to decimal = 16220 */
-#define ADIS16220_PROD_ID 0x56
-/* Serial number */
-#define ADIS16220_SERIAL_NUM 0x58
-
-#define ADIS16220_CAPTURE_SIZE 2048
-
-/* MSC_CTRL */
-#define ADIS16220_MSC_CTRL_SELF_TEST_EN BIT(8)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN1 BIT(1)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN2 BIT(0)
-
-/* DIO_CTRL */
-#define ADIS16220_MSC_CTRL_DIO2_BUSY_IND (BIT(5) | BIT(4))
-#define ADIS16220_MSC_CTRL_DIO1_BUSY_IND (BIT(3) | BIT(2))
-#define ADIS16220_MSC_CTRL_DIO2_ACT_HIGH BIT(1)
-#define ADIS16220_MSC_CTRL_DIO1_ACT_HIGH BIT(0)
-
-/* DIAG_STAT */
-/* AIN2 sample > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_ALM_MAG2 BIT(14)
-/* AIN1 sample > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_ALM_MAG1 BIT(13)
-/* Acceleration sample > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_ALM_MAGA BIT(12)
-/* Error condition programmed into ALM_MAGS[11:0] and ALM_CTRL[5:4] is true */
-#define ADIS16220_DIAG_STAT_ALM_MAGS BIT(11)
-/* |Peak value in AIN2 data capture| > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN2 BIT(10)
-/* |Peak value in AIN1 data capture| > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN1 BIT(9)
-/* |Peak value in acceleration data capture| > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_PEAK_ACCEL BIT(8)
-/* Data ready, capture complete */
-#define ADIS16220_DIAG_STAT_DATA_RDY BIT(7)
-#define ADIS16220_DIAG_STAT_FLASH_CHK BIT(6)
-#define ADIS16220_DIAG_STAT_SELF_TEST BIT(5)
-/* Capture period violation/interruption */
-#define ADIS16220_DIAG_STAT_VIOLATION_BIT 4
-/* SPI communications failure */
-#define ADIS16220_DIAG_STAT_SPI_FAIL_BIT 3
-/* Flash update failure */
-#define ADIS16220_DIAG_STAT_FLASH_UPT_BIT 2
-/* Power supply above 3.625 V */
-#define ADIS16220_DIAG_STAT_POWER_HIGH_BIT 1
-/* Power supply below 3.15 V */
-#define ADIS16220_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16220_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16220_GLOB_CMD_SELF_TEST BIT(2)
-#define ADIS16220_GLOB_CMD_PWR_DOWN BIT(1)
-
-#define ADIS16220_MAX_TX 2048
-#define ADIS16220_MAX_RX 2048
-
-#define ADIS16220_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16220_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct adis16220_state - device instance specific data
- * @adis: adis device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct adis16220_state {
- struct adis adis;
-
- struct mutex buf_lock;
- u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
- u8 rx[ADIS16220_MAX_RX];
-};
-
-#endif /* SPI_ADIS16220_H_ */
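
The removed adis16220_state is a compact example of the usual SPI driver-state layout: the transfer buffers live inside the state struct, buf_lock serializes access to them, and ____cacheline_aligned on the first buffer keeps the DMA-able region from sharing a cacheline with ordinary struct members. The pattern in isolation, as a sketch with hypothetical names and sizes:

struct foo_state {
	struct spi_device *spi;
	struct mutex buf_lock;	/* protects tx/rx during transfers */

	/*
	 * Keep DMA buffers last and cacheline-aligned so that cache
	 * maintenance done for DMA cannot touch the fields above.
	 */
	u8 tx[16] ____cacheline_aligned;
	u8 rx[16];
};
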
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
deleted file mode 100644
index d0165218b..000000000
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * ADIS16220 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#include "adis16220.h"
-
-static ssize_t adis16220_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16220_state *st = iio_priv(indio_dev);
- ssize_t ret;
- u16 val;
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis_read_reg_16(&st->adis, this_attr->address, &val);
- mutex_unlock(&indio_dev->mlock);
- if (ret)
- return ret;
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t adis16220_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = adis_write_reg_16(&st->adis, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int adis16220_capture(struct iio_dev *indio_dev)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
-
- /* initiates a manual data capture */
- ret = adis_write_reg_16(&st->adis, ADIS16220_GLOB_CMD, 0xBF08);
- if (ret)
- dev_err(&indio_dev->dev, "problem beginning capture");
-
- usleep_range(10000, 11000); /* delay for capture to finish */
-
- return ret;
-}
-
-static ssize_t adis16220_write_capture(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool val;
- int ret;
-
- ret = strtobool(buf, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
- ret = adis16220_capture(indio_dev);
- if (ret)
- return ret;
-
- return len;
-}
-
-static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
- char *buf,
- loff_t off,
- size_t count,
- int addr)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .delay_usecs = 25,
- }, {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .cs_change = 1,
- .delay_usecs = 25,
- },
- };
- int ret;
- int i;
-
- if (unlikely(!count))
- return count;
-
- if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
- return -EINVAL;
-
- if (off + count > ADIS16220_CAPTURE_SIZE)
- count = ADIS16220_CAPTURE_SIZE - off;
-
- /* write the start position of the capture buffer */
- ret = adis_write_reg_16(&st->adis,
- ADIS16220_CAPT_PNTR,
- off > 1);
- if (ret)
- return -EIO;
-
- /* read count/2 values from capture buffer */
- mutex_lock(&st->buf_lock);
-
- for (i = 0; i < count; i += 2) {
- st->tx[i] = ADIS_READ_REG(addr);
- st->tx[i + 1] = 0;
- }
- xfers[1].len = count;
-
- ret = spi_sync_transfer(st->adis.spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- mutex_unlock(&st->buf_lock);
- return -EIO;
- }
-
- memcpy(buf, st->rx, count);
-
- mutex_unlock(&st->buf_lock);
- return count;
-}
-
-static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf,
- loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUFA);
-}
-
-static struct bin_attribute accel_bin = {
- .attr = {
- .name = "accel_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_accel_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF1);
-}
-
-static struct bin_attribute adc1_bin = {
- .attr = {
- .name = "in0_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc1_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF2);
-}
-
-static struct bin_attribute adc2_bin = {
- .attr = {
- .name = "in1_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc2_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-#define IIO_DEV_ATTR_CAPTURE(_store) \
- IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
-
-static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
-
-#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)
-
-static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
- adis16220_read_16bit,
- adis16220_write_16bit,
- ADIS16220_CAPT_PNTR);
-
-enum adis16220_channel {
- in_supply, in_1, in_2, accel, temp
-};
-
-struct adis16220_address_spec {
- u8 addr;
- u8 bits;
- bool sign;
-};
-
-/* Address / bits / signed */
-static const struct adis16220_address_spec adis16220_addresses[][3] = {
- [in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
- [in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
- { ADIS16220_AIN1_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK1, 16, 1 }, },
- [in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
- { ADIS16220_AIN2_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK2, 16, 1 }, },
- [accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
- { ADIS16220_ACCL_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAKA, 16, 1 }, },
- [temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
-};
-
-static int adis16220_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- const struct adis16220_address_spec *addr;
- int ret = -EINVAL;
- int addrind = 0;
- u16 uval;
- s16 sval;
- u8 bits;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- addrind = 0;
- break;
- case IIO_CHAN_INFO_OFFSET:
- if (chan->type == IIO_TEMP) {
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- }
- addrind = 1;
- break;
- case IIO_CHAN_INFO_PEAK:
- addrind = 2;
- break;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val2 = IIO_G_TO_M_S_2(19073); /* 19.073 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220700; /* 1.2207 mV */
- } else {
- /* Should really be dependent on VDD */
- *val2 = 305180; /* 305.18 uV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- default:
- return -EINVAL;
- }
- addr = &adis16220_addresses[chan->address][addrind];
- if (addr->sign) {
- ret = adis_read_reg_16(&st->adis, addr->addr, &sval);
- if (ret)
- return ret;
- bits = addr->bits;
- sval &= (1 << bits) - 1;
- sval = (s16)(sval << (16 - bits)) >> (16 - bits);
- *val = sval;
- return IIO_VAL_INT;
- }
- ret = adis_read_reg_16(&st->adis, addr->addr, &uval);
- if (ret)
- return ret;
- bits = addr->bits;
- uval &= (1 << bits) - 1;
- *val = uval;
- return IIO_VAL_INT;
-}
-
-static const struct iio_chan_spec adis16220_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_supply,
- }, {
- .type = IIO_ACCEL,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_PEAK),
- .address = accel,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = temp,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_1,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = in_2,
- }
-};
-
-static struct attribute *adis16220_attributes[] = {
- &iio_dev_attr_capture.dev_attr.attr,
- &iio_dev_attr_capture_count.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16220_attribute_group = {
- .attrs = adis16220_attributes,
-};
-
-static const struct iio_info adis16220_info = {
- .attrs = &adis16220_attribute_group,
- .driver_module = THIS_MODULE,
- .read_raw = &adis16220_read_raw,
-};
-
-static const char * const adis16220_status_error_msgs[] = {
- [ADIS16220_DIAG_STAT_VIOLATION_BIT] = "Capture period violation/interruption",
- [ADIS16220_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16220_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16220_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16220_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16220_data = {
- .read_delay = 35,
- .write_delay = 35,
- .msc_ctrl_reg = ADIS16220_MSC_CTRL,
- .glob_cmd_reg = ADIS16220_GLOB_CMD,
- .diag_stat_reg = ADIS16220_DIAG_STAT,
-
- .self_test_mask = ADIS16220_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16220_STARTUP_DELAY,
-
- .status_error_msgs = adis16220_status_error_msgs,
- .status_error_mask = BIT(ADIS16220_DIAG_STAT_VIOLATION_BIT) |
- BIT(ADIS16220_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16220_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16220_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16220_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16220_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adis16220_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- if (ret)
- goto error_rm_accel_bin;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- if (ret)
- goto error_rm_adc1_bin;
-
- ret = adis_init(&st->adis, indio_dev, spi, &adis16220_data);
- if (ret)
- goto error_rm_adc2_bin;
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(&st->adis);
- if (ret)
- goto error_rm_adc2_bin;
- return 0;
-
-error_rm_adc2_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
-error_rm_adc1_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
-error_rm_accel_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
- return ret;
-}
-
-static int adis16220_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
-
- return 0;
-}
-
-static struct spi_driver adis16220_driver = {
- .driver = {
- .name = "adis16220",
- },
- .probe = adis16220_probe,
- .remove = adis16220_remove,
-};
-module_spi_driver(adis16220_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index 66b5ad2f4..b2cb37b95 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -5,110 +5,160 @@
/* Flash memory write count */
#define ADIS16240_FLASH_CNT 0x00
+
/* Output, power supply */
#define ADIS16240_SUPPLY_OUT 0x02
+
/* Output, x-axis accelerometer */
#define ADIS16240_XACCL_OUT 0x04
+
/* Output, y-axis accelerometer */
#define ADIS16240_YACCL_OUT 0x06
+
/* Output, z-axis accelerometer */
#define ADIS16240_ZACCL_OUT 0x08
+
/* Output, auxiliary ADC input */
#define ADIS16240_AUX_ADC 0x0A
+
/* Output, temperature */
#define ADIS16240_TEMP_OUT 0x0C
+
/* Output, x-axis acceleration peak */
#define ADIS16240_XPEAK_OUT 0x0E
+
/* Output, y-axis acceleration peak */
#define ADIS16240_YPEAK_OUT 0x10
+
/* Output, z-axis acceleration peak */
#define ADIS16240_ZPEAK_OUT 0x12
+
/* Output, sum-of-squares acceleration peak */
#define ADIS16240_XYZPEAK_OUT 0x14
+
/* Output, Capture Buffer 1, X and Y acceleration */
#define ADIS16240_CAPT_BUF1 0x16
+
/* Output, Capture Buffer 2, Z acceleration */
#define ADIS16240_CAPT_BUF2 0x18
+
/* Diagnostic, error flags */
#define ADIS16240_DIAG_STAT 0x1A
+
/* Diagnostic, event counter */
#define ADIS16240_EVNT_CNTR 0x1C
+
/* Diagnostic, check sum value from firmware test */
#define ADIS16240_CHK_SUM 0x1E
+
/* Calibration, x-axis acceleration offset adjustment */
#define ADIS16240_XACCL_OFF 0x20
+
/* Calibration, y-axis acceleration offset adjustment */
#define ADIS16240_YACCL_OFF 0x22
+
/* Calibration, z-axis acceleration offset adjustment */
#define ADIS16240_ZACCL_OFF 0x24
+
/* Clock, hour and minute */
#define ADIS16240_CLK_TIME 0x2E
+
/* Clock, month and day */
#define ADIS16240_CLK_DATE 0x30
+
/* Clock, year */
#define ADIS16240_CLK_YEAR 0x32
+
/* Wake-up setting, hour and minute */
#define ADIS16240_WAKE_TIME 0x34
+
/* Wake-up setting, month and day */
#define ADIS16240_WAKE_DATE 0x36
+
/* Alarm 1 amplitude threshold */
#define ADIS16240_ALM_MAG1 0x38
+
/* Alarm 2 amplitude threshold */
#define ADIS16240_ALM_MAG2 0x3A
+
/* Alarm control */
#define ADIS16240_ALM_CTRL 0x3C
+
/* Capture, external trigger control */
#define ADIS16240_XTRIG_CTRL 0x3E
+
/* Capture, address pointer */
#define ADIS16240_CAPT_PNTR 0x40
+
/* Capture, configuration and control */
#define ADIS16240_CAPT_CTRL 0x42
+
/* General-purpose digital input/output control */
#define ADIS16240_GPIO_CTRL 0x44
+
/* Miscellaneous control */
#define ADIS16240_MSC_CTRL 0x46
+
/* Internal sample period (rate) control */
#define ADIS16240_SMPL_PRD 0x48
+
/* System command */
#define ADIS16240_GLOB_CMD 0x4A
/* MSC_CTRL */
+
/* Enables sum-of-squares output (XYZPEAK_OUT) */
#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
+
/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
+
/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
+
/* Data-ready enable: 1 = enabled, 0 = disabled */
#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
+
/* Data-ready polarity: 1 = active high, 0 = active low */
#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
/* DIAG_STAT */
+
/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
+
/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
+
/* Capture buffer full: 1 = capture buffer is full */
#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
+
/* Flash test, checksum flag: 1 = mismatch, 0 = match */
#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
+
/* Power-on, self-test flag: 1 = failure, 0 = pass */
#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
+
/* Power-on self-test: 1 = in-progress, 0 = complete */
#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
+
/* SPI communications failure */
#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
+
/* Flash update failure */
#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
+
/* Power supply above 3.625 V */
#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
+
/* Power supply below 3.15 V */
#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16240_GLOB_CMD_RESUME BIT(8)
#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 1b5b685a8..d5b99e610 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -29,13 +29,13 @@
static ssize_t adis16240_spi_read_signed(struct device *dev,
struct device_attribute *attr,
char *buf,
- unsigned bits)
+ unsigned int bits)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adis *st = iio_priv(indio_dev);
int ret;
s16 val = 0;
- unsigned shift = 16 - bits;
+ unsigned int shift = 16 - bits;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = adis_read_reg_16(st,
@@ -222,6 +222,7 @@ static const struct adis_data adis16240_data = {
.diag_stat_reg = ADIS16240_DIAG_STAT,
.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16240_STARTUP_DELAY,
.status_error_msgs = adis16240_status_error_msgs,
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index f843f19cf..1cf6b7980 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -35,10 +35,10 @@
#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */
#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */
#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */
-#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit
- * (AD7792)/24-bit (AD7192)) */
-#define AD7192_REG_FULLSALE 7 /* Full-Scale Register
- * (RW, 16-bit (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit */
+ /* (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_FULLSALE 7 /* Full-Scale Register */
+ /* (RW, 16-bit (AD7792)/24-bit (AD7192)) */
/* Communications Register Bit Designations (AD7192_REG_COMM) */
#define AD7192_COMM_WEN BIT(7) /* Write Enable */
@@ -80,13 +80,13 @@
#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
/* Mode Register: AD7192_MODE_CLKSRC options */
-#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected
- * from MCLK1 to MCLK2 */
+#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected */
+ /* from MCLK1 to MCLK2 */
#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */
-#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not
- * available at the MCLK2 pin */
-#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available
- * at the MCLK2 pin */
+#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not */
+ /* available at the MCLK2 pin */
+#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available */
+ /* at the MCLK2 pin */
/* Configuration Register Bit Designations (AD7192_REG_CONF) */
@@ -349,11 +349,9 @@ static ssize_t ad7192_write_frequency(struct device *dev,
if (lval == 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
div = st->mclk / (lval * st->f_order * 1024);
if (div < 1 || div > 1023) {
@@ -366,7 +364,7 @@ static ssize_t ad7192_write_frequency(struct device *dev,
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
out:
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
@@ -434,11 +432,9 @@ static ssize_t ad7192_set(struct device *dev,
if (ret < 0)
return ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch ((u32)this_attr->address) {
case AD7192_REG_GPOCON:
@@ -461,7 +457,7 @@ static ssize_t ad7192_set(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
@@ -555,11 +551,9 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
int ret, i;
unsigned int tmp;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -582,7 +576,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
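
The three ad7192.c hunks above are one mechanical conversion, repeated in ad7606_core.c further down: an open-coded mutex_lock(&indio_dev->mlock) plus iio_buffer_enabled() check becomes a call to iio_device_claim_direct_mode()/iio_device_release_direct_mode(), the IIO core helpers for "fail with -EBUSY if buffered capture is running, otherwise hold off buffered mode until release". The shape of the conversion, shown on a hypothetical attribute store routine:

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	int ret;

	/*
	 * Returns -EBUSY if the buffer is enabled; on success the
	 * device is pinned in direct mode until the release below.
	 */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;

	ret = foo_direct_access(indio_dev, buf, len);	/* hypothetical */

	iio_device_release_direct_mode(indio_dev);
	return ret ? ret : len;
}
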
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 62e5ecacf..a06b46cb8 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -155,7 +155,7 @@ static void ad7280_crc8_build_table(unsigned char *crc_tab)
}
}
-static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
+static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned int val)
{
unsigned char crc;
@@ -165,7 +165,7 @@ static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
return crc ^ (val & 0xFF);
}
-static int ad7280_check_crc(struct ad7280_state *st, unsigned val)
+static int ad7280_check_crc(struct ad7280_state *st, unsigned int val)
{
unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
@@ -191,7 +191,7 @@ static void ad7280_delay(struct ad7280_state *st)
usleep_range(250, 500);
}
-static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
+static int __ad7280_read32(struct ad7280_state *st, unsigned int *val)
{
int ret;
struct spi_transfer t = {
@@ -211,10 +211,10 @@ static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
return 0;
}
-static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
- unsigned addr, bool all, unsigned val)
+static int ad7280_write(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr, bool all, unsigned int val)
{
- unsigned reg = devaddr << 27 | addr << 21 |
+ unsigned int reg = devaddr << 27 | addr << 21 |
(val & 0xFF) << 13 | all << 12;
reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
@@ -223,11 +223,11 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
return spi_write(st->spi, &st->buf[0], 4);
}
-static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
+static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr)
{
int ret;
- unsigned tmp;
+ unsigned int tmp;
/* turns off the read operation on all parts */
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
@@ -261,11 +261,11 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
return (tmp >> 13) & 0xFF;
}
-static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
+static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr)
{
int ret;
- unsigned tmp;
+ unsigned int tmp;
ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
if (ret)
@@ -299,11 +299,11 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
return (tmp >> 11) & 0xFFF;
}
-static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
- unsigned *array)
+static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
+ unsigned int *array)
{
int i, ret;
- unsigned tmp, sum = 0;
+ unsigned int tmp, sum = 0;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
AD7280A_CELL_VOLTAGE_1 << 2);
@@ -338,7 +338,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
static int ad7280_chain_setup(struct ad7280_state *st)
{
- unsigned val, n;
+ unsigned int val, n;
int ret;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
@@ -401,7 +401,7 @@ static ssize_t ad7280_store_balance_sw(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
bool readin;
int ret;
- unsigned devaddr, ch;
+ unsigned int devaddr, ch;
ret = strtobool(buf, &readin);
if (ret)
@@ -431,7 +431,7 @@ static ssize_t ad7280_show_balance_timer(struct device *dev,
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- unsigned msecs;
+ unsigned int msecs;
mutex_lock(&indio_dev->mlock);
ret = ad7280_read(st, this_attr->address >> 8,
@@ -602,7 +602,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned val;
+ unsigned int val;
switch ((u32)this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
@@ -683,7 +683,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct ad7280_state *st = iio_priv(indio_dev);
- unsigned *channels;
+ unsigned int *channels;
int i, ret;
channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
index 732347a9b..ccfb90d20 100644
--- a/drivers/staging/iio/adc/ad7280a.h
+++ b/drivers/staging/iio/adc/ad7280a.h
@@ -29,10 +29,10 @@
#define AD7280A_ALERT_REMOVE_AUX4_AUX5 BIT(1)
struct ad7280_platform_data {
- unsigned acquisition_time;
- unsigned conversion_averaging;
- unsigned chain_last_alert_ignore;
- bool thermistor_term_en;
+ unsigned int acquisition_time;
+ unsigned int conversion_averaging;
+ unsigned int chain_last_alert_ignore;
+ bool thermistor_term_en;
};
#endif /* IIO_ADC_AD7280_H_ */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index cca946924..39f50440d 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -28,16 +28,16 @@
*/
struct ad7606_platform_data {
- unsigned default_os;
- unsigned default_range;
- unsigned gpio_convst;
- unsigned gpio_reset;
- unsigned gpio_range;
- unsigned gpio_os0;
- unsigned gpio_os1;
- unsigned gpio_os2;
- unsigned gpio_frstdata;
- unsigned gpio_stby;
+ unsigned int default_os;
+ unsigned int default_range;
+ unsigned int gpio_convst;
+ unsigned int gpio_reset;
+ unsigned int gpio_range;
+ unsigned int gpio_os0;
+ unsigned int gpio_os1;
+ unsigned int gpio_os2;
+ unsigned int gpio_frstdata;
+ unsigned int gpio_stby;
};
/**
@@ -52,7 +52,7 @@ struct ad7606_chip_info {
const char *name;
u16 int_vref_mv;
const struct iio_chan_spec *channels;
- unsigned num_channels;
+ unsigned int num_channels;
};
/**
@@ -67,8 +67,8 @@ struct ad7606_state {
struct work_struct poll_work;
wait_queue_head_t wq_data_avail;
const struct ad7606_bus_ops *bops;
- unsigned range;
- unsigned oversampling;
+ unsigned int range;
+ unsigned int oversampling;
bool done;
void __iomem *base_address;
@@ -86,7 +86,7 @@ struct ad7606_bus_ops {
};
struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address, unsigned id,
+ void __iomem *base_address, unsigned int id,
const struct ad7606_bus_ops *bops);
int ad7606_remove(struct iio_dev *indio_dev, int irq);
int ad7606_reset(struct ad7606_state *st);
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index fe6caeee0..f79ee6185 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -36,7 +36,7 @@ int ad7606_reset(struct ad7606_state *st)
return -ENODEV;
}
-static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch)
+static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
@@ -88,12 +88,12 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7606_scan_direct(indio_dev, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7606_scan_direct(indio_dev, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
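
For reference, the iio_device_claim_direct_mode()/iio_device_release_direct_mode() pattern adopted by the hunk above looks roughly like this in a driver read path (a minimal sketch assuming <linux/iio/iio.h>; the example_* names are illustrative, not part of this patch):

    static int example_read_raw(struct iio_dev *indio_dev,
                                struct iio_chan_spec const *chan,
                                int *val, int *val2, long mask)
    {
        int ret;

        /* Fails with -EBUSY while a buffer is enabled, replacing the
         * open-coded iio_buffer_enabled() check under mlock. */
        ret = iio_device_claim_direct_mode(indio_dev);
        if (ret)
            return ret;

        ret = example_scan_direct(indio_dev, chan->address); /* hypothetical */
        iio_device_release_direct_mode(indio_dev);
        if (ret < 0)
            return ret;

        *val = ret;
        return IIO_VAL_INT;
    }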
@@ -155,7 +155,7 @@ static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
return sprintf(buf, "%u\n", st->oversampling);
}
-static int ad7606_oversampling_get_index(unsigned val)
+static int ad7606_oversampling_get_index(unsigned int val)
{
unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
int i;
@@ -446,7 +446,7 @@ static const struct iio_info ad7606_info_range = {
struct iio_dev *ad7606_probe(struct device *dev, int irq,
void __iomem *base_address,
- unsigned id,
+ unsigned int id,
const struct ad7606_bus_ops *bops)
{
struct ad7606_platform_data *pdata = dev->platform_data;
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index d873a5164..9587fa86d 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -22,6 +22,7 @@ static int ad7606_spi_read_block(struct device *dev,
struct spi_device *spi = to_spi_device(dev);
int i, ret;
unsigned short *data = buf;
+ __be16 *bdata = buf;
ret = spi_read(spi, buf, count * 2);
if (ret < 0) {
@@ -30,7 +31,7 @@ static int ad7606_spi_read_block(struct device *dev,
}
for (i = 0; i < count; i++)
- data[i] = be16_to_cpu(data[i]);
+ data[i] = be16_to_cpu(bdata[i]);
return 0;
}
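
The extra __be16 alias above exists so the big-endian words coming off the SPI wire are read through a type sparse can check; a minimal sketch of the idiom (assuming <linux/types.h> and <asm/byteorder.h>):

    /* Convert a buffer of big-endian 16-bit words to CPU order in place.
     * Reading through __be16 * (instead of casting plain unsigned short
     * values) keeps the sparse endianness annotations intact. */
    static void example_be16_fixup(void *buf, int count)
    {
        unsigned short *data = buf;
        __be16 *bdata = buf;
        int i;

        for (i = 0; i < count; i++)
            data[i] = be16_to_cpu(bdata[i]);
    }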
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 1439cfdbb..c9a0c2aa6 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -63,7 +63,7 @@ static int ad7780_set_mode(struct ad_sigma_delta *sigma_delta,
enum ad_sigma_delta_mode mode)
{
struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta);
- unsigned val;
+ unsigned int val;
switch (mode) {
case AD_SD_MODE_SINGLE:
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 18b27a198..358400b22 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -31,7 +31,7 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
}
static int ad9832_write_frequency(struct ad9832_state *st,
- unsigned addr, unsigned long fout)
+ unsigned int addr, unsigned long fout)
{
unsigned long regval;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index d1218d896..170ac980a 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -12,20 +12,16 @@
#include <linux/sysfs.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <asm/div64.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
-#include "ad5933.h"
-
/* AD5933/AD5934 Registers */
#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
@@ -86,6 +82,18 @@
#define AD5933_POLL_TIME_ms 10
#define AD5933_INIT_EXCITATION_TIME_ms 100
+/**
+ * struct ad5933_platform_data - platform specific data
+ * @ext_clk_Hz: the external clock frequency in Hz; if not set,
+ * the driver uses the internal clock (16.776 MHz)
+ * @vref_mv: the external reference voltage in millivolts
+ */
+
+struct ad5933_platform_data {
+ unsigned long ext_clk_Hz;
+ unsigned short vref_mv;
+};
+
struct ad5933_state {
struct i2c_client *client;
struct regulator *reg;
@@ -93,14 +101,14 @@ struct ad5933_state {
unsigned long mclk_hz;
unsigned char ctrl_hb;
unsigned char ctrl_lb;
- unsigned range_avail[4];
+ unsigned int range_avail[4];
unsigned short vref_mv;
unsigned short settling_cycles;
unsigned short freq_points;
- unsigned freq_start;
- unsigned freq_inc;
- unsigned state;
- unsigned poll_time_jiffies;
+ unsigned int freq_start;
+ unsigned int freq_inc;
+ unsigned int state;
+ unsigned int poll_time_jiffies;
};
static struct ad5933_platform_data ad5933_default_pdata = {
@@ -214,7 +222,7 @@ static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
}
static int ad5933_set_freq(struct ad5933_state *st,
- unsigned reg, unsigned long freq)
+ unsigned int reg, unsigned long freq)
{
unsigned long long freqreg;
union {
@@ -274,7 +282,7 @@ static int ad5933_setup(struct ad5933_state *st)
static void ad5933_calc_out_ranges(struct ad5933_state *st)
{
int i;
- unsigned normalized_3v3[4] = {1980, 198, 383, 970};
+ unsigned int normalized_3v3[4] = {1980, 198, 383, 970};
for (i = 0; i < 4; i++)
st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
@@ -307,10 +315,10 @@ static ssize_t ad5933_show_frequency(struct device *dev,
freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
- freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
+ freqreg = (u64)freqreg * (u64)(st->mclk_hz / 4);
do_div(freqreg, 1 << 27);
- return sprintf(buf, "%d\n", (int) freqreg);
+ return sprintf(buf, "%d\n", (int)freqreg);
}
static ssize_t ad5933_store_frequency(struct device *dev,
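
The arithmetic in the hunk above implements the AD5933 relation f_out = freqreg * (MCLK / 4) / 2^27; the same computation as a standalone helper (a sketch, name illustrative; the 16.776 MHz internal clock is assumed in the worked example):

    /* e.g. with MCLK = 16.776 MHz (MCLK / 4 = 4.194 MHz),
     * freqreg = 1006632 gives ~31.46 kHz. */
    static unsigned long example_freqreg_to_hz(unsigned long freqreg,
                                               unsigned long mclk_hz)
    {
        u64 f = (u64)freqreg * (u64)(mclk_hz / 4);

        do_div(f, 1 << 27);
        return (unsigned long)f;
    }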
@@ -358,7 +366,7 @@ static ssize_t ad5933_show(struct device *dev,
int ret = 0, len = 0;
mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%u\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -409,7 +417,7 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
@@ -436,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev,
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
- if (val > 511)
- val = (val >> 1) | (1 << 9);
- else if (val > 1022)
+ if (val > 1022)
val = (val >> 2) | (3 << 9);
+ else if (val > 511)
+ val = (val >> 1) | (1 << 9);
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
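
The reordered bounds check above fixes a dead branch: with the old order, any value over 511 took the 2x path first, so the 4x encoding for values over 1022 was unreachable. A minimal sketch of the corrected encoding (helper name illustrative):

    /* Encode the AD5933 settling-cycles field (2x/4x multipliers per the
     * datasheet). Checking the larger bound first keeps the 4x branch
     * reachable: val = 1500 now encodes as (1500 >> 2) | (3 << 9), where
     * the old ordering always took the 2x branch. */
    static u16 example_encode_settling(u16 val)
    {
        if (val > 1022)
            return (val >> 2) | (3 << 9);
        if (val > 511)
            return (val >> 1) | (1 << 9);
        return val;
    }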
@@ -683,8 +691,9 @@ static void ad5933_work(struct work_struct *work)
}
if (status & AD5933_STAT_SWEEP_DONE) {
- /* last sample received - power down do nothing until
- * the ring enable is toggled */
+ /* last sample received - power down, do
+ * nothing until the ring enable is toggled
+ */
ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
} else {
/* we just received a valid datum, move on to the next */
@@ -699,7 +708,7 @@ static int ad5933_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret, voltage_uv = 0;
- struct ad5933_platform_data *pdata = client->dev.platform_data;
+ struct ad5933_platform_data *pdata = dev_get_platdata(&client->dev);
struct ad5933_state *st;
struct iio_dev *indio_dev;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.h b/drivers/staging/iio/impedance-analyzer/ad5933.h
deleted file mode 100644
index b140e42d6..000000000
--- a/drivers/staging/iio/impedance-analyzer/ad5933.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * AD5933 AD5934 Impedance Converter, Network Analyzer
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_ADC_AD5933_H_
-#define IIO_ADC_AD5933_H_
-
-/*
- * TODO: struct ad5933_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad5933_platform_data - platform specific data
- * @ext_clk_Hz: the external clock frequency in Hz, if not set
- * the driver uses the internal clock (16.776 MHz)
- * @vref_mv: the external reference voltage in millivolt
- */
-
-struct ad5933_platform_data {
- unsigned long ext_clk_Hz;
- unsigned short vref_mv;
-};
-
-#endif /* IIO_ADC_AD5933_H_ */
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 6e2ba458c..2e3b1d64e 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -69,7 +69,6 @@ enum als_ir_mode {
};
struct isl29028_chip {
- struct device *dev;
struct mutex lock;
struct regmap *regmap;
@@ -166,20 +165,21 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
{
+ struct device *dev = regmap_get_device(chip->regmap);
unsigned int lsb;
unsigned int msb;
int ret;
ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading register ALSIR_L err %d\n", ret);
return ret;
}
ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading register ALSIR_U err %d\n", ret);
return ret;
}
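
The isl29028 changes above all follow one pattern: instead of caching a struct device pointer in driver state, it is recovered from the regmap when needed. A minimal sketch (register name reused from the driver, helper name illustrative):

    static int example_read(struct isl29028_chip *chip)
    {
        /* regmap_get_device() returns the device the regmap was created
         * for, so the chip->dev field can be dropped entirely. */
        struct device *dev = regmap_get_device(chip->regmap);
        unsigned int val;
        int ret;

        ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &val);
        if (ret < 0)
            dev_err(dev, "register read failed: %d\n", ret);

        return ret;
    }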
@@ -190,12 +190,13 @@ static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
{
+ struct device *dev = regmap_get_device(chip->regmap);
unsigned int data;
int ret;
ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
if (ret < 0) {
- dev_err(chip->dev, "Error in reading register %d, error %d\n",
+ dev_err(dev, "Error in reading register %d, error %d\n",
ISL29028_REG_PROX_DATA, ret);
return ret;
}
@@ -218,13 +219,14 @@ static int isl29028_proxim_get(struct isl29028_chip *chip, int *prox_data)
static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
int als_ir_data;
if (chip->als_ir_mode != MODE_ALS) {
ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in enabling ALS mode err %d\n", ret);
return ret;
}
@@ -251,12 +253,13 @@ static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
if (chip->als_ir_mode != MODE_IR) {
ret = isl29028_set_als_ir_mode(chip, MODE_IR);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in enabling IR mode err %d\n", ret);
return ret;
}
@@ -271,25 +274,26 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(chip->regmap);
int ret = -EINVAL;
mutex_lock(&chip->lock);
switch (chan->type) {
case IIO_PROXIMITY:
if (mask != IIO_CHAN_INFO_SAMP_FREQ) {
- dev_err(chip->dev,
+ dev_err(dev,
"proximity: mask value 0x%08lx not supported\n",
mask);
break;
}
if (val < 1 || val > 100) {
- dev_err(chip->dev,
+ dev_err(dev,
"Samp_freq %d is not in range[1:100]\n", val);
break;
}
ret = isl29028_set_proxim_sampling(chip, val);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Setting proximity samp_freq fail, err %d\n",
ret);
break;
@@ -299,19 +303,19 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
case IIO_LIGHT:
if (mask != IIO_CHAN_INFO_SCALE) {
- dev_err(chip->dev,
+ dev_err(dev,
"light: mask value 0x%08lx not supported\n",
mask);
break;
}
if ((val != 125) && (val != 2000)) {
- dev_err(chip->dev,
+ dev_err(dev,
"lux scale %d is invalid [125, 2000]\n", val);
break;
}
ret = isl29028_set_als_scale(chip, val);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Setting lux scale fail with error %d\n", ret);
break;
}
@@ -319,7 +323,7 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
break;
default:
- dev_err(chip->dev, "Unsupported channel type\n");
+ dev_err(dev, "Unsupported channel type\n");
break;
}
mutex_unlock(&chip->lock);
@@ -331,6 +335,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(chip->regmap);
int ret = -EINVAL;
mutex_lock(&chip->lock);
@@ -370,7 +375,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
break;
default:
- dev_err(chip->dev, "mask value 0x%08lx not supported\n", mask);
+ dev_err(dev, "mask value 0x%08lx not supported\n", mask);
break;
}
mutex_unlock(&chip->lock);
@@ -417,6 +422,7 @@ static const struct iio_info isl29028_info = {
static int isl29028_chip_init(struct isl29028_chip *chip)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
chip->enable_prox = false;
@@ -426,35 +432,33 @@ static int isl29028_chip_init(struct isl29028_chip *chip)
ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_TEST1_MODE, ret);
return ret;
}
ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_TEST2_MODE, ret);
return ret;
}
ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_CONFIGURE, ret);
return ret;
}
ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
if (ret < 0) {
- dev_err(chip->dev, "setting the proximity, err = %d\n",
- ret);
+ dev_err(dev, "setting the proximity, err = %d\n", ret);
return ret;
}
ret = isl29028_set_als_scale(chip, chip->lux_scale);
if (ret < 0)
- dev_err(chip->dev,
- "setting als scale failed, err = %d\n", ret);
+ dev_err(dev, "setting als scale failed, err = %d\n", ret);
return ret;
}
@@ -496,19 +500,19 @@ static int isl29028_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- chip->dev = &client->dev;
mutex_init(&chip->lock);
chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
- dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
+ dev_err(&client->dev, "regmap initialization failed: %d\n",
+ ret);
return ret;
}
ret = isl29028_chip_init(chip);
if (ret < 0) {
- dev_err(chip->dev, "chip initialization failed: %d\n", ret);
+ dev_err(&client->dev, "chip initialization failed: %d\n", ret);
return ret;
}
@@ -520,7 +524,8 @@ static int isl29028_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
if (ret < 0) {
- dev_err(chip->dev, "iio registration fails with error %d\n",
+ dev_err(&client->dev,
+ "iio registration fails with error %d\n",
ret);
return ret;
}
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 5f308bae4..d553c8e18 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -187,9 +187,11 @@ struct tsl2X7X_chip {
const struct tsl2x7x_chip_info *chip_info;
const struct iio_info *info;
s64 event_timestamp;
- /* This structure is intentionally large to accommodate
- * updates via sysfs. */
- /* Sized to 9 = max 8 segments + 1 termination segment */
+ /*
+ * This structure is intentionally large to accommodate
+ * updates via sysfs.
+ * Sized to 9 = max 8 segments + 1 termination segment
+ */
struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
};
@@ -349,13 +351,13 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
/* device is not enabled */
dev_err(&chip->client->dev, "%s: device is not enabled\n",
- __func__);
+ __func__);
ret = -EBUSY;
goto out_unlock;
}
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
+ (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: Failed to read STATUS Reg\n", __func__);
@@ -371,8 +373,8 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
for (i = 0; i < 4; i++) {
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)),
- &buf[i]);
+ (TSL2X7X_CMD_REG |
+ (TSL2X7X_ALS_CHAN0LO + i)), &buf[i]);
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to read. err=%x\n", ret);
@@ -382,9 +384,9 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* clear any existing interrupt status */
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG |
- TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_ALS_INT_CLR));
+ (TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_ALS_INT_CLR));
if (ret < 0) {
dev_err(&chip->client->dev,
"i2c_write_command failed - err = %d\n", ret);
@@ -411,7 +413,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* calculate ratio */
ratio = (ch1 << 15) / ch0;
/* convert to unscaled lux using the pointer to the table */
- p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux;
+ p = (struct tsl2x7x_lux *)chip->tsl2x7x_device_lux;
while (p->ratio != 0 && p->ratio < ratio)
p++;
@@ -488,7 +490,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
}
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
+ (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
if (ret < 0) {
dev_err(&chip->client->dev, "i2c err=%d\n", ret);
goto prox_poll_err;
@@ -515,8 +517,8 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
for (i = 0; i < 2; i++) {
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG |
- (TSL2X7X_PRX_LO + i)), &chdata[i]);
+ (TSL2X7X_CMD_REG |
+ (TSL2X7X_PRX_LO + i)), &chdata[i]);
if (ret < 0)
goto prox_poll_err;
}
@@ -542,19 +544,19 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
/* If Operational settings defined elsewhere.. */
if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&(chip->tsl2x7x_settings),
- chip->pdata->platform_default_settings,
- sizeof(tsl2x7x_default_settings));
+ memcpy(&chip->tsl2x7x_settings,
+ chip->pdata->platform_default_settings,
+ sizeof(tsl2x7x_default_settings));
else
- memcpy(&(chip->tsl2x7x_settings),
- &tsl2x7x_default_settings,
- sizeof(tsl2x7x_default_settings));
+ memcpy(&chip->tsl2x7x_settings,
+ &tsl2x7x_default_settings,
+ sizeof(tsl2x7x_default_settings));
/* Load up the proper lux table. */
if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
memcpy(chip->tsl2x7x_device_lux,
- chip->pdata->platform_lux_table,
- sizeof(chip->pdata->platform_lux_table));
+ chip->pdata->platform_lux_table,
+ sizeof(chip->pdata->platform_lux_table));
else
memcpy(chip->tsl2x7x_device_lux,
(struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
@@ -576,7 +578,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
int lux_val;
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to write CNTRL register, ret=%d\n", ret);
@@ -592,7 +594,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
}
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to write ctrl reg: ret=%d\n", ret);
@@ -609,7 +611,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
lux_val = tsl2x7x_get_lux(indio_dev);
if (lux_val < 0) {
dev_err(&chip->client->dev,
- "%s: failed to get lux\n", __func__);
+ "%s: failed to get lux\n", __func__);
return lux_val;
}
@@ -620,9 +622,9 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
dev_info(&chip->client->dev,
- "%s als_calibrate completed\n", chip->client->name);
+ "%s als_calibrate completed\n", chip->client->name);
- return (int) gain_trim_val;
+ return (int)gain_trim_val;
}
static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
@@ -695,23 +697,28 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->als_saturation = als_count * 922; /* 90% of full scale */
chip->als_time_scale = (als_time + 25) / 50;
- /* TSL2X7X Specific power-on / adc enable sequence
- * Power on the device 1st. */
+ /*
+ * TSL2X7X Specific power-on / adc enable sequence
+ * Power on the device 1st.
+ */
utmp = TSL2X7X_CNTL_PWR_ON;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed on CNTRL reg.\n", __func__);
return ret;
}
- /* Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers. */
+ /*
+ * Use the following shadow copy for our delay before enabling ADC.
+ * Write all the registers.
+ */
for (i = 0, dev_reg = chip->tsl2x7x_config;
i < TSL2X7X_MAX_CONFIG_REG; i++) {
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG + i, *dev_reg++);
+ TSL2X7X_CMD_REG + i,
+ *dev_reg++);
if (ret < 0) {
dev_err(&chip->client->dev,
"failed on write to reg %d.\n", i);
@@ -721,13 +728,15 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
mdelay(3); /* Power-on settling time */
- /* NOW enable the ADC
- * initialize the desired mode of operation */
+ /*
+ * NOW enable the ADC
+ * initialize the desired mode of operation
+ */
utmp = TSL2X7X_CNTL_PWR_ON |
TSL2X7X_CNTL_ADC_ENBL |
TSL2X7X_CNTL_PROX_DET_ENBL;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed on 2nd CTRL reg.\n", __func__);
@@ -741,12 +750,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
- (chip->tsl2x7x_settings.interrupts_en == 0x30))
+ (chip->tsl2x7x_settings.interrupts_en == 0x30))
reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
reg_val |= chip->tsl2x7x_settings.interrupts_en;
ret = i2c_smbus_write_byte_data(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val);
+ (TSL2X7X_CMD_REG |
+ TSL2X7X_CNTRL), reg_val);
if (ret < 0)
dev_err(&chip->client->dev,
"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
@@ -754,8 +764,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
/* Clear out any initial interrupts */
ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
+ TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_PROXALS_INT_CLR);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: Failed to clear Int status\n",
@@ -776,7 +787,7 @@ static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
if (chip->pdata && chip->pdata->power_off)
chip->pdata->power_off(chip->client);
@@ -819,7 +830,7 @@ int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
static
void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *statP)
+ struct tsl2x7x_prox_stat *statP)
{
int i;
int sample_sum;
@@ -843,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length,
tmp = data[i] - statP->mean;
sample_sum += tmp * tmp;
}
- statP->stddev = int_sqrt((long)sample_sum)/length;
+ statP->stddev = int_sqrt((long)sample_sum) / length;
}
/**
@@ -886,20 +897,21 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
tsl2x7x_get_prox(indio_dev);
prox_history[i] = chip->prox_data;
dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
- i, chip->prox_data);
+ i, chip->prox_data);
}
tsl2x7x_chip_off(indio_dev);
calP = &prox_stat_data[PROX_STAT_CAL];
tsl2x7x_prox_calculate(prox_history,
- chip->tsl2x7x_settings.prox_max_samples_cal, calP);
+ chip->tsl2x7x_settings.prox_max_samples_cal,
+ calP);
chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
- calP->min, calP->mean, calP->max);
+ calP->min, calP->mean, calP->max);
dev_info(&chip->client->dev,
- "%s proximity threshold set to %d\n",
- chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
+ "%s proximity threshold set to %d\n",
+ chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
/* back to the way they were */
chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
@@ -908,7 +920,8 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
}
static ssize_t tsl2x7x_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -916,7 +929,8 @@ static ssize_t tsl2x7x_power_state_show(struct device *dev,
}
static ssize_t tsl2x7x_power_state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -933,7 +947,8 @@ static ssize_t tsl2x7x_power_state_store(struct device *dev,
}
static ssize_t tsl2x7x_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -950,13 +965,15 @@ static ssize_t tsl2x7x_gain_available_show(struct device *dev,
}
static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
}
static ssize_t tsl2x7x_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z;
@@ -970,7 +987,8 @@ static ssize_t tsl2x7x_als_time_show(struct device *dev,
}
static ssize_t tsl2x7x_als_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -986,7 +1004,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->tsl2x7x_settings.als_time);
+ __func__, chip->tsl2x7x_settings.als_time);
tsl2x7x_invoke_change(indio_dev);
@@ -997,7 +1015,8 @@ static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
".00272 - .696");
static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -1006,7 +1025,8 @@ static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
}
static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1025,7 +1045,8 @@ static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
/* persistence settings */
static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z, filter_delay;
@@ -1041,7 +1062,8 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
}
static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1063,7 +1085,7 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
dev_info(&chip->client->dev, "%s: als persistence = %d",
- __func__, filter_delay);
+ __func__, filter_delay);
tsl2x7x_invoke_change(indio_dev);
@@ -1071,7 +1093,8 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
}
static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z, filter_delay;
@@ -1087,7 +1110,8 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
}
static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1109,7 +1133,7 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
dev_info(&chip->client->dev, "%s: prox persistence = %d",
- __func__, filter_delay);
+ __func__, filter_delay);
tsl2x7x_invoke_change(indio_dev);
@@ -1117,7 +1141,8 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
}
static ssize_t tsl2x7x_do_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1134,7 +1159,8 @@ static ssize_t tsl2x7x_do_calibrate(struct device *dev,
}
static ssize_t tsl2x7x_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int i = 0;
@@ -1146,8 +1172,10 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
chip->tsl2x7x_device_lux[i].ch0,
chip->tsl2x7x_device_lux[i].ch1);
if (chip->tsl2x7x_device_lux[i].ratio == 0) {
- /* We just printed the first "0" entry.
- * Now get rid of the extra "," and break. */
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
offset--;
break;
}
@@ -1159,11 +1187,12 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
}
static ssize_t tsl2x7x_luxtable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1];
+ int value[ARRAY_SIZE(chip->tsl2x7x_device_lux) * 3 + 1];
int n;
get_options(buf, ARRAY_SIZE(value), value);
@@ -1175,7 +1204,7 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
*/
n = value[0];
if ((n % 3) || n < 6 ||
- n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
+ n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
return -EINVAL;
}
@@ -1198,7 +1227,8 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
}
static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1391,10 +1421,10 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
}
static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1529,7 +1559,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
u8 value;
value = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_STATUS);
+ TSL2X7X_CMD_REG | TSL2X7X_STATUS);
/* What type of interrupt do we need to process */
if (value & TSL2X7X_STA_PRX_INTR) {
@@ -1545,16 +1575,16 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
if (value & TSL2X7X_STA_ALS_INTR) {
tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
}
/* Clear interrupt now that we have handled it. */
ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
+ TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_PROXALS_INT_CLR);
if (ret < 0)
dev_err(&chip->client->dev,
"Failed to clear irq from event handler. err = %d\n",
@@ -1616,6 +1646,7 @@ static struct attribute *tsl2X7X_ALS_event_attrs[] = {
&dev_attr_in_intensity0_thresh_period.attr,
NULL,
};
+
static struct attribute *tsl2X7X_PRX_event_attrs[] = {
&dev_attr_in_proximity0_thresh_period.attr,
NULL,
@@ -1857,7 +1888,7 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
};
static int tsl2x7x_probe(struct i2c_client *clientp,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
int ret;
unsigned char device_id;
@@ -1873,14 +1904,14 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
i2c_set_clientdata(clientp, indio_dev);
ret = tsl2x7x_i2c_read(chip->client,
- TSL2X7X_CHIPID, &device_id);
+ TSL2X7X_CHIPID, &device_id);
if (ret < 0)
return ret;
if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
- (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
+ (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
dev_info(&chip->client->dev,
- "%s: i2c device found does not match expected id\n",
+ "%s: i2c device found does not match expected id\n",
__func__);
return -EINVAL;
}
@@ -1892,8 +1923,10 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
return ret;
}
- /* ALS and PROX functions can be invoked via user space poll
- * or H/W interrupt. If busy return last sample. */
+ /*
+ * ALS and PROX functions can be invoked via user space poll
+ * or H/W interrupt. If busy, return the last sample.
+ */
mutex_init(&chip->als_mutex);
mutex_init(&chip->prox_mutex);
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 69287108f..4b5f05fda 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -333,7 +333,8 @@ static int ade7753_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(3); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(3);
@@ -528,7 +529,6 @@ static int ade7753_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-/* fixme, confirm ordering in this function */
static int ade7753_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f4188e17d..c46bef641 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -351,7 +351,8 @@ static int ade7754_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(14); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(14);
@@ -558,7 +559,6 @@ powerdown_on_error:
return ret;
}
-/* fixme, confirm ordering in this function */
static int ade7754_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index f6739e2c2..1d04ec952 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -129,6 +129,7 @@ struct ade7758_state {
unsigned char tx_buf[8];
};
+
#ifdef CONFIG_IIO_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
@@ -138,25 +139,22 @@ void ade7758_remove_trigger(struct iio_dev *indio_dev);
int ade7758_probe_trigger(struct iio_dev *indio_dev);
ssize_t ade7758_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
+ struct device_attribute *attr, char *buf);
int ade7758_configure_ring(struct iio_dev *indio_dev);
void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
int ade7758_set_irq(struct device *dev, bool enable);
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address, u8 val);
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address, u8 *val);
+int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val);
+int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val);
#else /* CONFIG_IIO_BUFFER */
static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
{
}
+
static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
@@ -166,16 +164,20 @@ static int ade7758_configure_ring(struct iio_dev *indio_dev)
{
return 0;
}
+
static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
}
+
static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
{
return 0;
}
+
static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
{
}
+
#endif /* CONFIG_IIO_BUFFER */
#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 40f5afaa9..ebb8a1993 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -24,9 +24,7 @@
#include "meter.h"
#include "ade7758.h"
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
+int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -42,9 +40,8 @@ int ade7758_spi_write_reg_8(struct device *dev,
return ret;
}
-static int ade7758_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
+static int ade7758_spi_write_reg_16(struct device *dev, u8 reg_address,
+ u16 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -68,9 +65,8 @@ static int ade7758_spi_write_reg_16(struct device *dev,
return ret;
}
-static int ade7758_spi_write_reg_24(struct device *dev,
- u8 reg_address,
- u32 value)
+static int ade7758_spi_write_reg_24(struct device *dev, u8 reg_address,
+ u32 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -95,9 +91,7 @@ static int ade7758_spi_write_reg_24(struct device *dev,
return ret;
}
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
+int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -124,7 +118,7 @@ int ade7758_spi_read_reg_8(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = st->rx[0];
@@ -134,9 +128,8 @@ error_ret:
return ret;
}
-static int ade7758_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
+static int ade7758_spi_read_reg_16(struct device *dev, u8 reg_address,
+ u16 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -156,7 +149,6 @@ static int ade7758_spi_read_reg_16(struct device *dev,
},
};
-
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7758_READ_REG(reg_address);
st->tx[1] = 0;
@@ -165,7 +157,7 @@ static int ade7758_spi_read_reg_16(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
@@ -176,9 +168,8 @@ error_ret:
return ret;
}
-static int ade7758_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
+static int ade7758_spi_read_reg_24(struct device *dev, u8 reg_address,
+ u32 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -207,7 +198,7 @@ static int ade7758_spi_read_reg_24(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
@@ -218,8 +209,7 @@ error_ret:
}
static ssize_t ade7758_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 val = 0;
@@ -233,8 +223,7 @@ static ssize_t ade7758_read_8bit(struct device *dev,
}
static ssize_t ade7758_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u16 val = 0;
@@ -248,8 +237,7 @@ static ssize_t ade7758_read_16bit(struct device *dev,
}
static ssize_t ade7758_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u32 val = 0;
@@ -263,9 +251,8 @@ static ssize_t ade7758_read_24bit(struct device *dev,
}
static ssize_t ade7758_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -281,9 +268,8 @@ error_ret:
}
static ssize_t ade7758_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -427,7 +413,8 @@ int ade7758_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(16); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(16);
@@ -479,16 +466,13 @@ err_ret:
}
static ssize_t ade7758_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 t;
int sps;
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &t);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
if (ret)
return ret;
@@ -499,9 +483,8 @@ static ssize_t ade7758_read_frequency(struct device *dev,
}
static ssize_t ade7758_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
u16 val;
@@ -532,18 +515,14 @@ static ssize_t ade7758_write_frequency(struct device *dev,
goto out;
}
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &reg);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
if (ret)
goto out;
reg &= ~(5 << 3);
reg |= t << 5;
- ret = ade7758_spi_write_reg_8(dev,
- ADE7758_WAVMODE,
- reg);
+ ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
out:
mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 9a24e0226..a6b76d4b1 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -33,7 +33,7 @@ static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
return ret;
}
-static int ade7758_write_waveform_type(struct device *dev, unsigned type)
+static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
{
int ret;
u8 reg;
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
**/
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
- unsigned channel;
+ unsigned int channel;
if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 684e612a8..80144d40d 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -289,7 +289,8 @@ static int ade7759_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(3); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(3);
@@ -476,7 +477,6 @@ static int ade7759_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-/* fixme, confirm ordering in this function */
static int ade7759_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 9e439af71..75e8685e6 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -421,7 +421,8 @@ static int ade7854_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(17); /* 1: interrupt enabled when all periodical
- (at 8 kHz rate) DSP computations finish. */
+ * (at 8 kHz rate) DSP computations finish.
+ */
else
irqen &= ~BIT(17);
diff --git a/drivers/staging/iio/resolver/ad2s1210.h b/drivers/staging/iio/resolver/ad2s1210.h
index c7158f6e6..e9b214770 100644
--- a/drivers/staging/iio/resolver/ad2s1210.h
+++ b/drivers/staging/iio/resolver/ad2s1210.h
@@ -12,9 +12,9 @@
#define _AD2S1210_H
struct ad2s1210_platform_data {
- unsigned sample;
- unsigned a[2];
- unsigned res[2];
- bool gpioin;
+ unsigned int sample;
+ unsigned int a[2];
+ unsigned int res[2];
+ bool gpioin;
};
#endif /* _AD2S1210_H */
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 035dd456d..38dca69a0 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -55,12 +55,12 @@ static struct bfin_timer iio_bfin_timer_code[MAX_BLACKFIN_GPTIMERS] = {
};
struct bfin_tmr_state {
- struct iio_trigger *trig;
- struct bfin_timer *t;
- unsigned timer_num;
- bool output_enable;
- unsigned int duty;
- int irq;
+ struct iio_trigger *trig;
+ struct bfin_timer *t;
+ unsigned int timer_num;
+ bool output_enable;
+ unsigned int duty;
+ int irq;
};
static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
@@ -178,7 +178,7 @@ static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
{
- struct iio_bfin_timer_trigger_pdata *pdata = pdev->dev.platform_data;
+ struct iio_bfin_timer_trigger_pdata *pdata;
struct bfin_tmr_state *st;
unsigned int config;
int ret;
@@ -221,6 +221,7 @@ static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
config = PWM_OUT | PERIOD_CNT | IRQ_ENA;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->output_enable) {
unsigned long long val;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 40af75c42..4141afb10 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -60,41 +60,12 @@
#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
/*
- * libcfs pseudo device operations
- *
- * It's just draft now.
- */
-
-struct cfs_psdev_file {
- unsigned long off;
- void *private_data;
- unsigned long reserved1;
- unsigned long reserved2;
-};
-
-struct cfs_psdev_ops {
- int (*p_open)(unsigned long, void *);
- int (*p_close)(unsigned long, void *);
- int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
-};
-
-/*
- * Drop into debugger, if possible. Implementation is provided by platform.
- */
-
-void cfs_enter_debugger(void);
-
-/*
* Defined by platform
*/
-int unshare_fs_struct(void);
sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
void cfs_restore_sigs(sigset_t);
-int cfs_signal_pending(void);
void cfs_clear_sigpending(void);
/*
@@ -117,7 +88,25 @@ void cfs_get_random_bytes(void *buf, int size);
#include "libcfs_workitem.h"
#include "libcfs_hash.h"
#include "libcfs_fail.h"
-#include "libcfs_crypto.h"
+
+struct libcfs_ioctl_handler {
+ struct list_head item;
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
+};
+
+#define DECLARE_IOCTL_HANDLER(ident, func) \
+ struct libcfs_ioctl_handler ident = { \
+ .item = LIST_HEAD_INIT(ident.item), \
+ .handle_ioctl = func \
+ }
+
+int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+
+int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ const struct libcfs_ioctl_hdr __user *uparam);
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
+int libcfs_ioctl(unsigned long cmd, void __user *arg);
/* container_of depends on "likely" which is defined in libcfs_private.h */
static inline void *__container_of(void *ptr, unsigned long shift)
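
For context, a handler wired up with the DECLARE_IOCTL_HANDLER() macro introduced above would look roughly like this (a sketch; the example_* names are illustrative):

    static int example_handle_ioctl(unsigned int cmd,
                                    struct libcfs_ioctl_hdr *hdr)
    {
        /* decode cmd and hdr here */
        return -EINVAL;
    }

    static DECLARE_IOCTL_HANDLER(example_hand, example_handle_ioctl);

    /* registered from module init, torn down on exit:
     *     rc = libcfs_register_ioctl(&example_hand);
     *     ...
     *     libcfs_deregister_ioctl(&example_hand);
     */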
@@ -143,8 +132,6 @@ extern struct miscdevice libcfs_dev;
extern char lnet_upcall[1024];
extern char lnet_debug_log_upcall[1024];
-extern struct cfs_psdev_ops libcfs_psdev_ops;
-
extern struct cfs_wi_sched *cfs_sched_rehash;
struct lnet_debugfs_symlink_def {
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 9e62c5971..81d8079e3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -203,6 +203,85 @@ int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
*/
int cfs_cpu_ht_nsiblings(int cpu);
+/*
+ * allocate per-cpu-partition data; the returned value is an array of
+ * pointers that can be indexed by CPU ID.
+ * cptab != NULL: size of array is number of CPU partitions
+ * cptab == NULL: size of array is number of HW cores
+ */
+void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
+/*
+ * destroy per-cpu-partition variable
+ */
+void cfs_percpt_free(void *vars);
+int cfs_percpt_number(void *vars);
+
+#define cfs_percpt_for_each(var, i, vars) \
+ for (i = 0; i < cfs_percpt_number(vars) && \
+ ((var) = (vars)[i]) != NULL; i++)
+
+/*
+ * percpu partition lock
+ *
+ * There are some use-cases like this in Lustre:
+ * . each CPU partition has its own private data which is frequently changed,
+ * and mostly by the local CPU partition.
+ * . all CPU partitions share some global data; these data are rarely changed.
+ *
+ * LNet is a typical example.
+ * CPU partition lock is designed for this kind of use-case:
+ * . each CPU partition has its own private lock
+ * . change on private data just needs to take the private lock
+ * . read on shared data just needs to take _any_ of the private locks
+ * . change on shared data needs to take _all_ private locks,
+ * which is slow and should be really rare.
+ */
+enum {
+ CFS_PERCPT_LOCK_EX = -1, /* negative */
+};
+
+struct cfs_percpt_lock {
+ /* cpu-partition-table for this lock */
+ struct cfs_cpt_table *pcl_cptab;
+ /* exclusively locked */
+ unsigned int pcl_locked;
+ /* private lock table */
+ spinlock_t **pcl_locks;
+};
+
+/* return number of private locks */
+#define cfs_percpt_lock_num(pcl) cfs_cpt_number(pcl->pcl_cptab)
+
+/*
+ * create a cpu-partition lock based on CPU partition table \a cptab,
+ * with one private lock per CPU partition
+ */
+struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys);
+/* destroy a cpu-partition lock */
+void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
+
+/* lock private lock \a index of \a pcl */
+void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
+
+/* unlock private lock \a index of \a pcl */
+void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
+
+#define CFS_PERCPT_LOCK_KEYS 256
+
+/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
+#define cfs_percpt_lock_alloc(cptab) \
+({ \
+ static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \
+ struct cfs_percpt_lock *___lk; \
+ \
+ if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \
+ ___lk = cfs_percpt_lock_create(cptab, NULL); \
+ else \
+ ___lk = cfs_percpt_lock_create(cptab, ___keys); \
+ ___lk; \
+})
+
/**
* iterate over all CPU partitions in \a cptab
*/
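
A minimal sketch of how the per-partition allocation and locking primitives added above fit together (names illustrative; error handling elided):

    struct example_data {
        long counter;
    };

    static struct example_data **example_vars; /* one per CPU partition */
    static struct cfs_percpt_lock *example_lock;

    static void example_init(struct cfs_cpt_table *cptab)
    {
        example_vars = cfs_percpt_alloc(cptab, sizeof(**example_vars));
        example_lock = cfs_percpt_lock_alloc(cptab);
    }

    static void example_update_private(int cpt)
    {
        cfs_percpt_lock(example_lock, cpt); /* this partition's lock only */
        example_vars[cpt]->counter++;
        cfs_percpt_unlock(example_lock, cpt);
    }

    static void example_update_shared(void)
    {
        /* rare path: CFS_PERCPT_LOCK_EX takes every private lock */
        cfs_percpt_lock(example_lock, CFS_PERCPT_LOCK_EX);
        /* change the shared data here */
        cfs_percpt_unlock(example_lock, CFS_PERCPT_LOCK_EX);
    }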
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index e8663697e..02be7d760 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -46,7 +46,8 @@ enum cfs_crypto_hash_alg {
CFS_HASH_ALG_SHA384,
CFS_HASH_ALG_SHA512,
CFS_HASH_ALG_CRC32C,
- CFS_HASH_ALG_MAX
+ CFS_HASH_ALG_MAX,
+ CFS_HASH_ALG_UNKNOWN = 0xff
};
static struct cfs_crypto_hash_type hash_types[] = {
@@ -59,11 +60,22 @@ static struct cfs_crypto_hash_type hash_types[] = {
[CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 },
[CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 },
[CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 },
+ [CFS_HASH_ALG_MAX] = { NULL, 0, 64 },
};
-/** Return pointer to type of hash for valid hash algorithm identifier */
+/* Maximum size of hash_types[].cht_size */
+#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64
+
+/**
+ * Return hash algorithm information for the specified algorithm identifier
+ *
+ * Hash information includes algorithm name, initial seed, and hash size.
+ *
+ * \retval cfs_crypto_hash_type for valid ID (CFS_HASH_ALG_*)
+ * \retval NULL for unknown algorithm identifier
+ */
static inline const struct cfs_crypto_hash_type *
- cfs_crypto_hash_type(unsigned char hash_alg)
+cfs_crypto_hash_type(enum cfs_crypto_hash_alg hash_alg)
{
struct cfs_crypto_hash_type *ht;
@@ -75,8 +87,16 @@ static inline const struct cfs_crypto_hash_type *
return NULL;
}
-/** Return hash name for valid hash algorithm identifier or "unknown" */
-static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
+/**
+ * Return hash name for hash algorithm identifier
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval string name of known hash algorithm
+ * \retval "unknown" if hash algorithm is unknown
+ */
+static inline const char *
+cfs_crypto_hash_name(enum cfs_crypto_hash_alg hash_alg)
{
const struct cfs_crypto_hash_type *ht;
@@ -86,8 +106,15 @@ static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
return "unknown";
}
-/** Return digest size for valid algorithm identifier or 0 */
-static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
+/**
+ * Return digest size for hash algorithm type
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval hash algorithm digest size in bytes
+ * \retval 0 if hash algorithm type is unknown
+ */
+static inline int cfs_crypto_hash_digestsize(enum cfs_crypto_hash_alg hash_alg)
{
const struct cfs_crypto_hash_type *ht;
@@ -97,36 +124,24 @@ static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
return 0;
}
-/** Return hash identifier for valid hash algorithm name or 0xFF */
+/**
+ * Find hash algorithm ID for the specified algorithm name
+ *
+ * \retval hash algorithm ID for valid ID (CFS_HASH_ALG_*)
+ * \retval CFS_HASH_ALG_UNKNOWN for unknown algorithm name
+ */
static inline unsigned char cfs_crypto_hash_alg(const char *algname)
{
- unsigned char i;
+ enum cfs_crypto_hash_alg hash_alg;
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- if (!strcmp(hash_types[i].cht_name, algname))
- break;
- return (i == CFS_HASH_ALG_MAX ? 0xFF : i);
+ for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
+ if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
+ return hash_alg;
+
+ return CFS_HASH_ALG_UNKNOWN;
}
-/** Calculate hash digest for buffer.
- * @param alg id of hash algorithm
- * @param buf buffer of data
- * @param buf_len buffer len
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @param hash [out] pointer to hash, if it is NULL, hash_len is
- * set to valid digest size in bytes, retval -ENOSPC.
- * @param hash_len [in,out] size of hash buffer
- * @returns status of operation
- * @retval -EINVAL if buf, buf_len, hash_len or alg_id is invalid
- * @retval -ENODEV if this algorithm is unsupported
- * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
-int cfs_crypto_hash_digest(unsigned char alg,
+int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len);
@@ -134,66 +149,17 @@ int cfs_crypto_hash_digest(unsigned char alg,
/* cfs crypto hash descriptor */
struct cfs_crypto_hash_desc;
-/** Allocate and initialize descriptor for hash algorithm.
- * @param alg algorithm id
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @returns pointer to descriptor of hash instance
- * @retval ERR_PTR(error) when errors occurred.
- */
-struct cfs_crypto_hash_desc*
- cfs_crypto_hash_init(unsigned char alg,
- unsigned char *key, unsigned int key_len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param page data page
- * @param offset data offset
- * @param len data len
- * @returns status of operation
- * @retval 0 for success.
- */
+struct cfs_crypto_hash_desc *
+cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
+ unsigned char *key, unsigned int key_len);
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
struct page *page, unsigned int offset,
unsigned int len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param buf pointer to data buffer
- * @param buf_len size of data at buffer
- * @returns status of operation
- * @retval 0 for success.
- */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
unsigned int buf_len);
-
-/** Finalize hash calculation, copy hash digest to buffer, destroy hash
- * descriptor.
- * @param desc hash descriptor
- * @param hash buffer pointer to store hash digest
- * @param hash_len pointer to hash buffer size, if NULL
- * destroy hash descriptor
- * @returns status of operation
- * @retval -ENOSPC if hash is NULL, or *hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
unsigned char *hash, unsigned int *hash_len);
-/**
- * Register crypto hash algorithms
- */
int cfs_crypto_register(void);
-
-/**
- * Unregister
- */
void cfs_crypto_unregister(void);
-
-/** Return hash speed in Mbytes per second for valid hash algorithm
- * identifier. If test was unsuccessful -1 would be returned.
- */
-int cfs_crypto_hash_speed(unsigned char hash_alg);
+int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg);
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 98430e710..455c54d0d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -85,7 +85,6 @@ struct ptldebug_header {
#define PH_FLAG_FIRST_RECORD 1
/* Debugging subsystems (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
#define S_UNDEFINED 0x00000001
#define S_MDC 0x00000002
#define S_MDS 0x00000004
@@ -118,10 +117,14 @@ struct ptldebug_header {
#define S_MGS 0x20000000
#define S_FID 0x40000000 /* b_new_cmd */
#define S_FLD 0x80000000 /* b_new_cmd */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
+
+#define LIBCFS_DEBUG_SUBSYS_NAMES { \
+ "undefined", "mdc", "mds", "osc", "ost", "class", "log", \
+ "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", "", \
+ "echo", "ldlm", "lov", "lquota", "osd", "lfsck", "", "", "lmv", \
+ "", "sec", "gss", "", "mgc", "mgs", "fid", "fld", NULL }
/* Debugging masks (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
#define D_INODE 0x00000002
#define D_SUPER 0x00000004
@@ -151,9 +154,14 @@ struct ptldebug_header {
#define D_QUOTA 0x04000000
#define D_SEC 0x08000000
#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
-/* keep these in sync with lnet/{utils,libcfs}/debug.c */
+#define D_HSM 0x20000000
-#define D_HSM D_TRACE
+#define LIBCFS_DEBUG_MASKS_NAMES { \
+ "trace", "inode", "super", "ext2", "malloc", "cache", "info", \
+ "ioctl", "neterror", "net", "warning", "buffs", "other", \
+ "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \
+ "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \
+ "console", "quota", "sec", "lfsck", "hsm", NULL }
#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index aa69c6a33..2e008bffc 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -38,6 +38,7 @@
extern unsigned long cfs_fail_loc;
extern unsigned int cfs_fail_val;
+extern int cfs_fail_err;
extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
@@ -70,9 +71,14 @@ enum {
#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
#define CFS_FAIL_USR1 0x04000000 /* user flag */
-#define CFS_FAIL_PRECHECK(id) (cfs_fail_loc && \
- (cfs_fail_loc & CFS_FAIL_MASK_LOC) == \
- ((id) & CFS_FAIL_MASK_LOC))
+#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
+
+static inline bool CFS_FAIL_PRECHECK(__u32 id)
+{
+ return cfs_fail_loc != 0 &&
+ ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
+ (cfs_fail_loc & id & CFS_FAULT));
+}
static inline int cfs_fail_check_set(__u32 id, __u32 value,
int set, int quiet)
@@ -144,6 +150,9 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
+#define CFS_FAULT_CHECK(id) \
+ CFS_FAIL_CHECK(CFS_FAULT | (id))
+
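The new CFS_FAULT bit lets one global setting arm every fault-injection site at once; a sketch, with a hypothetical 0x1234 location value:

	cfs_fail_loc = CFS_FAULT;	/* arm all CFS_FAULT_CHECK() sites */
	if (CFS_FAULT_CHECK(0x1234))	/* matches via the shared CFS_FAULT bit */
		return -EIO;

Without CFS_FAULT set, the same check still matches only on an exact CFS_FAIL_MASK_LOC location, as before.
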
/* The idea here is to synchronise two threads to force a race. The
* first thread that calls this with a matching fail_loc is put to
* sleep. The next thread that calls with the same fail_loc wakes up
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index c3f2332fa..119986bc7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -245,7 +245,7 @@ struct cfs_hash {
/** # of iterators (caller of cfs_hash_for_each_*) */
__u32 hs_iterators;
/** rehash workitem */
- cfs_workitem_t hs_rehash_wi;
+ struct cfs_workitem hs_rehash_wi;
/** refcount on this hash table */
atomic_t hs_refcount;
/** rehash buckets-table */
@@ -262,7 +262,7 @@ struct cfs_hash {
/** bits when we found the max depth */
unsigned int hs_dep_bits;
/** workitem to output max depth */
- cfs_workitem_t hs_dep_wi;
+ struct cfs_workitem hs_dep_wi;
#endif
/** name of htable */
char hs_name[0];
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 5ca99bd6f..4b9102bd9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -34,13 +34,16 @@
* libcfs/include/libcfs/libcfs_ioctl.h
*
* Low-level ioctl data structures. Kernel ioctl functions declared here,
- * and user space functions are in libcfsutil_ioctl.h.
+ * and user space functions are in libcfs/util/ioctl.h.
*
*/
#ifndef __LIBCFS_IOCTL_H__
#define __LIBCFS_IOCTL_H__
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
#define LIBCFS_IOCTL_VERSION 0x0001000a
#define LIBCFS_IOCTL_VERSION2 0x0001000b
@@ -49,6 +52,9 @@ struct libcfs_ioctl_hdr {
__u32 ioc_version;
};
+/** max size to copy from userspace */
+#define LIBCFS_IOC_DATA_MAX (128 * 1024)
+
struct libcfs_ioctl_data {
struct libcfs_ioctl_hdr ioc_hdr;
@@ -73,67 +79,48 @@ struct libcfs_ioctl_data {
char ioc_bulk[0];
};
-#define ioc_priority ioc_u32[0]
-
struct libcfs_debug_ioctl_data {
struct libcfs_ioctl_hdr hdr;
unsigned int subs;
unsigned int debug;
};
-#define LIBCFS_IOC_INIT(data) \
-do { \
- memset(&data, 0, sizeof(data)); \
- data.ioc_version = LIBCFS_IOCTL_VERSION; \
- data.ioc_len = sizeof(data); \
-} while (0)
-
-struct libcfs_ioctl_handler {
- struct list_head item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
-};
-
-#define DECLARE_IOCTL_HANDLER(ident, func) \
- struct libcfs_ioctl_handler ident = { \
- /* .item = */ LIST_HEAD_INIT(ident.item), \
- /* .handle_ioctl = */ func \
- }
+/* 'f' ioctls are defined in lustre_ioctl.h and lustre_user.h except for: */
+#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
+#define IOCTL_LIBCFS_TYPE long
-/* FIXME check conflict with lustre_lib.h */
-#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
-
-#define IOC_LIBCFS_TYPE 'e'
-#define IOC_LIBCFS_MIN_NR 30
+#define IOC_LIBCFS_TYPE ('e')
+#define IOC_LIBCFS_MIN_NR 30
/* libcfs ioctls */
-#define IOC_LIBCFS_PANIC _IOWR('e', 30, long)
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
-#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
+/* IOC_LIBCFS_PANIC obsolete in 2.8.0, was _IOWR('e', 30, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_MEMHOG obsolete in 2.8.0, was _IOWR('e', 36, IOCTL_LIBCFS_TYPE) */
/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
-/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
-#define IOC_LIBCFS_PING _IOWR('e', 61, long)
-/* #define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) */
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
-#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, long)
+#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, IOCTL_LIBCFS_TYPE)
/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long)
+#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
/* ioctl 77 is free for use */
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
+#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
/*
* DLC Specific IOCTL numbers.
@@ -155,76 +142,4 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
#define IOC_LIBCFS_MAX_NR 91
-static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
-{
- int len = sizeof(*data);
-
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- return len;
-}
-
-static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
-{
- if (data->ioc_hdr.ioc_len > (1 << 30)) {
- CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen1 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen2 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
- return 1;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
- return 1;
- }
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
- CERROR("LIBCFS ioctl: packlen != ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen1 &&
- data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
- return 1;
- }
- if (data->ioc_inllen2 &&
- data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
- data->ioc_inllen2 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
- return 1;
- }
- return 0;
-}
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
- __u32 *buf_len);
-int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
-int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
-
#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 082fe6de9..ac4e8cfe6 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -40,21 +40,32 @@
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
-void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
-
/*
* Memory
*/
-#ifndef memory_pressure_get
-#define memory_pressure_get() (0)
-#endif
-#ifndef memory_pressure_set
-#define memory_pressure_set() do {} while (0)
-#endif
-#ifndef memory_pressure_clr
-#define memory_pressure_clr() do {} while (0)
+#if BITS_PER_LONG == 32
+/* limit to lowmem on 32-bit systems */
+#define NUM_CACHEPAGES \
+ min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
+#else
+#define NUM_CACHEPAGES totalram_pages
#endif
+static inline unsigned int memory_pressure_get(void)
+{
+ return current->flags & PF_MEMALLOC;
+}
+
+static inline void memory_pressure_set(void)
+{
+ current->flags |= PF_MEMALLOC;
+}
+
+static inline void memory_pressure_clr(void)
+{
+ current->flags &= ~PF_MEMALLOC;
+}
+
static inline int cfs_memory_pressure_get_and_set(void)
{
int old = memory_pressure_get();
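
A sketch of the intended save/restore pattern around an allocation-sensitive section (hypothetical caller; assumes the companion cfs_memory_pressure_restore() helper later in this header):

	int old = cfs_memory_pressure_get_and_set();

	/* ... allocation that must not recurse into reclaim ... */

	cfs_memory_pressure_restore(old);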
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 13335437c..2fd2a9690 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -182,25 +182,6 @@ int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);
/*
- * allocate per-cpu-partition data, returned value is an array of pointers,
- * variable can be indexed by CPU ID.
- * cptable != NULL: size of array is number of CPU partitions
- * cptable == NULL: size of array is number of HW cores
- */
-void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
-/*
- * destroy per-cpu-partition variable
- */
-void cfs_percpt_free(void *vars);
-int cfs_percpt_number(void *vars);
-void *cfs_percpt_current(void *vars);
-void *cfs_percpt_index(void *vars, int idx);
-
-#define cfs_percpt_for_each(var, i, vars) \
- for (i = 0; i < cfs_percpt_number(vars) && \
- ((var) = (vars)[i]) != NULL; i++)
-
-/*
* allocate a variable array, returned value is an array of pointers.
* Caller can specify length of array by count.
*/
@@ -302,62 +283,6 @@ do { \
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
-/*
- * percpu partition lock
- *
- * There are some use-cases like this in Lustre:
- * . each CPU partition has it's own private data which is frequently changed,
- * and mostly by the local CPU partition.
- * . all CPU partitions share some global data, these data are rarely changed.
- *
- * LNet is typical example.
- * CPU partition lock is designed for this kind of use-cases:
- * . each CPU partition has it's own private lock
- * . change on private data just needs to take the private lock
- * . read on shared data just needs to take _any_ of private locks
- * . change on shared data needs to take _all_ private locks,
- * which is slow and should be really rare.
- */
-
-enum {
- CFS_PERCPT_LOCK_EX = -1, /* negative */
-};
-
-struct cfs_percpt_lock {
- /* cpu-partition-table for this lock */
- struct cfs_cpt_table *pcl_cptab;
- /* exclusively locked */
- unsigned int pcl_locked;
- /* private lock table */
- spinlock_t **pcl_locks;
-};
-
-/* return number of private locks */
-static inline int
-cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
-{
- return cfs_cpt_number(pcl->pcl_cptab);
-}
-
-/*
- * create a cpu-partition lock based on CPU partition table \a cptab,
- * each private lock has extra \a psize bytes padding data
- */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-/* destroy a cpu-partition lock */
-void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
-
-/* lock private lock \a index of \a pcl */
-void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
-/* unlock private lock \a index of \a pcl */
-void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
-/* create percpt (atomic) refcount based on @cptab */
-atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
-/* destroy percpt refcount */
-void cfs_percpt_atomic_free(atomic_t **refs);
-/* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(atomic_t **refs);
-
/** Compile-time assertion.
* Check an invariant described by a constant expression at compile time by
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index 5cc64f327..f9b20c5ac 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -73,7 +73,7 @@ int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
struct cfs_workitem;
typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
-typedef struct cfs_workitem {
+struct cfs_workitem {
/** chain on runq or rerunq */
struct list_head wi_list;
/** working function */
@@ -84,10 +84,10 @@ typedef struct cfs_workitem {
unsigned short wi_running:1;
/** scheduled */
unsigned short wi_scheduled:1;
-} cfs_workitem_t;
+};
static inline void
-cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
+cfs_wi_init(struct cfs_workitem *wi, void *data, cfs_wi_action_t action)
{
INIT_LIST_HEAD(&wi->wi_list);
@@ -97,9 +97,9 @@ cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
wi->wi_action = action;
}
-void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+void cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+void cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
int cfs_wi_startup(void);
void cfs_wi_shutdown(void);
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index d94b26616..a268ef7aa 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -60,6 +60,7 @@
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
@@ -83,7 +84,6 @@
#include <stdarg.h>
#include "linux-cpu.h"
#include "linux-time.h"
-#include "linux-mem.h"
#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index c04979ae0..f63cb47bc 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -23,7 +23,7 @@
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * libcfs/include/libcfs/linux/linux-mem.h
+ * libcfs/include/libcfs/linux/linux-cpu.h
*
* Basic library routines.
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
deleted file mode 100644
index 837eb2274..000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-mem.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_MEM_H__
-#define __LIBCFS_LINUX_CFS_MEM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/memcontrol.h>
-#include <linux/mm_inline.h>
-
-#ifndef HAVE_LIBCFS_CPT
-/* Need this for cfs_cpt_table */
-#include "../libcfs_cpu.h"
-#endif
-
-#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
-#define page_index(p) ((p)->index)
-
-#define memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
-
-#if BITS_PER_LONG == 32
-/* limit to lowmem on 32-bit systems */
-#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
-#else
-#define NUM_CACHEPAGES totalram_pages
-#endif
-
-#define DECL_MMSPACE mm_segment_t __oldfs
-#define MMSPACE_OPEN \
- do { __oldfs = get_fs(); set_fs(get_ds()); } while (0)
-#define MMSPACE_CLOSE set_fs(__oldfs)
-
-#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index ed8764b11..7656b09b8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -70,12 +70,12 @@ static inline unsigned long cfs_time_current(void)
static inline long cfs_time_seconds(int seconds)
{
- return ((long)seconds) * HZ;
+ return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
}
static inline long cfs_duration_sec(long d)
{
- return d / HZ;
+ return d / msecs_to_jiffies(MSEC_PER_SEC);
}
#define cfs_time_current_64 get_jiffies_64
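
Worked example: since msecs_to_jiffies(MSEC_PER_SEC) evaluates to HZ, with a hypothetical HZ of 250 cfs_time_seconds(3) still yields 750 jiffies and cfs_duration_sec(750) still yields 3; the conversion now goes through the canonical helper rather than the bare HZ constant without changing results.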
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
index 84a19e96e..6ce9accb9 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -37,10 +37,37 @@
#define LNET_MAX_SHOW_NUM_CPT 128
#define LNET_UNDEFINED_HOPS ((__u32) -1)
+struct lnet_ioctl_config_lnd_cmn_tunables {
+ __u32 lct_version;
+ __u32 lct_peer_timeout;
+ __u32 lct_peer_tx_credits;
+ __u32 lct_peer_rtr_credits;
+ __u32 lct_max_tx_credits;
+};
+
+struct lnet_ioctl_config_o2iblnd_tunables {
+ __u32 lnd_version;
+ __u32 lnd_peercredits_hiw;
+ __u32 lnd_map_on_demand;
+ __u32 lnd_concurrent_sends;
+ __u32 lnd_fmr_pool_size;
+ __u32 lnd_fmr_flush_trigger;
+ __u32 lnd_fmr_cache;
+ __u32 pad;
+};
+
+struct lnet_ioctl_config_lnd_tunables {
+ struct lnet_ioctl_config_lnd_cmn_tunables lt_cmn;
+ union {
+ struct lnet_ioctl_config_o2iblnd_tunables lt_o2ib;
+ } lt_tun_u;
+};
+
struct lnet_ioctl_net_config {
char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
__u32 ni_status;
__u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
+ char cfg_bulk[0];
};
#define LNET_TINY_BUF_IDX 0
@@ -81,7 +108,7 @@ struct lnet_ioctl_config_data {
__s32 net_peer_rtr_credits;
__s32 net_max_tx_credits;
__u32 net_cksum_algo;
- __u32 net_pad;
+ __u32 net_interface_count;
} cfg_net;
struct {
__u32 buf_enable;
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index dfc0208dc..513a8225f 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -463,10 +463,6 @@ int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
-int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
- int *peer_timeout, int *peer_tx_credits,
- int *peer_rtr_cr, int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config);
int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
void lnet_router_debugfs_init(void);
@@ -478,9 +474,8 @@ int lnet_rtrpools_enable(void);
void lnet_rtrpools_disable(void);
void lnet_rtrpools_free(int keep_pools);
lnet_remotenet_t *lnet_find_net_locked(__u32 net);
-int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid,
+ struct lnet_ioctl_config_data *conf);
int lnet_dyn_del_ni(__u32 net);
int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 29c72f8c2..24c4a08e6 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -273,6 +273,8 @@ typedef struct lnet_ni {
int **ni_refs; /* percpt reference count */
time64_t ni_last_alive;/* when I was last alive */
lnet_ni_status_t *ni_status; /* my health status */
+ /* per NI LND tunables */
+ struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables;
/* equivalent interfaces to use */
char *ni_interfaces[LNET_MAX_INTERFACES];
} lnet_ni_t;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 0d32e6541..6c59f2ff2 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -335,8 +335,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
- peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
- peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
+ peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
+ peer->ibp_queue_depth = ni->ni_peertxcredits;
atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
@@ -1283,65 +1283,86 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags)
{
- __u16 nfrags = (negotiated_nfrags != -1) ?
- negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ __u16 nfrags;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
LASSERT(hdev->ibh_mrs);
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- nfrags <= rd->rd_nfrags)
+ if (mod > 0 && nfrags <= rd->rd_nfrags)
return NULL;
return hdev->ibh_mrs;
}
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
{
- LASSERT(!pool->fpo_map_count);
+ LASSERT(!fpo->fpo_map_count);
- if (pool->fpo_fmr_pool)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ if (fpo->fpo_is_fmr) {
+ if (fpo->fmr.fpo_fmr_pool)
+ ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ } else {
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i = 0;
+
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ i++;
+ }
+ if (i < fpo->fast_reg.fpo_pool_size)
+ CERROR("FastReg pool still has %d regions registered\n",
+ fpo->fast_reg.fpo_pool_size - i);
+ }
- if (pool->fpo_hdev)
- kiblnd_hdev_decref(pool->fpo_hdev);
+ if (fpo->fpo_hdev)
+ kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(*pool));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
}
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *pool;
+ kib_fmr_pool_t *fpo, *tmp;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
+ list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
+ list_del(&fpo->fpo_list);
+ kiblnd_destroy_fmr_pool(fpo);
}
}
-static int kiblnd_fmr_pool_size(int ncpts)
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ int size = tunables->lnd_fmr_pool_size / ncpts;
return max(IBLND_FMR_POOL, size);
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
- kib_fmr_pool_t **pp_fpo)
+static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
struct ib_fmr_pool_param param = {
.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
.page_shift = PAGE_SHIFT,
@@ -1351,7 +1372,78 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
.flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ .cache = !!fps->fps_cache };
+ int rc = 0;
+
+ fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
+ &param);
+ if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
+ if (rc != -ENOSYS)
+ CERROR("Failed to create FMR pool: %d\n", rc);
+ else
+ CERROR("FMRs are not supported\n");
+ }
+
+ return rc;
+}
+
+static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+{
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i, rc;
+
+ INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size = 0;
+ for (i = 0; i < fps->fps_pool_size; i++) {
+ LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
+ sizeof(*frd));
+ if (!frd) {
+ CERROR("Failed to allocate a new fast_reg descriptor\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD / PAGE_SIZE);
+ if (IS_ERR(frd->frd_mr)) {
+ rc = PTR_ERR(frd->frd_mr);
+ CERROR("Failed to allocate ib_alloc_mr: %d\n", rc);
+ frd->frd_mr = NULL;
+ goto out_middle;
+ }
+
+ frd->frd_valid = true;
+
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size++;
+ }
+
+ return 0;
+
+out_middle:
+ if (frd->frd_mr)
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+
+out:
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ }
+
+ return rc;
+}
+
+static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
+ kib_fmr_pool_t **pp_fpo)
+{
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
+ struct ib_device_attr *dev_attr;
+ kib_fmr_pool_t *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1359,22 +1451,41 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
return -ENOMEM;
fpo->fpo_hdev = kiblnd_current_hdev(dev);
-
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
-
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(*fpo));
- return rc;
+ dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
+
+ /* Check for FMR or FastReg support */
+ fpo->fpo_is_fmr = 0;
+ if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
+ LCONSOLE_INFO("Using FMR for registration\n");
+ fpo->fpo_is_fmr = 1;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ } else {
+ rc = -ENOSYS;
+ LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs or FastRegs, can't register memory\n");
+ goto out_fpo;
}
+ if (fpo->fpo_is_fmr)
+ rc = kiblnd_alloc_fmr_pool(fps, fpo);
+ else
+ rc = kiblnd_alloc_freg_pool(fps, fpo);
+ if (rc)
+ goto out_fpo;
+
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
+ fpo->fpo_owner = fps;
*pp_fpo = fpo;
return 0;
+
+out_fpo:
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(*fpo));
+ return rc;
}
static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
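
In effect, the registration strategy is chosen per HCA at pool-creation time: FMR when the device exposes all four FMR verbs, FastReg when it advertises IB_DEVICE_MEM_MGT_EXTENSIONS, and -ENOSYS otherwise; both allocator failure paths funnel through out_fpo so the hdev reference and the pool memory are released exactly once.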
@@ -1407,9 +1518,10 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
}
}
-static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
- kib_net_t *net, int pool_size,
- int flush_trigger)
+static int
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
+ kib_net_t *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
kib_fmr_pool_t *fpo;
int rc;
@@ -1418,8 +1530,11 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
INIT_LIST_HEAD(&fps->fps_pool_list);
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
@@ -1440,25 +1555,64 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
return cfs_time_aftereq(now, fpo->fpo_deadline);
}
+static int
+kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+{
+ __u64 *pages = tx->tx_pages;
+ kib_hca_dev_t *hdev;
+ int npages;
+ int size;
+ int i;
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ return npages;
+}
+
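Worked example with hypothetical sizes: a descriptor holding two fragments of 8192 bytes each on an HCA with ibh_page_size = 4096 contributes two entries per fragment, so kiblnd_map_tx_pages() returns npages = 4.
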
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
LIST_HEAD(zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ kib_fmr_poolset_t *fps;
unsigned long now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(!rc);
+ if (!fpo)
+ return;
- if (status) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(!rc);
- }
+ fps = fpo->fpo_owner;
+ if (fpo->fpo_is_fmr) {
+ if (fmr->fmr_pfmr) {
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT(!rc);
+ fmr->fmr_pfmr = NULL;
+ }
+
+ if (status) {
+ rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ LASSERT(!rc);
+ }
+ } else {
+ struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
+ if (frd) {
+ frd->frd_valid = false;
+ spin_lock(&fps->fps_lock);
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ spin_unlock(&fps->fps_lock);
+ fmr->fmr_frd = NULL;
+ }
+ }
fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--; /* decref the pool */
@@ -1479,11 +1633,15 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
+ kib_fmr_t *fmr)
{
- struct ib_pool_fmr *pfmr;
+ __u64 *pages = tx->tx_pages;
+ bool is_rx = (rd != tx->tx_rd);
+ bool tx_pages_mapped = 0;
kib_fmr_pool_t *fpo;
+ int npages = 0;
__u64 version;
int rc;
@@ -1493,21 +1651,95 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
+ if (fpo->fpo_is_fmr) {
+ struct ib_pool_fmr *pfmr;
+
+ spin_unlock(&fps->fps_lock);
+
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
+ pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
+ pfmr->fmr->lkey;
+ fmr->fmr_frd = NULL;
+ fmr->fmr_pfmr = pfmr;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ rc = PTR_ERR(pfmr);
+ } else {
+ if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+ struct kib_fast_reg_descriptor *frd;
+ struct ib_reg_wr *wr;
+ struct ib_mr *mr;
+ int n;
+
+ frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
+ list_del(&frd->frd_list);
+ spin_unlock(&fps->fps_lock);
+
+ mr = frd->frd_mr;
+
+ if (!frd->frd_valid) {
+ __u32 key = is_rx ? mr->rkey : mr->lkey;
+ struct ib_send_wr *inv_wr;
+
+ inv_wr = &frd->frd_inv_wr;
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = IBLND_WID_MR;
+ inv_wr->ex.invalidate_rkey = key;
+
+ /* Bump the key */
+ key = ib_inc_rkey(key);
+ ib_update_fast_reg_key(mr, key);
+ }
+
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, NULL, PAGE_SIZE);
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d elements\n",
+ n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ mr->iova = iov;
+
+ /* Prepare FastReg WR */
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+
+ fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+ fmr->fmr_frd = frd;
+ fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ spin_unlock(&fps->fps_lock);
+ rc = -EBUSY;
}
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
+ if (rc != -EAGAIN) {
spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
+ return rc;
}
/* EAGAIN and ... */
@@ -1932,25 +2164,28 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
}
}
-static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
+ int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
unsigned long flags;
int cpt;
- int rc = 0;
+ int rc;
int i;
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (!*kiblnd_tunables.kib_map_on_demand) {
+ if (!tunables->lnd_map_on_demand) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
goto create_tx_pool;
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
+ tunables->lnd_fmr_pool_size,
*kiblnd_tunables.kib_ntx / 4);
rc = -EINVAL;
goto failed;
@@ -1965,8 +2200,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
/*
* premapping can fail if ibd_nmr > 1, so we always create
* FMR pool and map-on-demand if premapping failed
+ *
+ * cfs_percpt_alloc() creates an array of struct kib_fmr_poolset.
+ * The number of kib_fmr_poolsets created equals the number of
+ * CPTs that exist, i.e. net->ibn_fmr_ps[cpt].
*/
-
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
if (!net->ibn_fmr_ps) {
@@ -1977,9 +2215,8 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
for (i = 0; i < ncpts; i++) {
cpt = !cpts ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
if (rc) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
@@ -1991,6 +2228,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
LASSERT(i == ncpts);
create_tx_pool:
+ /*
+ * cfs_percpt_alloc() creates an array of struct kib_tx_poolset.
+ * The number of kib_tx_poolsets created equals the number of
+ * CPTs that exist, i.e. net->ibn_tx_ps[cpt].
+ */
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_tx_poolset_t));
if (!net->ibn_tx_ps) {
@@ -2694,10 +2936,9 @@ static int kiblnd_startup(lnet_ni_t *ni)
net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
tv.tv_nsec / NSEC_PER_USEC;
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ rc = kiblnd_tunables_setup(ni);
+ if (rc)
+ goto net_failed;
if (ni->ni_interfaces[0]) {
/* Use the IPoIB interface specified in 'networks=' */
@@ -2736,7 +2977,7 @@ static int kiblnd_startup(lnet_ni_t *ni)
if (rc)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
@@ -2779,8 +3020,6 @@ static void __exit ko2iblnd_exit(void)
static int __init ko2iblnd_init(void)
{
- int rc;
-
CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
@@ -2789,9 +3028,7 @@ static int __init ko2iblnd_init(void)
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
- rc = kiblnd_tunables_init();
- if (rc)
- return rc;
+ kiblnd_tunables_init();
lnet_register_lnd(&the_o2iblnd);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index bfcbdd167..b22984fd9 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -87,22 +87,10 @@ typedef struct {
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
- int *kib_credits; /* # concurrent sends */
- int *kib_peertxcredits; /* # concurrent sends to 1 peer */
- int *kib_peerrtrcredits; /* # per-peer router buffer credits */
- int *kib_peercredits_hiw; /* # when eagerly to return credits */
- int *kib_peertimeout; /* seconds to consider peer dead */
char **kib_default_ipif; /* default IPoIB interface */
int *kib_retry_count;
int *kib_rnr_retry_count;
- int *kib_concurrent_sends; /* send work queue sizing */
int *kib_ib_mtu; /* IB MTU */
- int *kib_map_on_demand; /* map-on-demand if RD has more */
- /* fragments than this value, 0 */
- /* disable map-on-demand */
- int *kib_fmr_pool_size; /* # FMRs in pool */
- int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
- int *kib_fmr_cache; /* enable FMR pool cache? */
int *kib_require_priv_port; /* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
int *kib_nscheds; /* # threads on each CPT */
@@ -116,43 +104,21 @@ extern kib_tunables_t kiblnd_tunables;
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
-#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MSG_QUEUE_SIZE_V1 : \
- *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
-#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+/* when eagerly to return credits */
+#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_CREDIT_HIGHWATER_V1 : \
+ t->lnd_peercredits_hiw)
#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
cb, dev, \
ps, qpt)
-static inline int
-kiblnd_concurrent_sends_v1(void)
-{
- if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
- return IBLND_MSG_QUEUE_SIZE_V1 * 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
- return IBLND_MSG_QUEUE_SIZE_V1 / 2;
-
- return *kiblnd_tunables.kib_concurrent_sends;
-}
-
-#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- kiblnd_concurrent_sends_v1() : \
- *kiblnd_tunables.kib_concurrent_sends)
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
- *kiblnd_tunables.kib_map_on_demand : \
- IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
-#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
/************************/
/* derived constants... */
@@ -171,7 +137,8 @@ kiblnd_concurrent_sends_v1(void)
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c) \
- ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version))
+ ((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
+ c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
struct kib_hca_dev;
@@ -286,24 +253,44 @@ typedef struct {
int fps_cpt; /* CPT id */
int fps_pool_size;
int fps_flush_trigger;
+ int fps_cache;
int fps_increasing; /* is allocating new pool */
unsigned long fps_next_retry; /* time stamp for retry if*/
/* failed to allocate */
} kib_fmr_poolset_t;
+struct kib_fast_reg_descriptor { /* For fast registration */
+ struct list_head frd_list;
+ struct ib_send_wr frd_inv_wr;
+ struct ib_reg_wr frd_fastreg_wr;
+ struct ib_mr *frd_mr;
+ bool frd_valid;
+};
+
typedef struct {
struct list_head fpo_list; /* chain on pool list */
struct kib_hca_dev *fpo_hdev; /* device for this pool */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ union {
+ struct {
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ } fmr;
+ struct { /* For fast registration */
+ struct list_head fpo_pool_list;
+ int fpo_pool_size;
+ } fast_reg;
+ };
unsigned long fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
+ int fpo_is_fmr;
} kib_fmr_pool_t;
typedef struct {
- struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+ struct kib_fast_reg_descriptor *fmr_frd;
+ u32 fmr_key;
} kib_fmr_t;
typedef struct kib_net {
@@ -615,6 +602,48 @@ extern kib_data_t kiblnd_data;
void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
+
+/* max # of fragments configured by user */
+static inline int
+kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ return mod ? mod : IBLND_MAX_RDMA_FRAGS;
+}
+
+static inline int
+kiblnd_rdma_frags(int version, struct lnet_ni *ni)
+{
+ return version == IBLND_MSG_VERSION_1 ?
+ IBLND_MAX_RDMA_FRAGS :
+ kiblnd_cfg_rdma_frags(ni);
+}
+
+static inline int
+kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int concurrent_sends;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ concurrent_sends = tunables->lnd_concurrent_sends;
+
+ if (version == IBLND_MSG_VERSION_1) {
+ if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+ if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+ }
+
+ return concurrent_sends;
+}
+
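Worked example: assuming IBLND_MSG_QUEUE_SIZE_V1 is 8 (its historical value), a version-1 peer sees lnd_concurrent_sends clamped to [4, 16], so a configured 64 becomes 16 and a configured 2 becomes 4, while version-2 peers use the tunable unmodified.
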
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
@@ -737,10 +766,14 @@ kiblnd_send_keepalive(kib_conn_t *conn)
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
+ IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
@@ -799,7 +832,8 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
#define IBLND_WID_TX 1
#define IBLND_WID_RX 2
#define IBLND_WID_RDMA 3
-#define IBLND_WID_MASK 3UL
+#define IBLND_WID_MR 4
+#define IBLND_WID_MASK 7UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
@@ -947,20 +981,20 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
- int npages, __u64 iov, kib_fmr_t *fmr);
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
+ kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_tunables_init(void);
-void kiblnd_tunables_fini(void);
+int kiblnd_tunables_setup(struct lnet_ni *ni);
+void kiblnd_tunables_init(void);
int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 2323e8d3a..845e49a52 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -561,36 +561,23 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
{
kib_hca_dev_t *hdev;
- __u64 *pages = tx->tx_pages;
kib_fmr_poolset_t *fps;
- int npages;
- int size;
int cpt;
int rc;
- int i;
LASSERT(tx->tx_pool);
LASSERT(tx->tx_pool->tpo_pool.po_owner);
hdev = tx->tx_pool->tpo_hdev;
-
- for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
- for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += hdev->ibh_page_size) {
- pages[npages++] = (rd->rd_frags[i].rf_addr &
- hdev->ibh_page_mask) + size;
- }
- }
-
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
+ rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
if (rc) {
- CERROR("Can't map %d pages: %d\n", npages, rc);
+ CERROR("Can't map %u bytes: %d\n", nob, rc);
return rc;
}
@@ -598,8 +585,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
* If rd is not tx_rd, it's going to get sent to a peer, who will need
* the rkey
*/
- rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
- tx->fmr.fmr_pfmr->fmr->lkey;
+ rd->rd_key = tx->fmr.fmr_key;
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
@@ -613,10 +599,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
LASSERT(net);
- if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
+ if (net->ibn_fmr_ps)
kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
- tx->fmr.fmr_pfmr = NULL;
- }
if (tx->tx_nfrags) {
kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
@@ -628,8 +612,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
int nfrags)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
struct ib_mr *mr = NULL;
__u32 nob;
int i;
@@ -652,7 +636,7 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
nob += rd->rd_frags[i].rf_nob;
}
- mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ?
+ mr = kiblnd_find_rd_dma_mr(ni, rd, tx->tx_conn ?
tx->tx_conn->ibc_max_frags : -1);
if (mr) {
/* found pre-mapping MR */
@@ -704,7 +688,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
sg_set_page(sg, page, fragnob, page_offset);
- sg++;
+ sg = sg_next(sg);
if (offset + fragnob < iov->iov_len) {
offset += fragnob;
@@ -748,7 +732,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, kiov->kiov_page, fragnob,
kiov->kiov_offset + offset);
- sg++;
+ sg = sg_next(sg);
offset = 0;
kiov++;
@@ -765,6 +749,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
+ struct lnet_ni *ni = peer->ibp_ni;
int ver = conn->ibc_version;
int rc;
int done;
@@ -780,7 +765,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
LASSERT(conn->ibc_credits >= 0);
LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
- if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+ if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) {
/* tx completions outstanding... */
CDEBUG(D_NET, "%s: posted enough\n",
libcfs_nid2str(peer->ibp_nid));
@@ -851,14 +836,26 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
+ struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct ib_send_wr *wrq = &tx->tx_wrq[0].wr;
+
+ if (frd) {
+ if (!frd->frd_valid) {
+ wrq = &frd->frd_inv_wr;
+ wrq->next = &frd->frd_fastreg_wr.wr;
+ } else {
+ wrq = &frd->frd_fastreg_wr.wr;
+ }
+ frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
+ }
- LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
"bad wr_id %llx, opc %d, flags %d, peer: %s\n",
- wrq->wr_id, wrq->opcode, wrq->send_flags,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- wrq = NULL;
- rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq);
+ bad->wr_id, bad->opcode, bad->send_flags,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ bad = NULL;
+ rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad);
}
conn->ibc_last_send = jiffies;
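
For fast-registration MRs the send path now posts a short chain of work requests instead of the bare transmit list. The resulting chain, in the terms of the hunk above (the invalidate step runs only when the cached rkey is stale):

	/* frd && !frd->frd_valid:  INV(old rkey) -> FASTREG -> tx_wrq[0..n-1] */
	/* frd &&  frd->frd_valid:  FASTREG -> tx_wrq[0..n-1] */
	/* !frd (FMR or global MR): tx_wrq[0..n-1] */
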
@@ -919,7 +916,7 @@ kiblnd_check_sends(kib_conn_t *conn)
spin_lock(&conn->ibc_lock);
- LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+ LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni));
LASSERT(!IBLND_OOB_CAPABLE(ver) ||
conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT(conn->ibc_reserved_credits >= 0);
@@ -1066,7 +1063,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
kib_msg_t *ibmsg = tx->tx_msg;
kib_rdma_desc_t *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
+ struct ib_rdma_wr *wrq, *next;
int rc = resid;
int srcidx = 0;
int dstidx = 0;
@@ -2333,11 +2330,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
- IBLND_MSG_QUEUE_SIZE(version)) {
+ kiblnd_msg_queue_size(version, ni)) {
CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
libcfs_nid2str(nid),
reqmsg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(version));
+ kiblnd_msg_queue_size(version, ni));
if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
@@ -2346,24 +2343,24 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
if (reqmsg->ibm_u.connparams.ibcp_max_frags >
- IBLND_RDMA_FRAGS(version)) {
+ kiblnd_rdma_frags(version, ni)) {
CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ kiblnd_rdma_frags(version, ni));
if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
} else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
- IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) {
+ kiblnd_rdma_frags(version, ni) && !net->ibn_fmr_ps) {
CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ kiblnd_rdma_frags(version, ni));
- if (version >= IBLND_MSG_VERSION)
+ if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
@@ -2524,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
return 0;
failed:
- if (ni)
+ if (ni) {
lnet_ni_decref(ni);
+ rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+ rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+ }
rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
kiblnd_reject(cmid, &rej);
return -ECONNREFUSED;
@@ -2580,12 +2578,15 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
reason = "Unknown";
break;
- case IBLND_REJECT_RDMA_FRAGS:
+ case IBLND_REJECT_RDMA_FRAGS: {
+ struct lnet_ioctl_config_lnd_tunables *tunables;
+
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- if (!*kiblnd_tunables.kib_map_on_demand) {
+ tunables = peer->ibp_ni->ni_lnd_tunables;
+ if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) {
reason = "map_on_demand must be enabled";
goto out;
}
@@ -2597,7 +2598,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
peer->ibp_max_frags = frag_num;
reason = "rdma fragments";
break;
-
+ }
case IBLND_REJECT_MSG_QUEUE_SIZE:
if (!cp) {
reason = "can't negotiate queue depth";
@@ -3430,6 +3431,12 @@ kiblnd_complete(struct ib_wc *wc)
default:
LBUG();
+ case IBLND_WID_MR:
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)
+ CNETERR("FastReg failed: %d\n", wc->status);
+ break;
+
case IBLND_WID_RDMA:
/*
* We only get RDMA completion notification if it fails. All
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b4607dad3..f8fdd4ae3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -152,74 +152,135 @@ kib_tunables_t kiblnd_tunables = {
.kib_timeout = &timeout,
.kib_keepalive = &keepalive,
.kib_ntx = &ntx,
- .kib_credits = &credits,
- .kib_peertxcredits = &peer_credits,
- .kib_peercredits_hiw = &peer_credits_hiw,
- .kib_peerrtrcredits = &peer_buffer_credits,
- .kib_peertimeout = &peer_timeout,
.kib_default_ipif = &ipif_name,
.kib_retry_count = &retry_count,
.kib_rnr_retry_count = &rnr_retry_count,
- .kib_concurrent_sends = &concurrent_sends,
.kib_ib_mtu = &ib_mtu,
- .kib_map_on_demand = &map_on_demand,
- .kib_fmr_pool_size = &fmr_pool_size,
- .kib_fmr_flush_trigger = &fmr_flush_trigger,
- .kib_fmr_cache = &fmr_cache,
.kib_require_priv_port = &require_privileged_port,
.kib_use_priv_port = &use_privileged_port,
.kib_nscheds = &nscheds
};
-int
-kiblnd_tunables_init(void)
+static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
+
+/* # messages/RDMAs in-flight */
+int kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
{
+ if (version == IBLND_MSG_VERSION_1)
+ return IBLND_MSG_QUEUE_SIZE_V1;
+ else if (ni)
+ return ni->ni_peertxcredits;
+ else
+ return peer_credits;
+}
+
+int kiblnd_tunables_setup(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+ /*
+ * If no tunables were specified, fall back to the module
+ * defaults.
+ */
+ if (!ni->ni_lnd_tunables) {
+ LIBCFS_ALLOC(ni->ni_lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ if (!ni->ni_lnd_tunables)
+ return -ENOMEM;
+
+ memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib,
+ &default_tunables, sizeof(*tunables));
+ }
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
+ /* Current API version */
+ tunables->lnd_version = 0;
+
if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
*kiblnd_tunables.kib_ib_mtu);
return -EINVAL;
}
- if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
+ if (!ni->ni_peertimeout)
+ ni->ni_peertimeout = peer_timeout;
+
+ if (!ni->ni_maxtxcredits)
+ ni->ni_maxtxcredits = credits;
+
+ if (!ni->ni_peertxcredits)
+ ni->ni_peertxcredits = peer_credits;
- if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
+ if (!ni->ni_peerrtrcredits)
+ ni->ni_peerrtrcredits = peer_buffer_credits;
- if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
- *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+ if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT)
+ ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT;
- if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+ if (ni->ni_peertxcredits > IBLND_CREDITS_MAX)
+ ni->ni_peertxcredits = IBLND_CREDITS_MAX;
- if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+ if (ni->ni_peertxcredits > credits)
+ ni->ni_peertxcredits = credits;
- if (*kiblnd_tunables.kib_map_on_demand < 0 ||
- *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
- *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+ if (!tunables->lnd_peercredits_hiw)
+ tunables->lnd_peercredits_hiw = peer_credits_hiw;
- if (*kiblnd_tunables.kib_map_on_demand == 1)
- *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
+ if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2)
+ tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2;
- if (!*kiblnd_tunables.kib_concurrent_sends) {
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
- else
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+ if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
+ tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
+
+ if (tunables->lnd_map_on_demand < 0 ||
+ tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
+ /* disable map-on-demand */
+ tunables->lnd_map_on_demand = 0;
+ }
+
+ if (tunables->lnd_map_on_demand == 1) {
+ /* it makes no sense to create a map for only one fragment */
+ tunables->lnd_map_on_demand = 2;
+ }
+
+ if (!tunables->lnd_concurrent_sends) {
+ if (tunables->lnd_map_on_demand > 0 &&
+ tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
+ tunables->lnd_concurrent_sends =
+ ni->ni_peertxcredits * 2;
+ } else {
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits;
+ }
}
- if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
+ if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2)
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2;
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
+ if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2)
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2;
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
+ if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) {
CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
- *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+ tunables->lnd_concurrent_sends, ni->ni_peertxcredits);
}
+ if (!tunables->lnd_fmr_pool_size)
+ tunables->lnd_fmr_pool_size = fmr_pool_size;
+ if (!tunables->lnd_fmr_flush_trigger)
+ tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
+ if (!tunables->lnd_fmr_cache)
+ tunables->lnd_fmr_cache = fmr_cache;
+
return 0;
}
+
+void kiblnd_tunables_init(void)
+{
+ default_tunables.lnd_version = 0;
+ default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
+ default_tunables.lnd_map_on_demand = map_on_demand;
+ default_tunables.lnd_concurrent_sends = concurrent_sends;
+ default_tunables.lnd_fmr_pool_size = fmr_pool_size;
+ default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
+ default_tunables.lnd_fmr_cache = fmr_cache;
+}
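
The old single init routine is thus split in two: kiblnd_tunables_init() snapshots the module parameters into default_tunables once at load time, while kiblnd_tunables_setup() runs per interface to default and clamp the per-NI values. A hedged sketch of the intended calling sequence (the surrounding startup code is assumed, not shown in this diff):

	kiblnd_tunables_init();			/* module init: snapshot defaults */

	rc = kiblnd_tunables_setup(ni);		/* per-NI startup: default + clamp */
	if (rc)
		return rc;
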
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cca7b2f7f..406c0e7a5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2582,7 +2582,6 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
}
void
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index d4ce06d0a..964b4e338 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -675,7 +675,6 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
sock->sk->sk_write_space = ksocknal_write_space;
- return;
}
void
@@ -695,8 +694,6 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
* sk_user_data is NULL.
*/
sock->sk->sk_user_data = NULL;
-
- return ;
}
int
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c3d628bac..8c260c3d5 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -232,130 +232,24 @@ int libcfs_panic_in_progress;
static const char *
libcfs_debug_subsys2str(int subsys)
{
- switch (1 << subsys) {
- default:
+ static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
+
+ if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
return NULL;
- case S_UNDEFINED:
- return "undefined";
- case S_MDC:
- return "mdc";
- case S_MDS:
- return "mds";
- case S_OSC:
- return "osc";
- case S_OST:
- return "ost";
- case S_CLASS:
- return "class";
- case S_LOG:
- return "log";
- case S_LLITE:
- return "llite";
- case S_RPC:
- return "rpc";
- case S_LNET:
- return "lnet";
- case S_LND:
- return "lnd";
- case S_PINGER:
- return "pinger";
- case S_FILTER:
- return "filter";
- case S_ECHO:
- return "echo";
- case S_LDLM:
- return "ldlm";
- case S_LOV:
- return "lov";
- case S_LQUOTA:
- return "lquota";
- case S_OSD:
- return "osd";
- case S_LFSCK:
- return "lfsck";
- case S_LMV:
- return "lmv";
- case S_SEC:
- return "sec";
- case S_GSS:
- return "gss";
- case S_MGC:
- return "mgc";
- case S_MGS:
- return "mgs";
- case S_FID:
- return "fid";
- case S_FLD:
- return "fld";
- }
+
+ return libcfs_debug_subsystems[subsys];
}
/* libcfs_debug_token2mask() expects the returned string in lower-case */
static const char *
libcfs_debug_dbg2str(int debug)
{
- switch (1 << debug) {
- default:
+ static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
+
+ if (debug >= ARRAY_SIZE(libcfs_debug_masks))
return NULL;
- case D_TRACE:
- return "trace";
- case D_INODE:
- return "inode";
- case D_SUPER:
- return "super";
- case D_EXT2:
- return "ext2";
- case D_MALLOC:
- return "malloc";
- case D_CACHE:
- return "cache";
- case D_INFO:
- return "info";
- case D_IOCTL:
- return "ioctl";
- case D_NETERROR:
- return "neterror";
- case D_NET:
- return "net";
- case D_WARNING:
- return "warning";
- case D_BUFFS:
- return "buffs";
- case D_OTHER:
- return "other";
- case D_DENTRY:
- return "dentry";
- case D_NETTRACE:
- return "nettrace";
- case D_PAGE:
- return "page";
- case D_DLMTRACE:
- return "dlmtrace";
- case D_ERROR:
- return "error";
- case D_EMERG:
- return "emerg";
- case D_HA:
- return "ha";
- case D_RPCTRACE:
- return "rpctrace";
- case D_VFSTRACE:
- return "vfstrace";
- case D_READA:
- return "reada";
- case D_MMAP:
- return "mmap";
- case D_CONFIG:
- return "config";
- case D_CONSOLE:
- return "console";
- case D_QUOTA:
- return "quota";
- case D_SEC:
- return "sec";
- case D_LFSCK:
- return "lfsck";
- }
+
+ return libcfs_debug_masks[debug];
}
int
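
Both long switch statements above collapse into indexed name tables. This depends on LIBCFS_DEBUG_SUBSYS_NAMES and LIBCFS_DEBUG_MASKS_NAMES expanding to brace-enclosed string initializers whose slot order matches the S_* / D_* bit numbers; an illustrative (not verbatim) shape, reusing names from the deleted switch:

	#define LIBCFS_DEBUG_SUBSYS_NAMES \
		{ "undefined", "mdc", "mds", "osc", "ost", /* ... */ }

With that invariant, "1 << subsys" in the old switch and libcfs_debug_subsystems[subsys] in the new lookup name the same subsystem.
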
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index dadaf7685..086e690bd 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(cfs_fail_loc);
unsigned int cfs_fail_val;
EXPORT_SYMBOL(cfs_fail_val);
+int cfs_fail_err;
+EXPORT_SYMBOL(cfs_fail_err);
+
DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
EXPORT_SYMBOL(cfs_race_waitq);
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index f60feb3a3..cc45ed82b 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -942,10 +942,10 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
* @flags - CFS_HASH_REHASH enable dynamic hash resizing
* - CFS_HASH_SORT enable chained hash sort
*/
-static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
+static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(cfs_workitem_t *wi)
+static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
int dep;
@@ -1847,7 +1847,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
}
static int
-cfs_hash_rehash_worker(cfs_workitem_t *wi)
+cfs_hash_rehash_worker(struct cfs_workitem *wi)
{
struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
struct cfs_hash_bucket **bkts;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 2de9eeae0..83543f928 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
* reason we always allocate cacheline-aligned memory block.
*/
struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys)
{
struct cfs_percpt_lock *pcl;
spinlock_t *lock;
@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
return NULL;
}
- cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+ if (!keys)
+ CWARN("Cannot set up class keys for percpt lock; any recursive locking warnings it triggers are false positives.\n");
+
+ cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock);
+ if (keys)
+ lockdep_set_class(lock, &keys[i]);
+ }
return pcl;
}
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
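
The rename from _alloc to _create comes with the new lock_class_key argument, which gives each partition's spinlock its own lockdep class so nested locking of different partitions is not flagged as recursion. A hedged caller sketch (the static key array is an assumed caller-side convention and MY_MAX_CPTS is a hypothetical bound; lockdep class keys must live in static storage):

	static struct lock_class_key my_keys[MY_MAX_CPTS];	/* hypothetical */
	struct cfs_percpt_lock *pcl;

	pcl = cfs_percpt_lock_create(cptab, my_keys);
	if (!pcl)
		return -ENOMEM;
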
/**
* lock a CPU partition
@@ -142,44 +149,3 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
}
}
EXPORT_SYMBOL(cfs_percpt_unlock);
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(atomic_t **refs)
-{
- cfs_percpt_free(refs);
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
- atomic_t **refs;
- atomic_t *ref;
- int i;
-
- refs = cfs_percpt_alloc(cptab, sizeof(*ref));
- if (!refs)
- return NULL;
-
- cfs_percpt_for_each(ref, i, refs)
- atomic_set(ref, init_val);
- return refs;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(atomic_t **refs)
-{
- atomic_t *ref;
- int i;
- int val = 0;
-
- cfs_percpt_for_each(ref, i, refs)
- val += atomic_read(ref);
-
- return val;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_summary);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index c5a695151..d0e81bb41 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -115,34 +115,6 @@ cfs_percpt_number(void *vars)
EXPORT_SYMBOL(cfs_percpt_number);
/*
- * return memory block shadowed from current CPU
- */
-void *
-cfs_percpt_current(void *vars)
-{
- struct cfs_var_array *arr;
- int cpt;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- cpt = cfs_cpt_current(arr->va_cptab, 0);
- if (cpt < 0)
- return NULL;
-
- return arr->va_ptrs[cpt];
-}
-
-void *
-cfs_percpt_index(void *vars, int idx)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- LASSERT(idx >= 0 && idx < arr->va_count);
- return arr->va_ptrs[idx];
-}
-
-/*
* free variable array, see more detail in cfs_array_alloc
*/
void
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 389fb9eee..b52518c54 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -755,8 +755,13 @@ cfs_cpt_table_create(int ncpt)
struct cfs_cpu_partition *part;
int n;
- if (cpt >= ncpt)
- goto failed;
+ /*
+ * Each emulated NUMA node has all allowed CPUs in its mask,
+ * so stop iterating once every partition has CPUs assigned.
+ */
+ if (cpt == ncpt)
+ break;
part = &cptab->ctb_parts[cpt];
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 8c9377ed8..84f9b7b47 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -30,13 +30,34 @@
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include "../../../include/linux/libcfs/libcfs.h"
+#include "../../../include/linux/libcfs/libcfs_crypto.h"
#include "linux-crypto.h"
+
/**
- * Array of hash algorithm speed in MByte per second
+ * Array of hash algorithm speed in MByte per second
*/
static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
-static int cfs_crypto_hash_alloc(unsigned char alg_id,
+/**
+ * Initialize the state descriptor for the specified hash algorithm.
+ *
+ * An internal routine to allocate the hash-specific state in \a hdesc for
+ * use with cfs_crypto_hash_digest() to compute the hash of a single message,
+ * though possibly in multiple chunks. The descriptor internal state should
+ * be freed with cfs_crypto_hash_final().
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ * \param[out] type pointer to the hash description in hash_types[]
+ * array
+ * \param[in,out] hdesc hash state descriptor to be initialized
+ * \param[in] key initial hash value/state, NULL to use default
+ * value
+ * \param[in] key_len length of \a key
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
+ */
+static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
const struct cfs_crypto_hash_type **type,
struct ahash_request **req,
unsigned char *key,
@@ -45,11 +66,11 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
struct crypto_ahash *tfm;
int err = 0;
- *type = cfs_crypto_hash_type(alg_id);
+ *type = cfs_crypto_hash_type(hash_alg);
if (!*type) {
CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
- alg_id, CFS_HASH_ALG_MAX);
+ hash_alg, CFS_HASH_ALG_MAX);
return -EINVAL;
}
tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
@@ -70,12 +91,6 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
ahash_request_set_callback(*req, 0, NULL, NULL);
- /** Shash have different logic for initialization then digest
- * shash: crypto_hash_setkey, crypto_hash_init
- * digest: crypto_digest_init, crypto_digest_setkey
- * Skip this function for digest, because we use shash logic at
- * cfs_crypto_hash_alloc.
- */
if (key)
err = crypto_ahash_setkey(tfm, key, key_len);
else if ((*type)->cht_key != 0)
@@ -90,7 +105,7 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
- cfs_crypto_hash_speeds[alg_id]);
+ cfs_crypto_hash_speeds[hash_alg]);
err = crypto_ahash_init(*req);
if (err) {
@@ -100,7 +115,33 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
return err;
}
-int cfs_crypto_hash_digest(unsigned char alg_id,
+/**
+ * Calculate hash digest for the passed buffer.
+ *
+ * This should be used when computing the hash on a single contiguous buffer.
+ * It combines the hash initialization, computation, and cleanup.
+ *
+ * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*)
+ * \param[in] buf data buffer on which to compute hash
+ * \param[in] buf_len length of \a buf in bytes
+ * \param[in] key initial value/state for algorithm,
+ * if \a key = NULL use default initial value
+ * \param[in] key_len length of \a key in bytes
+ * \param[out] hash pointer to computed hash value,
+ * if \a hash is NULL then \a hash_len is set to the
+ * digest size in bytes and -ENOSPC is returned
+ * \param[in,out] hash_len size of \a hash buffer
+ *
+ * \retval -EINVAL \a buf, \a buf_len, \a hash_len,
+ * \a hash_alg invalid
+ * \retval -ENOENT \a hash_alg is unsupported
+ * \retval -ENOSPC \a hash is NULL, or \a hash_len less than
+ * digest size
+ * \retval 0 for success
+ * \retval negative errno for other errors from lower
+ * layers.
+ */
+int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len)
@@ -113,7 +154,7 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
if (!buf || buf_len == 0 || !hash_len)
return -EINVAL;
- err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+ err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
if (err != 0)
return err;
@@ -134,15 +175,32 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
}
EXPORT_SYMBOL(cfs_crypto_hash_digest);
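
A hedged usage sketch for the one-shot interface documented above (CFS_HASH_ALG_CRC32C stands in for any valid CFS_HASH_ALG_* id; buf and buf_len are caller-supplied data):

	unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
	unsigned int hash_len = sizeof(hash);
	int rc;

	/* NULL key: use the algorithm's default initial value */
	rc = cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32C, buf, buf_len,
				    NULL, 0, hash, &hash_len);
	if (rc)
		return rc;
	/* on success hash[0..hash_len-1] holds the digest */
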
+/**
+ * Allocate and initialize a descriptor for the hash algorithm.
+ *
+ * This should be used to initialize a hash descriptor for multiple calls
+ * to a single hash function when computing the hash across multiple
+ * separate buffers or pages using cfs_crypto_hash_update{,_page}().
+ *
+ * The hash descriptor should be freed with cfs_crypto_hash_final().
+ *
+ * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*)
+ * \param[in] key initial value/state for algorithm, if \a key = NULL
+ * use default initial value
+ * \param[in] key_len length of \a key in bytes
+ *
+ * \retval pointer to descriptor of hash instance
+ * \retval ERR_PTR(errno) in case of error
+ */
struct cfs_crypto_hash_desc *
- cfs_crypto_hash_init(unsigned char alg_id,
- unsigned char *key, unsigned int key_len)
+cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
+ unsigned char *key, unsigned int key_len)
{
struct ahash_request *req;
int err;
const struct cfs_crypto_hash_type *type;
- err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+ err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
if (err)
return ERR_PTR(err);
@@ -150,6 +208,17 @@ struct cfs_crypto_hash_desc *
}
EXPORT_SYMBOL(cfs_crypto_hash_init);
+/**
+ * Update hash digest computed on data within the given \a page
+ *
+ * \param[in] hdesc hash state descriptor
+ * \param[in] page data page on which to compute the hash
+ * \param[in] offset offset within \a page at which to start hash
+ * \param[in] len length of data on which to compute hash
+ *
+ * \retval 0 for success
+ * \retval negative errno on failure
+ */
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct page *page, unsigned int offset,
unsigned int len)
@@ -158,13 +227,23 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct scatterlist sl;
sg_init_table(&sl, 1);
- sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
+ sg_set_page(&sl, page, len, offset & ~PAGE_MASK);
ahash_request_set_crypt(req, &sl, NULL, sl.length);
return crypto_ahash_update(req);
}
EXPORT_SYMBOL(cfs_crypto_hash_update_page);
+/**
+ * Update hash digest computed on the specified data
+ *
+ * \param[in] hdesc hash state descriptor
+ * \param[in] buf data buffer on which to compute the hash
+ * \param[in] buf_len length of \a buf on which to compute hash
+ *
+ * \retval 0 for success
+ * \retval negative errno on failure
+ */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
const void *buf, unsigned int buf_len)
{
@@ -178,7 +257,18 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
}
EXPORT_SYMBOL(cfs_crypto_hash_update);
-/* If hash_len pointer is NULL - destroy descriptor. */
+/**
+ * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
+ *
+ * \param[in] hdesc hash descriptor
+ * \param[out] hash pointer to hash buffer to store hash digest
+ * \param[in,out] hash_len pointer to hash buffer size; if \a hash or
+ * \a hash_len is NULL, only free \a hdesc instead of computing the hash
+ *
+ * \retval 0 for success
+ * \retval -EOVERFLOW if hash_len is too small for the hash digest
+ * \retval negative errno for other errors from lower layers
+ */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
@@ -186,99 +276,153 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
struct ahash_request *req = (void *)hdesc;
int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- if (!hash_len) {
- crypto_free_ahash(crypto_ahash_reqtfm(req));
- ahash_request_free(req);
- return 0;
+ if (!hash || !hash_len) {
+ err = 0;
+ goto free_ahash;
}
- if (!hash || *hash_len < size) {
- *hash_len = size;
- return -ENOSPC;
+ if (*hash_len < size) {
+ err = -EOVERFLOW;
+ goto free_ahash;
}
+
ahash_request_set_crypt(req, NULL, hash, 0);
err = crypto_ahash_final(req);
-
- if (err < 0) {
- /* May be caller can fix error */
- return err;
- }
+ if (!err)
+ *hash_len = size;
+free_ahash:
crypto_free_ahash(crypto_ahash_reqtfm(req));
ahash_request_free(req);
return err;
}
EXPORT_SYMBOL(cfs_crypto_hash_final);
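
The multi-buffer interface composes as init / update... / final; a hedged sketch with placeholder buffers, matching the cleanup semantics introduced above (a NULL hash simply frees the descriptor):

	struct cfs_crypto_hash_desc *hdesc;
	unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
	unsigned int hash_len = sizeof(hash);
	int rc;

	hdesc = cfs_crypto_hash_init(CFS_HASH_ALG_CRC32C, NULL, 0);
	if (IS_ERR(hdesc))
		return PTR_ERR(hdesc);

	rc = cfs_crypto_hash_update(hdesc, buf, buf_len);
	if (rc) {
		cfs_crypto_hash_final(hdesc, NULL, NULL);	/* just free */
		return rc;
	}
	return cfs_crypto_hash_final(hdesc, hash, &hash_len);
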
-static void cfs_crypto_performance_test(unsigned char alg_id,
- const unsigned char *buf,
- unsigned int buf_len)
+/**
+ * Compute the speed of specified hash function
+ *
+ * Run a speed test on the given hash algorithm on buffer of the given size.
+ * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and
+ * is available through the cfs_crypto_hash_speed() function.
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ * \param[in] buf data buffer on which to compute the hash
+ * \param[in] buf_len length of \a buf on which to compute hash
+ */
+static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
+ int buf_len = max(PAGE_SIZE, 1048576UL);
+ void *buf;
unsigned long start, end;
int bcount, err = 0;
- int sec = 1; /* do test only 1 sec */
- unsigned char hash[64];
- unsigned int hash_len = 64;
-
- for (start = jiffies, end = start + sec * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0,
- hash, &hash_len);
+ struct page *page;
+ unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+ unsigned int hash_len = sizeof(hash);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ buf = kmap(page);
+ memset(buf, 0xAD, PAGE_SIZE);
+ kunmap(page);
+
+ for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC),
+ bcount = 0; time_before(jiffies, end); bcount++) {
+ struct cfs_crypto_hash_desc *hdesc;
+ int i;
+
+ hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0);
+ if (IS_ERR(hdesc)) {
+ err = PTR_ERR(hdesc);
+ break;
+ }
+
+ for (i = 0; i < buf_len / PAGE_SIZE; i++) {
+ err = cfs_crypto_hash_update_page(hdesc, page, 0,
+ PAGE_SIZE);
+ if (err)
+ break;
+ }
+
+ err = cfs_crypto_hash_final(hdesc, hash, &hash_len);
if (err)
break;
}
end = jiffies;
-
+ __free_page(page);
+out_err:
if (err) {
- cfs_crypto_hash_speeds[alg_id] = -1;
- CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n",
- cfs_crypto_hash_name(alg_id), err);
+ cfs_crypto_hash_speeds[hash_alg] = err;
+ CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
+ cfs_crypto_hash_name(hash_alg), err);
} else {
unsigned long tmp;
tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
1000) / (1024 * 1024);
- cfs_crypto_hash_speeds[alg_id] = (int)tmp;
+ cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
+ CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
+ cfs_crypto_hash_name(hash_alg),
+ cfs_crypto_hash_speeds[hash_alg]);
}
- CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n",
- cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]);
}
-int cfs_crypto_hash_speed(unsigned char hash_alg)
+/**
+ * Hash speed in MB/s for a valid hash algorithm
+ *
+ * Return the performance of the specified \a hash_alg that was previously
+ * computed using cfs_crypto_performance_test().
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval positive speed of the hash function in MB/s
+ * \retval -ENOENT if \a hash_alg is unsupported
+ * \retval negative errno if \a hash_alg speed is unavailable
+ */
+int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg)
{
if (hash_alg < CFS_HASH_ALG_MAX)
return cfs_crypto_hash_speeds[hash_alg];
- return -1;
+ return -ENOENT;
}
EXPORT_SYMBOL(cfs_crypto_hash_speed);
/**
- * Do performance test for all hash algorithms.
+ * Run the performance test for all hash algorithms.
+ *
+ * Run the cfs_crypto_performance_test() benchmark for all of the available
+ * hash functions using a 1MB buffer size. This is a reasonable buffer size
+ * for Lustre RPCs, even if the actual RPC size is larger or smaller.
+ *
+ * Since the setup cost and computation speed of the various hash algorithms
+ * are a function of the buffer size (and possibly internal contention of
+ * offload engines), these figures are only an estimate of the speed seen in
+ * real usage, but are reasonable for comparing the available algorithms.
+ *
+ * The actual speeds are available via cfs_crypto_hash_speed() for later
+ * comparison.
+ *
+ * \retval 0 on success
+ * \retval -ENOMEM if no memory is available for test buffer
*/
static int cfs_crypto_test_hashes(void)
{
- unsigned char i;
- unsigned char *data;
- unsigned int j;
- /* Data block size for testing hash. Maximum
- * kmalloc size for 2.6.18 kernel is 128K
- */
- unsigned int data_len = 1 * 128 * 1024;
-
- data = kmalloc(data_len, 0);
- if (!data)
- return -ENOMEM;
+ enum cfs_crypto_hash_alg hash_alg;
- for (j = 0; j < data_len; j++)
- data[j] = j & 0xff;
+ for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
+ cfs_crypto_performance_test(hash_alg);
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- cfs_crypto_performance_test(i, data, data_len);
-
- kfree(data);
return 0;
}
static int adler32;
+/**
+ * Register available hash functions
+ *
+ * \retval 0
+ */
int cfs_crypto_register(void)
{
request_module("crc32c");
@@ -290,6 +434,9 @@ int cfs_crypto_register(void)
return 0;
}
+/**
+ * Unregister previously registered hash functions
+ */
void cfs_crypto_unregister(void)
{
if (adler32 == 0)
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index ebc60ac9b..d89f71ee4 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -40,10 +40,75 @@
#define LNET_MINOR 240
+static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
+{
+ size_t len = sizeof(*data);
+
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ return len;
+}
+
+static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+{
+ if (data->ioc_hdr.ioc_len > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inllen1 > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inllen2 > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
+ CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
+ CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_pbuf1 && !data->ioc_plen1) {
+ CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_pbuf2 && !data->ioc_plen2) {
+ CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_plen1 && !data->ioc_pbuf1) {
+ CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
+ return true;
+ }
+ if (data->ioc_plen2 && !data->ioc_pbuf2) {
+ CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
+ return true;
+ }
+ if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
+ CERROR("LIBCFS ioctl: packlen != ioc_len\n");
+ return true;
+ }
+ if (data->ioc_inllen1 &&
+ data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
+ CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
+ return true;
+ }
+ if (data->ioc_inllen2 &&
+ data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
+ data->ioc_inllen2 - 1] != '\0') {
+ CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
+ return true;
+ }
+ return false;
+}
+
int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
{
if (libcfs_ioctl_is_invalid(data)) {
- CERROR("LNET: ioctl not correctly formatted\n");
+ CERROR("libcfs ioctl: parameter not correctly formatted\n");
return -EINVAL;
}
@@ -57,68 +122,47 @@ int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
return 0;
}
-int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
- __u32 *len)
+int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ const struct libcfs_ioctl_hdr __user *uhdr)
{
struct libcfs_ioctl_hdr hdr;
+ int err = 0;
- if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
return -EFAULT;
if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
- CERROR("LNET: version mismatch expected %#x, got %#x\n",
+ CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n",
LIBCFS_IOCTL_VERSION, hdr.ioc_version);
return -EINVAL;
}
- *len = hdr.ioc_len;
-
- return 0;
-}
-
-int libcfs_ioctl_popdata(void __user *arg, void *data, int size)
-{
- if (copy_to_user(arg, data, size))
- return -EFAULT;
- return 0;
-}
-
-static int
-libcfs_psdev_open(struct inode *inode, struct file *file)
-{
- int rc = 0;
+ if (hdr.ioc_len < sizeof(struct libcfs_ioctl_data)) {
+ CERROR("libcfs ioctl: user buffer too small for ioctl\n");
+ return -EINVAL;
+ }
- if (!inode)
+ if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
+ CERROR("libcfs ioctl: user buffer is too large %d/%d\n",
+ hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
return -EINVAL;
- if (libcfs_psdev_ops.p_open)
- rc = libcfs_psdev_ops.p_open(0, NULL);
- else
- return -EPERM;
- return rc;
-}
+ }
-/* called when closing /dev/device */
-static int
-libcfs_psdev_release(struct inode *inode, struct file *file)
-{
- int rc = 0;
+ LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
+ if (!*hdr_pp)
+ return -ENOMEM;
- if (!inode)
- return -EINVAL;
- if (libcfs_psdev_ops.p_close)
- rc = libcfs_psdev_ops.p_close(0, NULL);
- else
- rc = -EPERM;
- return rc;
+ if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) {
+ LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
+ err = -EFAULT;
+ }
+ return err;
}
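
libcfs_ioctl_getdata() is the usual two-stage user copy: fetch the fixed-size header, bound-check ioc_len, then allocate and copy the full payload. Condensed to its skeleton (checks and error paths elided):

	copy_from_user(&hdr, uhdr, sizeof(hdr));	/* stage 1: header */
	/* reject ioc_len < sizeof(struct libcfs_ioctl_data)
	 * or ioc_len > LIBCFS_IOC_DATA_MAX */
	LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
	copy_from_user(*hdr_pp, uhdr, hdr.ioc_len);	/* stage 2: payload */
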
-static long libcfs_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static long
+libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct cfs_psdev_file pfile;
- int rc = 0;
-
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -130,26 +174,12 @@ static long libcfs_ioctl(struct file *file,
return -EINVAL;
}
- /* Handle platform-dependent IOC requests */
- switch (cmd) {
- case IOC_LIBCFS_PANIC:
- if (!capable(CFS_CAP_SYS_BOOT))
- return -EPERM;
- panic("debugctl-invoked panic");
- return 0;
- }
-
- if (libcfs_psdev_ops.p_ioctl)
- rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg);
- else
- rc = -EPERM;
- return rc;
+ return libcfs_ioctl(cmd, (void __user *)arg);
}
static const struct file_operations libcfs_fops = {
- .unlocked_ioctl = libcfs_ioctl,
- .open = libcfs_psdev_open,
- .release = libcfs_psdev_release,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = libcfs_psdev_ioctl,
};
struct miscdevice libcfs_dev = {
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 890844602..bbe19a684 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -46,30 +46,6 @@
#include <linux/kgdb.h>
#endif
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if add_wait_queue_exclusive_head is used.
- */
-void
-add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&waitq->lock, flags);
- __add_wait_queue_exclusive(waitq, link);
- spin_unlock_irqrestore(&waitq->lock, flags);
-}
-EXPORT_SYMBOL(add_wait_queue_exclusive_head);
-
sigset_t
cfs_block_allsigs(void)
{
@@ -128,13 +104,6 @@ cfs_restore_sigs(sigset_t old)
}
EXPORT_SYMBOL(cfs_restore_sigs);
-int
-cfs_signal_pending(void)
-{
- return signal_pending(current);
-}
-EXPORT_SYMBOL(cfs_signal_pending);
-
void
cfs_clear_sigpending(void)
{
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index cdc640bfd..f2d041118 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -54,9 +54,6 @@
# define DEBUG_SUBSYSTEM S_LNET
-#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \
- sizeof(struct lnet_ioctl_config_data))
-
#include "../../include/linux/libcfs/libcfs.h"
#include <asm/div64.h>
@@ -68,20 +65,6 @@
static struct dentry *lnet_debugfs_root;
-/* called when opening /dev/device */
-static int libcfs_psdev_open(unsigned long flags, void *args)
-{
- try_module_get(THIS_MODULE);
- return 0;
-}
-
-/* called when closing /dev/device */
-static int libcfs_psdev_release(unsigned long flags, void *args)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
static DECLARE_RWSEM(ioctl_list_sem);
static LIST_HEAD(ioctl_list);
@@ -115,39 +98,47 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
}
EXPORT_SYMBOL(libcfs_deregister_ioctl);
-static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
- void __user *arg, struct libcfs_ioctl_hdr *hdr)
+int libcfs_ioctl(unsigned long cmd, void __user *uparam)
{
struct libcfs_ioctl_data *data = NULL;
- int err = -EINVAL;
+ struct libcfs_ioctl_hdr *hdr;
+ int err;
+
+ /* 'cmd' and permissions get checked in our arch-specific caller */
+ err = libcfs_ioctl_getdata(&hdr, uparam);
+ if (err) {
+ CDEBUG_LIMIT(D_ERROR,
+ "libcfs ioctl: data header error %d\n", err);
+ return err;
+ }
- /*
- * The libcfs_ioctl_data_adjust() function performs adjustment
- * operations on the libcfs_ioctl_data structure to make
- * it usable by the code. This doesn't need to be called
- * for new data structures added.
- */
if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
+ /*
+ * The libcfs_ioctl_data_adjust() function performs adjustment
+ * operations on the libcfs_ioctl_data structure to make
+ * it usable by the code. This doesn't need to be called
+ * for new data structures added.
+ */
data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
err = libcfs_ioctl_data_adjust(data);
if (err)
- return err;
+ goto out;
}
+ CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd);
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
libcfs_debug_clear_buffer();
- return 0;
- /*
- * case IOC_LIBCFS_PANIC:
- * Handled in arch/cfs_module.c
- */
+ break;
+
case IOC_LIBCFS_MARK_DEBUG:
- if (!data->ioc_inlbuf1 ||
- data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
- return -EINVAL;
+ if (!data || !data->ioc_inlbuf1 ||
+ data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') {
+ err = -EINVAL;
+ goto out;
+ }
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
- return 0;
+ break;
default: {
struct libcfs_ioctl_handler *hand;
@@ -156,67 +147,23 @@ static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
down_read(&ioctl_list_sem);
list_for_each_entry(hand, &ioctl_list, item) {
err = hand->handle_ioctl(cmd, hdr);
- if (err != -EINVAL) {
- if (err == 0)
- err = libcfs_ioctl_popdata(arg,
- hdr, hdr->ioc_len);
- break;
+ if (err == -EINVAL)
+ continue;
+
+ if (!err) {
+ if (copy_to_user(uparam, hdr, hdr->ioc_len))
+ err = -EFAULT;
}
+ break;
}
up_read(&ioctl_list_sem);
- break;
- }
- }
-
- return err;
-}
-
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd,
- void __user *arg)
-{
- struct libcfs_ioctl_hdr *hdr;
- int err = 0;
- __u32 buf_len;
-
- err = libcfs_ioctl_getdata_len(arg, &buf_len);
- if (err)
- return err;
-
- /*
- * do a check here to restrict the size of the memory
- * to allocate to guard against DoS attacks.
- */
- if (buf_len > LNET_MAX_IOCTL_BUF_LEN) {
- CERROR("LNET: user buffer exceeds kernel buffer\n");
- return -EINVAL;
- }
-
- LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL);
- if (!hdr)
- return -ENOMEM;
-
- /* 'cmd' and permissions get checked in our arch-specific caller */
- if (copy_from_user(hdr, arg, buf_len)) {
- CERROR("LNET ioctl: data error\n");
- err = -EFAULT;
- goto out;
+ break;
+ }
}
-
- err = libcfs_ioctl_handle(pfile, cmd, arg, hdr);
-
out:
- LIBCFS_FREE(hdr, buf_len);
+ LIBCFS_FREE(hdr, hdr->ioc_len);
return err;
}
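
Handlers on ioctl_list follow the chain-of-responsibility convention visible in the loop above: return -EINVAL to let the next handler try, any other error to claim and fail the command, and 0 to have the (possibly rewritten) hdr copied back to user space. A hedged handler skeleton (the struct layout is inferred from its use here, and IOC_LIBCFS_EXAMPLE is hypothetical):

	static int my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
	{
		if (cmd != IOC_LIBCFS_EXAMPLE)
			return -EINVAL;		/* not mine: try next handler */
		/* act on hdr; optionally rewrite it for copy-back */
		return 0;
	}

	static struct libcfs_ioctl_handler my_handler = {
		.handle_ioctl = my_handle_ioctl,
	};
	/* register with libcfs_register_ioctl(&my_handler) and remove with
	 * libcfs_deregister_ioctl(&my_handler) */
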
-struct cfs_psdev_ops libcfs_psdev_ops = {
- libcfs_psdev_open,
- libcfs_psdev_release,
- NULL,
- NULL,
- libcfs_ioctl
-};
-
int lprocfs_call_handler(void *data, int write, loff_t *ppos,
void __user *buffer, size_t *lenp,
int (*handler)(void *data, int write, loff_t pos,
@@ -478,6 +425,13 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
+ .procname = "fail_err",
+ .data = &cfs_fail_err,
+ .maxlen = sizeof(cfs_fail_err),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
}
};
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 244eb89ee..7739b9469 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -707,10 +707,9 @@ int cfs_tracefile_dump_all_pages(char *filename)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
char *buf;
+ mm_segment_t __oldfs;
int rc;
- DECL_MMSPACE;
-
cfs_tracefile_write_lock();
filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
@@ -729,11 +728,12 @@ int cfs_tracefile_dump_all_pages(char *filename)
rc = 0;
goto close;
}
+ __oldfs = get_fs();
+ set_fs(get_ds());
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO
*/
- MMSPACE_OPEN;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
@@ -752,7 +752,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
list_del(&tage->linkage);
cfs_tage_free(tage);
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
rc = vfs_fsync(filp, 1);
if (rc)
pr_err("sync returns %d\n", rc);
@@ -986,13 +986,12 @@ static int tracefiled(void *arg)
struct tracefiled_ctl *tctl = arg;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ mm_segment_t __oldfs;
struct file *filp;
char *buf;
int last_loop = 0;
int rc;
- DECL_MMSPACE;
-
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
@@ -1025,8 +1024,8 @@ static int tracefiled(void *arg)
__LASSERT(list_empty(&pc.pc_pages));
goto end_loop;
}
-
- MMSPACE_OPEN;
+ __oldfs = get_fs();
+ set_fs(get_ds());
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
static loff_t f_pos;
@@ -1051,7 +1050,7 @@ static int tracefiled(void *arg)
break;
}
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index c72fe00dc..92236ae59 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -111,7 +111,7 @@ cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
* 1. when it returns no one shall try to schedule the workitem.
*/
void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
* cancel schedule request of workitem \a wi
*/
int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
int rc;
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
* be added, and even dynamic creation of serialised queues might be supported.
*/
void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -229,12 +229,12 @@ static int cfs_wi_scheduler(void *arg)
while (!sched->ws_stopping) {
int nloops = 0;
int rc;
- cfs_workitem_t *wi;
+ struct cfs_workitem *wi;
while (!list_empty(&sched->ws_runq) &&
nloops < CFS_WI_RESCHED) {
- wi = list_entry(sched->ws_runq.next, cfs_workitem_t,
- wi_list);
+ wi = list_entry(sched->ws_runq.next,
+ struct cfs_workitem, wi_list);
LASSERT(wi->wi_scheduled && !wi->wi_running);
list_del_init(&wi->wi_list);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 876475554..fe0dbe746 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1215,9 +1215,9 @@ lnet_shutdown_lndni(struct lnet_ni *ni)
}
static int
-lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
- __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
+lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
{
+ struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
int rc = -EINVAL;
int lnd_type;
lnd_t *lnd;
@@ -1275,6 +1275,21 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
ni->ni_lnd = lnd;
+ if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
+ lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
+
+ if (lnd_tunables) {
+ LIBCFS_ALLOC(ni->ni_lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ if (!ni->ni_lnd_tunables) {
+ mutex_unlock(&the_lnet.ln_lnd_mutex);
+ rc = -ENOMEM;
+ goto failed0;
+ }
+ memcpy(ni->ni_lnd_tunables, lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ }
+
rc = lnd->lnd_startup(ni);
mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -1292,20 +1307,28 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
* If given some LND tunable parameters, parse those now to
* override the values in the NI structure.
*/
- if (peer_buf_cr >= 0)
- ni->ni_peerrtrcredits = peer_buf_cr;
- if (peer_timeout >= 0)
- ni->ni_peertimeout = peer_timeout;
+ if (conf && conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) {
+ ni->ni_peerrtrcredits =
+ conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
+ }
+ if (conf && conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) {
+ ni->ni_peertimeout =
+ conf->cfg_config_u.cfg_net.net_peer_timeout;
+ }
/*
* TODO
* Note: For now, don't allow the user to change
* peertxcredits as this number is used in the
* IB LND to control queue depth.
- * if (peer_cr != -1)
- * ni->ni_peertxcredits = peer_cr;
+ *
+ * if (conf && conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
+ * ni->ni_peertxcredits =
+ * conf->cfg_config_u.cfg_net.net_peer_tx_credits;
*/
- if (credits >= 0)
- ni->ni_maxtxcredits = credits;
+ if (conf && conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) {
+ ni->ni_maxtxcredits =
+ conf->cfg_config_u.cfg_net.net_max_tx_credits;
+ }
LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
@@ -1367,7 +1390,7 @@ lnet_startup_lndnis(struct list_head *nilist)
while (!list_empty(nilist)) {
ni = list_entry(nilist->next, lnet_ni_t, ni_list);
list_del(&ni->ni_list);
- rc = lnet_startup_lndni(ni, -1, -1, -1, -1);
+ rc = lnet_startup_lndni(ni, NULL);
if (rc < 0)
goto failed;
@@ -1641,25 +1664,20 @@ EXPORT_SYMBOL(LNetNIFini);
* parameters
*
* \param[in] ni network interface structure
- * \param[out] cpt_count the number of cpts the ni is on
- * \param[out] nid Network Interface ID
- * \param[out] peer_timeout NI peer timeout
- * \param[out] peer_tx_crdits NI peer transmit credits
- * \param[out] peer_rtr_credits NI peer router credits
- * \param[out] max_tx_credits NI max transmit credit
- * \param[out] net_config Network configuration
+ * \param[out] config NI configuration
*/
static void
-lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
- int *peer_timeout, int *peer_tx_credits,
- int *peer_rtr_credits, int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config)
+lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
{
+ struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
+ struct lnet_ioctl_net_config *net_config;
+ size_t min_size, tunable_size = 0;
int i;
- if (!ni)
+ if (!ni || !config)
return;
+ net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
if (!net_config)
return;
@@ -1675,11 +1693,11 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
sizeof(net_config->ni_interfaces[i]));
}
- *nid = ni->ni_nid;
- *peer_timeout = ni->ni_peertimeout;
- *peer_tx_credits = ni->ni_peertxcredits;
- *peer_rtr_credits = ni->ni_peerrtrcredits;
- *max_tx_credits = ni->ni_maxtxcredits;
+ config->cfg_nid = ni->ni_nid;
+ config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
+ config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
+ config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
+ config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
net_config->ni_status = ni->ni_status->ns_status;
@@ -1689,18 +1707,40 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
for (i = 0; i < num_cpts; i++)
net_config->ni_cpts[i] = ni->ni_cpts[i];
- *cpt_count = num_cpts;
+ config->cfg_ncpts = num_cpts;
+ }
+
+ /*
+ * See if the user-space tools sent in a newer, larger version
+ * of struct lnet_tunables than the kernel uses.
+ */
+ min_size = sizeof(*config) + sizeof(*net_config);
+
+ if (config->cfg_hdr.ioc_len > min_size)
+ tunable_size = config->cfg_hdr.ioc_len - min_size;
+
+ /* Don't copy too much data to user space */
+ min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
+ lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
+
+ if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
+ memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
+ config->cfg_config_u.cfg_net.net_interface_count = 1;
+
+ /* Tell user space that the kernel side has less data */
+ if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
+ min_size = tunable_size - sizeof(*ni->ni_lnd_tunables);
+ config->cfg_hdr.ioc_len -= min_size;
+ }
}
}
-int
-lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
- int *peer_tx_credits, int *peer_rtr_credits,
- int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config)
+static int
+lnet_get_net_config(struct lnet_ioctl_config_data *config)
{
struct lnet_ni *ni;
struct list_head *tmp;
+ int idx = config->cfg_count;
int cpt, i = 0;
int rc = -ENOENT;
@@ -1712,9 +1752,7 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
ni = list_entry(tmp, lnet_ni_t, ni_list);
lnet_ni_lock(ni);
- lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout,
- peer_tx_credits, peer_rtr_credits,
- max_tx_credits, net_config);
+ lnet_fill_ni_info(ni, config);
lnet_ni_unlock(ni);
rc = 0;
break;
@@ -1725,10 +1763,9 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
}
int
-lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits)
+lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
{
+ char *nets = conf->cfg_config_u.cfg_net.net_intf;
lnet_ping_info_t *pinfo;
lnet_handle_md_t md_handle;
struct lnet_ni *ni;
@@ -1773,8 +1810,7 @@ lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
list_del_init(&ni->ni_list);
- rc = lnet_startup_lndni(ni, peer_timeout, peer_cr,
- peer_buf_cr, credits);
+ rc = lnet_startup_lndni(ni, conf);
if (rc)
goto failed1;
@@ -1864,6 +1900,10 @@ LNetCtl(unsigned int cmd, void *arg)
int rc;
unsigned long secs_passed;
+ BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
+ sizeof(struct lnet_ioctl_net_config) +
+ sizeof(struct lnet_ioctl_config_data));
+
switch (cmd) {
case IOC_LIBCFS_GET_NI:
rc = LNetGetId(data->ioc_count, &id);
@@ -1918,27 +1958,14 @@ LNetCtl(unsigned int cmd, void *arg)
&config->cfg_config_u.cfg_route.rtr_priority);
case IOC_LIBCFS_GET_NET: {
- struct lnet_ioctl_net_config *net_config;
- size_t total = sizeof(*config) + sizeof(*net_config);
-
+ size_t total = sizeof(*config) +
+ sizeof(struct lnet_ioctl_net_config);
config = arg;
if (config->cfg_hdr.ioc_len < total)
return -EINVAL;
- net_config = (struct lnet_ioctl_net_config *)
- config->cfg_bulk;
- if (!net_config)
- return -EINVAL;
-
- return lnet_get_net_config(config->cfg_count,
- &config->cfg_ncpts,
- &config->cfg_nid,
- &config->cfg_config_u.cfg_net.net_peer_timeout,
- &config->cfg_config_u.cfg_net.net_peer_tx_credits,
- &config->cfg_config_u.cfg_net.net_peer_rtr_credits,
- &config->cfg_config_u.cfg_net.net_max_tx_credits,
- net_config);
+ return lnet_get_net_config(config);
}
case IOC_LIBCFS_GET_LNET_STATS: {
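
The reworked lnet_fill_ni_info() above negotiates how many LND tunable bytes to return through the ioctl header length: the caller advertises its buffer size in cfg_hdr.ioc_len, the kernel copies at most what it actually has, and then shrinks ioc_len when the caller offered more room than could be filled. A minimal standalone sketch of that negotiation, with stand-in types (the real structs live in the LNet uapi headers):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for struct libcfs_ioctl_hdr / lnet_ioctl_config_data. */
struct hdr { size_t ioc_len; };
struct cfg { struct hdr cfg_hdr; unsigned char cfg_bulk[64]; };

/*
 * Mirror of the length negotiation in lnet_fill_ni_info():
 * 'base' stands for sizeof(*config) + sizeof(*net_config) and
 * 'have' for sizeof(*ni->ni_lnd_tunables).  Returns bytes copied.
 */
static size_t fill_tunables(struct cfg *c, size_t base, size_t have,
			    const void *src, void *dst)
{
	size_t room = 0, n;

	if (c->cfg_hdr.ioc_len > base)	/* caller left space for tunables */
		room = c->cfg_hdr.ioc_len - base;

	n = room < have ? room : have;	/* don't copy too much */
	if (n)
		memcpy(dst, src, n);

	if (room > have)		/* kernel has less than was offered */
		c->cfg_hdr.ioc_len -= room - have;

	return n;
}

int main(void)
{
	struct cfg c = { .cfg_hdr = { .ioc_len = 40 } };
	unsigned char tun[8] = "tunable";

	/* base 16, kernel has 8 bytes, caller offered 24: copy 8 and
	 * shrink ioc_len by the unused 16 bytes, from 40 to 24. */
	fill_tunables(&c, 16, sizeof(tun), tun, c.cfg_bulk);
	printf("%zu\n", c.cfg_hdr.ioc_len);	/* prints 24 */
	return 0;
}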
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 449069c9e..480cc9c6c 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -107,6 +107,9 @@ lnet_ni_free(struct lnet_ni *ni)
if (ni->ni_cpts)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
+ if (ni->ni_lnd_tunables)
+ LIBCFS_FREE(ni->ni_lnd_tunables, sizeof(*ni->ni_lnd_tunables));
+
for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
LIBCFS_FREE(ni->ni_interfaces[i],
strlen(ni->ni_interfaces[i]) + 1);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index f19aa9320..c5d5bedb3 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -407,7 +407,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
LASSERT(niov > 0);
LASSERT(nkiov > 0);
this_nob = min(iov->iov_len - iovoffset,
- (__kernel_size_t) kiov->kiov_len - kiovoffset);
+ (__kernel_size_t)kiov->kiov_len - kiovoffset);
this_nob = min(this_nob, nob);
if (!addr)
@@ -477,7 +477,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
+ this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
iov->iov_len - iovoffset);
this_nob = min(this_nob, nob);
@@ -996,7 +996,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
LASSERT(msg2->msg_txpeer->lp_ni == ni);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
+ (void)lnet_post_send_locked(msg2, 1);
}
}
@@ -1019,7 +1019,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
LASSERT(msg2->msg_txpeer == txpeer);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
+ (void)lnet_post_send_locked(msg2, 1);
}
}
@@ -1142,7 +1142,7 @@ routing_off:
lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
- (void) lnet_post_routed_recv_locked(msg2, 1);
+ (void)lnet_post_routed_recv_locked(msg2, 1);
}
}
if (rxpeer) {
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 93037c116..246b5c141 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -108,12 +108,7 @@ lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
rc = -EINVAL;
goto out_unlock;
}
- rc = lnet_dyn_add_ni(LNET_PID_LUSTRE,
- conf->cfg_config_u.cfg_net.net_intf,
- conf->cfg_config_u.cfg_net.net_peer_timeout,
- conf->cfg_config_u.cfg_net.net_peer_tx_credits,
- conf->cfg_config_u.cfg_net.net_peer_rtr_credits,
- conf->cfg_config_u.cfg_net.net_max_tx_credits);
+ rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf);
out_unlock:
mutex_unlock(&lnet_config_mutex);
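
The module.c hunk above follows the same shape as the api-ni.c changes: lnet_dyn_add_ni() and lnet_get_net_config() now take the whole lnet_ioctl_config_data instead of having every caller unpack its fields. A hedged illustration of the pattern, with made-up names rather than the real LNet prototypes:

#include <stdio.h>

/* Illustrative only; not the real LNet configuration types. */
struct net_cfg {
	int timeout;
	int tx_credits;
	int rtr_credits;
	int max_credits;
};

/* Before: add_ni(nets, timeout, tx, rtr, max) -- five scalars that
 * every call site had to pull out of the config.  After: one pointer. */
static int add_ni(const char *nets, const struct net_cfg *cfg)
{
	printf("%s: timeout=%d, tx=%d\n", nets, cfg->timeout,
	       cfg->tx_credits);
	return 0;
}

int main(void)
{
	struct net_cfg cfg = {
		.timeout = 180, .tx_credits = 8,
		.rtr_credits = 0, .max_credits = 256,
	};

	return add_ni("tcp0", &cfg);
}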
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index dcb6e506f..a63d86c4c 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -49,10 +49,10 @@ module_param(brw_inject_errors, int, 0644);
MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
static void
-brw_client_fini(sfw_test_instance_t *tsi)
+brw_client_fini(struct sfw_test_instance *tsi)
{
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ struct srpc_bulk *bulk;
+ struct sfw_test_unit *tsu;
LASSERT(tsi->tsi_is_client);
@@ -67,21 +67,21 @@ brw_client_fini(sfw_test_instance_t *tsi)
}
static int
-brw_client_init(sfw_test_instance_t *tsi)
+brw_client_init(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
int flags;
int npg;
int len;
int opc;
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ struct srpc_bulk *bulk;
+ struct sfw_test_unit *tsu;
LASSERT(sn);
LASSERT(tsi->tsi_is_client);
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
opc = breq->blk_opc;
flags = breq->blk_flags;
@@ -91,9 +91,8 @@ brw_client_init(sfw_test_instance_t *tsi)
* but we have to keep it for compatibility
*/
len = npg * PAGE_SIZE;
-
} else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+ struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
/*
* I should never get this step if it's unknown feature
@@ -225,7 +224,7 @@ bad_data:
}
static void
-brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -237,7 +236,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -255,14 +254,14 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
+brw_client_prep_rpc(struct sfw_test_unit *tsu,
+ lnet_process_id_t dest, struct srpc_client_rpc **rpcpp)
{
- srpc_bulk_t *bulk = tsu->tsu_private;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_client_rpc_t *rpc;
- srpc_brw_reqst_t *req;
+ struct srpc_bulk *bulk = tsu->tsu_private;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_client_rpc *rpc;
+ struct srpc_brw_reqst *req;
int flags;
int npg;
int len;
@@ -273,15 +272,14 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
LASSERT(bulk);
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
len = npg * PAGE_SIZE;
-
} else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+ struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
/*
* I should never get this step if it's unknown feature
@@ -299,7 +297,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
if (rc)
return rc;
- memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
+ memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg]));
if (opc == LST_BRW_WRITE)
brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
else
@@ -315,21 +313,21 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
}
static void
-brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
__u64 magic = BRW_MAGIC;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_msg_t *msg = &rpc->crpc_replymsg;
- srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_msg *msg = &rpc->crpc_replymsg;
+ struct srpc_brw_reply *reply = &msg->msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
LASSERT(sn);
if (rpc->crpc_status) {
CERROR("BRW RPC to %s failed with %d\n",
libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_brw_errors);
return;
}
@@ -363,7 +361,7 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
static void
brw_server_rpc_done(struct srpc_server_rpc *rpc)
{
- srpc_bulk_t *blk = rpc->srpc_bulk;
+ struct srpc_bulk *blk = rpc->srpc_bulk;
if (!blk)
return;
@@ -384,9 +382,9 @@ static int
brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
{
__u64 magic = BRW_MAGIC;
- srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
- srpc_brw_reqst_t *reqst;
- srpc_msg_t *reqstmsg;
+ struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst;
+ struct srpc_msg *reqstmsg;
LASSERT(rpc->srpc_bulk);
LASSERT(rpc->srpc_reqstbuf);
@@ -420,10 +418,10 @@ static int
brw_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst;
+ struct srpc_msg *replymsg = &rpc->srpc_replymsg;
+ struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst;
int npg;
int rc;
@@ -459,7 +457,7 @@ brw_server_handle(struct srpc_server_rpc *rpc)
if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
/* compat with old version */
- if (reqst->brw_len & ~CFS_PAGE_MASK) {
+ if (reqst->brw_len & ~PAGE_MASK) {
reply->brw_status = EINVAL;
return 0;
}
@@ -490,7 +488,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
-sfw_test_client_ops_t brw_test_client;
+struct sfw_test_client_ops brw_test_client;
+
void brw_init_test_client(void)
{
brw_test_client.tso_init = brw_client_init;
@@ -499,7 +498,8 @@ void brw_init_test_client(void)
brw_test_client.tso_done_rpc = brw_client_done_rpc;
};
-srpc_service_t brw_test_service;
+struct srpc_service brw_test_service;
+
void brw_init_test_service(void)
{
brw_test_service.sv_id = SRPC_SERVICE_BRW;
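
Besides losing their typedef'd types, brw_test_client and brw_test_service now have their function-pointer tables filled in by brw_init_test_client()/brw_init_test_service() at startup rather than by static initializers. A small standalone sketch of that registration pattern (the framework types here are stand-ins):

#include <stdio.h>

/* Stand-in for struct sfw_test_client_ops. */
struct test_ops {
	int  (*tso_init)(void);
	void (*tso_fini)(void);
};

static int  demo_init(void) { puts("client init"); return 0; }
static void demo_fini(void) { puts("client fini"); }

static struct test_ops demo_test_client;

/* Like brw_init_test_client(): wire up the callbacks at init time. */
static void demo_init_test_client(void)
{
	demo_test_client.tso_init = demo_init;
	demo_test_client.tso_fini = demo_fini;
}

int main(void)
{
	demo_init_test_client();
	demo_test_client.tso_init();
	demo_test_client.tso_fini();
	return 0;
}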
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 79ee6c0bf..408c614b6 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -51,9 +51,9 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
char *name;
int rc;
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_key || /* no key is specified */
- !args->lstio_ses_namep || /* session name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_key || /* no key is specified */
+ !args->lstio_ses_namep || /* session name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -95,11 +95,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_keyp || /* address for output key */
- !args->lstio_ses_featp || /* address for output features */
- !args->lstio_ses_ndinfo || /* address for output ndinfo */
- !args->lstio_ses_namep || /* address for output name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_keyp || /* address for output key */
+ !args->lstio_ses_featp || /* address for output features */
+ !args->lstio_ses_ndinfo || /* address for output ndinfo */
+ !args->lstio_ses_namep || /* address for output name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -125,7 +125,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
if (!args->lstio_dbg_resultp)
return -EINVAL;
- if (args->lstio_dbg_namep && /* name of batch/group */
+ if (args->lstio_dbg_namep && /* name of batch/group */
(args->lstio_dbg_nmlen <= 0 ||
args->lstio_dbg_nmlen > LST_NAME_SIZE))
return -EINVAL;
@@ -326,7 +326,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (!args->lstio_grp_idsp || /* array of ids */
+ if (!args->lstio_grp_idsp || /* array of ids */
args->lstio_grp_count <= 0 ||
!args->lstio_grp_resultp ||
!args->lstio_grp_featp ||
@@ -394,13 +394,13 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_grp_entp && /* output: group entry */
- !args->lstio_grp_dentsp) /* output: node entry */
+ if (!args->lstio_grp_entp && /* output: group entry */
+ !args->lstio_grp_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_grp_dentsp) { /* have node entry */
- if (!args->lstio_grp_idxp || /* node index */
- !args->lstio_grp_ndentp) /* # of node entry */
+ if (args->lstio_grp_dentsp) { /* have node entry */
+ if (!args->lstio_grp_idxp || /* node index */
+ !args->lstio_grp_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&ndent, args->lstio_grp_ndentp,
@@ -612,18 +612,18 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (!args->lstio_bat_namep || /* batch name */
+ if (!args->lstio_bat_namep || /* batch name */
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_bat_entp && /* output: batch entry */
- !args->lstio_bat_dentsp) /* output: node entry */
+ if (!args->lstio_bat_entp && /* output: batch entry */
+ !args->lstio_bat_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_bat_dentsp) { /* have node entry */
- if (!args->lstio_bat_idxp || /* node index */
- !args->lstio_bat_ndentp) /* # of node entry */
+ if (args->lstio_bat_dentsp) { /* have node entry */
+ if (!args->lstio_bat_idxp || /* node index */
+ !args->lstio_bat_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&index, args->lstio_bat_idxp,
@@ -722,18 +722,18 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (!args->lstio_tes_resultp ||
!args->lstio_tes_retp ||
- !args->lstio_tes_bat_name || /* no specified batch */
+ !args->lstio_tes_bat_name || /* no specified batch */
args->lstio_tes_bat_nmlen <= 0 ||
args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_sgrp_name || /* no source group */
+ !args->lstio_tes_sgrp_name || /* no source group */
args->lstio_tes_sgrp_nmlen <= 0 ||
args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_dgrp_name || /* no target group */
+ !args->lstio_tes_dgrp_name || /* no target group */
args->lstio_tes_dgrp_nmlen <= 0 ||
args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_tes_loop || /* negative is infinite */
+ if (!args->lstio_tes_loop || /* negative is infinite */
args->lstio_tes_concur <= 0 ||
args->lstio_tes_dist <= 0 ||
args->lstio_tes_span <= 0)
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (args->lstio_tes_param &&
(args->lstio_tes_param_len <= 0 ||
args->lstio_tes_param_len >
- PAGE_SIZE - sizeof(lstcon_test_t)))
+ PAGE_SIZE - sizeof(struct lstcon_test)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
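
The lstio_* handlers touched above all gate on the same shape of check before doing any work: each required user pointer must be non-NULL and each name length must fall in (0, LST_NAME_SIZE]. A runnable userspace analogue of that validation (the constant value and names are stand-ins):

#include <errno.h>
#include <stdio.h>

#define LST_NAME_SIZE 32	/* stand-in value for the demo */

/* Analogue of the lst_*_ioctl() argument checks. */
static int check_name_arg(const char *name_up, int nmlen)
{
	if (!name_up ||			/* no name supplied */
	    nmlen <= 0 ||		/* empty or negative length */
	    nmlen > LST_NAME_SIZE)	/* longer than the framework allows */
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_name_arg("batch0", 6));	/* 0 */
	printf("%d\n", check_name_arg(NULL, 6));	/* -EINVAL */
	return 0;
}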
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 35a227d0c..6f6875811 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -46,13 +46,13 @@
#include "conrpc.h"
#include "console.h"
-void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *,
- lstcon_node_t *, lstcon_trans_stat_t *);
+void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *,
+ struct lstcon_node *, lstcon_trans_stat_t *);
static void
-lstcon_rpc_done(srpc_client_rpc_t *rpc)
+lstcon_rpc_done(struct srpc_client_rpc *rpc)
{
- lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
+ struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv;
LASSERT(crpc && rpc == crpc->crp_rpc);
LASSERT(crpc->crp_posted && !crpc->crp_finished);
@@ -90,8 +90,8 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
}
static int
-lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
+lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
{
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
@@ -115,16 +115,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
}
static int
-lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
+lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
{
- lstcon_rpc_t *crpc = NULL;
+ struct lstcon_rpc *crpc = NULL;
int rc;
spin_lock(&console_session.ses_rpc_lock);
crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
- lstcon_rpc_t, crp_link);
+ struct lstcon_rpc, crp_link);
if (crpc)
list_del_init(&crpc->crp_link);
@@ -148,9 +148,9 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
}
void
-lstcon_rpc_put(lstcon_rpc_t *crpc)
+lstcon_rpc_put(struct lstcon_rpc *crpc)
{
- srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
+ struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk;
int i;
LASSERT(list_empty(&crpc->crp_link));
@@ -183,9 +183,9 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
}
static void
-lstcon_rpc_post(lstcon_rpc_t *crpc)
+lstcon_rpc_post(struct lstcon_rpc *crpc)
{
- lstcon_rpc_trans_t *trans = crpc->crp_trans;
+ struct lstcon_rpc_trans *trans = crpc->crp_trans;
LASSERT(trans);
@@ -236,9 +236,9 @@ lstcon_rpc_trans_name(int transop)
int
lstcon_rpc_trans_prep(struct list_head *translist, int transop,
- lstcon_rpc_trans_t **transpp)
+ struct lstcon_rpc_trans **transpp)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
if (translist) {
list_for_each_entry(trans, translist, tas_link) {
@@ -278,26 +278,26 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop,
}
void
-lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
+lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc)
{
list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
crpc->crp_trans = trans;
}
void
-lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
+lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_node_t *nd;
+ struct srpc_client_rpc *rpc;
+ struct lstcon_rpc *crpc;
+ struct lstcon_node *nd;
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
@@ -326,7 +326,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
}
static int
-lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
+lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans)
{
if (console_session.ses_shutdown &&
!list_empty(&trans->tas_olink)) /* Not an end session RPC */
@@ -336,9 +336,9 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
}
int
-lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
+lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
{
- lstcon_rpc_t *crpc;
+ struct lstcon_rpc *crpc;
int rc;
if (list_empty(&trans->tas_rpcs_list))
@@ -386,11 +386,11 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
}
static int
-lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
+lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
{
- lstcon_node_t *nd = crpc->crp_node;
- srpc_client_rpc_t *rpc = crpc->crp_rpc;
- srpc_generic_reply_t *rep;
+ struct lstcon_node *nd = crpc->crp_node;
+ struct srpc_client_rpc *rpc = crpc->crp_rpc;
+ struct srpc_generic_reply *rep;
LASSERT(nd && rpc);
LASSERT(crpc->crp_stamp);
@@ -423,10 +423,10 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
}
void
-lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
+lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat)
{
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *rep;
int error;
LASSERT(stat);
@@ -466,17 +466,17 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
}
int
-lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent)
{
struct list_head tmp;
struct list_head __user *next;
lstcon_rpc_ent_t *ent;
- srpc_generic_reply_t *rep;
- lstcon_rpc_t *crpc;
- srpc_msg_t *msg;
- lstcon_node_t *nd;
+ struct srpc_generic_reply *rep;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *msg;
+ struct lstcon_node *nd;
long dur;
struct timeval tv;
int error;
@@ -520,7 +520,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
continue;
/* RPC is done */
- rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+ rep = (struct srpc_generic_reply *)&msg->msg_body.reply;
if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
copy_to_user(&ent->rpe_fwk_errno, &rep->status,
@@ -531,7 +531,6 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
continue;
error = readent(trans->tas_opc, msg, ent);
-
if (error)
return error;
}
@@ -540,11 +539,11 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
}
void
-lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
+lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *tmp;
+ struct srpc_client_rpc *rpc;
+ struct lstcon_rpc *crpc;
+ struct lstcon_rpc *tmp;
int count = 0;
list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
@@ -563,10 +562,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
}
/*
- * rpcs can be still not callbacked (even LNetMDUnlink is called)
- * because huge timeout for inaccessible network, don't make
- * user wait for them, just abandon them, they will be recycled
- * in callback
+ * RPCs may still not have been called back (even after
+ * LNetMDUnlink is called) because of the huge timeout for an
+ * inaccessible network; don't make the user wait for them,
+ * just abandon them, they will be recycled in the callback
*/
LASSERT(crpc->crp_status);
@@ -593,11 +592,11 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
}
int
-lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
- unsigned feats, lstcon_rpc_t **crpc)
+lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_mksn_reqst_t *msrq;
- srpc_rmsn_reqst_t *rsrq;
+ struct srpc_mksn_reqst *msrq;
+ struct srpc_rmsn_reqst *rsrq;
int rc;
switch (transop) {
@@ -632,9 +631,9 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
}
int
-lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_debug_reqst_t *drq;
+ struct srpc_debug_reqst *drq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
@@ -650,11 +649,11 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
}
int
-lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
+lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+ struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
{
- lstcon_batch_t *batch;
- srpc_batch_reqst_t *brq;
+ struct lstcon_batch *batch;
+ struct srpc_batch_reqst *brq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
@@ -676,16 +675,16 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
LASSERT(!tsb->tsb_index);
- batch = (lstcon_batch_t *)tsb;
+ batch = (struct lstcon_batch *)tsb;
brq->bar_arg = batch->bat_arg;
return 0;
}
int
-lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_stat_reqst_t *srq;
+ struct srpc_stat_reqst *srq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
@@ -716,12 +715,12 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
}
static int
-lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
+lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
int dist, int span, int nkiov, lnet_kiov_t *kiov)
{
lnet_process_id_packed_t *pid;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int start;
int end;
int i = 0;
@@ -770,9 +769,9 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
}
static int
-lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
+lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
{
- test_ping_req_t *prq = &req->tsr_u.ping;
+ struct test_ping_req *prq = &req->tsr_u.ping;
prq->png_size = param->png_size;
prq->png_flags = param->png_flags;
@@ -781,9 +780,9 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
}
static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
{
- test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
+ struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
@@ -794,9 +793,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
}
static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
{
- test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
+ struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
brq->blk_opc = param->blk_opc;
brq->blk_flags = param->blk_flags;
@@ -807,13 +806,13 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
}
int
-lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_test_t *test, lstcon_rpc_t **crpc)
+lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+ struct lstcon_test *test, struct lstcon_rpc **crpc)
{
- lstcon_group_t *sgrp = test->tes_src_grp;
- lstcon_group_t *dgrp = test->tes_dst_grp;
- srpc_test_reqst_t *trq;
- srpc_bulk_t *bulk;
+ struct lstcon_group *sgrp = test->tes_src_grp;
+ struct lstcon_group *dgrp = test->tes_dst_grp;
+ struct srpc_test_reqst *trq;
+ struct srpc_bulk *bulk;
int i;
int npg = 0;
int nob = 0;
@@ -841,7 +840,6 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
trq->tsr_ndest = 0;
trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
-
} else {
bulk = &(*crpc)->crp_rpc->crpc_bulk;
@@ -917,10 +915,10 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
}
static int
-lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
- lstcon_node_t *nd, srpc_msg_t *reply)
+lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans,
+ struct lstcon_node *nd, struct srpc_msg *reply)
{
- srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
+ struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply;
int status = mksn_rep->mksn_status;
if (!status &&
@@ -940,7 +938,7 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
if (!trans->tas_feats_updated) {
spin_lock(&console_session.ses_rpc_lock);
- if (!trans->tas_feats_updated) { /* recheck with lock */
+ if (!trans->tas_feats_updated) { /* recheck with lock */
trans->tas_feats_updated = 1;
trans->tas_features = reply->msg_ses_feats;
}
@@ -964,14 +962,14 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
}
void
-lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
- lstcon_node_t *nd, lstcon_trans_stat_t *stat)
+lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg,
+ struct lstcon_node *nd, lstcon_trans_stat_t *stat)
{
- srpc_rmsn_reply_t *rmsn_rep;
- srpc_debug_reply_t *dbg_rep;
- srpc_batch_reply_t *bat_rep;
- srpc_test_reply_t *test_rep;
- srpc_stat_reply_t *stat_rep;
+ struct srpc_rmsn_reply *rmsn_rep;
+ struct srpc_debug_reply *dbg_rep;
+ struct srpc_batch_reply *bat_rep;
+ struct srpc_test_reply *test_rep;
+ struct srpc_stat_reply *stat_rep;
int rc = 0;
switch (trans->tas_opc) {
@@ -1085,12 +1083,12 @@ int
lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct list_head *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp)
+ struct lstcon_rpc_trans **transpp)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- lstcon_rpc_t *rpc;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
+ struct lstcon_rpc *rpc;
unsigned feats;
int rc;
@@ -1130,14 +1128,16 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
case LST_TRANS_TSBCLIADD:
case LST_TRANS_TSBSRVADD:
rc = lstcon_testrpc_prep(nd, transop, feats,
- (lstcon_test_t *)arg, &rpc);
+ (struct lstcon_test *)arg,
+ &rpc);
break;
case LST_TRANS_TSBRUN:
case LST_TRANS_TSBSTOP:
case LST_TRANS_TSBCLIQRY:
case LST_TRANS_TSBSRVQRY:
rc = lstcon_batrpc_prep(nd, transop, feats,
- (lstcon_tsb_hdr_t *)arg, &rpc);
+ (struct lstcon_tsb_hdr *)arg,
+ &rpc);
break;
case LST_TRANS_STATQRY:
rc = lstcon_statrpc_prep(nd, feats, &rpc);
@@ -1170,17 +1170,18 @@ static void
lstcon_rpc_pinger(void *arg)
{
struct stt_timer *ptimer = (struct stt_timer *)arg;
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
- srpc_debug_reqst_t *drq;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *rep;
+ struct srpc_debug_reqst *drq;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int intv;
int count = 0;
int rc;
- /* RPC pinger is a special case of transaction,
+ /*
+ * RPC pinger is a special case of transaction,
+ * it's called by the timer at an 8-second interval.
*/
mutex_lock(&console_session.ses_mutex);
@@ -1326,9 +1327,9 @@ lstcon_rpc_pinger_stop(void)
void
lstcon_rpc_cleanup_wait(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *temp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_rpc *crpc;
+ struct lstcon_rpc *temp;
struct list_head *pacer;
struct list_head zlist;
@@ -1338,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void)
while (!list_empty(&console_session.ses_trans_list)) {
list_for_each(pacer, &console_session.ses_trans_list) {
- trans = list_entry(pacer, lstcon_rpc_trans_t,
+ trans = list_entry(pacer, struct lstcon_rpc_trans,
tas_link);
CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
@@ -1370,7 +1371,7 @@ lstcon_rpc_cleanup_wait(void)
list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
list_del(&crpc->crp_link);
- LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
+ LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc));
}
}
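
lstcon_rpc_prep() above recycles console RPCs through console_session.ses_rpc_freelist before falling back to a fresh allocation, and lstcon_rpc_cleanup_wait() later drains the same list. A userspace sketch of that get/put recycling; the locking the original does under ses_rpc_lock is omitted here:

#include <stdio.h>
#include <stdlib.h>

struct rpc { struct rpc *next; };

static struct rpc *freelist;

static struct rpc *rpc_get(void)
{
	struct rpc *r = freelist;

	if (r)
		freelist = r->next;	/* reuse a recycled rpc */
	else
		r = calloc(1, sizeof(*r));
	return r;
}

static void rpc_put(struct rpc *r)
{
	r->next = freelist;		/* recycle instead of freeing */
	freelist = r;
}

int main(void)
{
	struct rpc *a = rpc_get();

	rpc_put(a);
	printf("%s\n", rpc_get() == a ? "reused" : "fresh");
	return 0;
}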
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 3e7839dad..90c3385a3 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -63,9 +63,9 @@ struct lstcon_tsb_hdr;
struct lstcon_test;
struct lstcon_node;
-typedef struct lstcon_rpc {
+struct lstcon_rpc {
struct list_head crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct srpc_client_rpc *crp_rpc; /* client rpc */
struct lstcon_node *crp_node; /* destination node */
struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
@@ -76,9 +76,9 @@ typedef struct lstcon_rpc {
unsigned int crp_embedded:1;
int crp_status; /* console rpc errors */
unsigned long crp_stamp; /* replied time stamp */
-} lstcon_rpc_t;
+};
-typedef struct lstcon_rpc_trans {
+struct lstcon_rpc_trans {
struct list_head tas_olink; /* link chain on owner list */
struct list_head tas_link; /* link chain on global list */
int tas_opc; /* operation code of transaction */
@@ -87,7 +87,7 @@ typedef struct lstcon_rpc_trans {
wait_queue_head_t tas_waitq; /* wait queue head */
atomic_t tas_remaining; /* # of un-scheduled rpcs */
struct list_head tas_rpcs_list; /* queued requests */
-} lstcon_rpc_trans_t;
+};
#define LST_TRANS_PRIVATE 0x1000
@@ -106,35 +106,35 @@ typedef struct lstcon_rpc_trans {
#define LST_TRANS_STATQRY 0x21
typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
lstcon_rpc_ent_t __user *);
int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned version, lstcon_rpc_t **crpc);
+ unsigned version, struct lstcon_rpc **crpc);
int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned version, lstcon_rpc_t **crpc);
+ unsigned version, struct lstcon_rpc **crpc);
int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc);
+ struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_test *test, lstcon_rpc_t **crpc);
+ struct lstcon_test *test, struct lstcon_rpc **crpc);
int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
- lstcon_rpc_t **crpc);
-void lstcon_rpc_put(lstcon_rpc_t *crpc);
+ struct lstcon_rpc **crpc);
+void lstcon_rpc_put(struct lstcon_rpc *crpc);
int lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, lstcon_rpc_trans_t **transpp);
+ int transop, struct lstcon_rpc_trans **transpp);
int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct list_head *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp);
-void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
+ struct lstcon_rpc_trans **transpp);
+void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans,
lstcon_trans_stat_t *stat);
-int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent);
-void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
-void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
-void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req);
-int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout);
+void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
+void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
int lstcon_rpc_pinger_start(void);
void lstcon_rpc_pinger_stop(void);
void lstcon_rpc_cleanup_wait(void);
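
Nearly every hunk in conrpc.[ch] and the surrounding selftest files applies the same mechanical conversion: drop the _t typedef and use the bare struct tag, per the kernel's coding-style preference for visible struct types. In miniature, with an illustrative name:

/* Before (the form removed by this series):
 *	typedef struct lstcon_rpc { ... } lstcon_rpc_t;
 * After: only the tag remains, and callers spell it out. */
struct demo_rpc {
	int crp_status;
};

int main(void)
{
	struct demo_rpc r = { .crp_status = 0 };	/* tag, not typedef */

	return r.crp_status;
}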
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 1a923ea3a..a03e52d29 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
struct lstcon_session console_session;
static void
-lstcon_node_get(lstcon_node_t *nd)
+lstcon_node_get(struct lstcon_node *nd)
{
LASSERT(nd->nd_ref >= 1);
@@ -69,9 +69,9 @@ lstcon_node_get(lstcon_node_t *nd)
}
static int
-lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
+lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
LASSERT(id.nid != LNET_NID_ANY);
@@ -90,11 +90,11 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+ LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
if (!*ndpp)
return -ENOMEM;
- ndl = (lstcon_ndlink_t *)(*ndpp + 1);
+ ndl = (struct lstcon_ndlink *)(*ndpp + 1);
ndl->ndl_node = *ndpp;
@@ -103,7 +103,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
ndl->ndl_node->nd_stamp = cfs_time_current();
ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
ndl->ndl_node->nd_timeout = 0;
- memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
+ memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
/*
* queued in global hash & list, no refcount is taken by
@@ -117,16 +117,16 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
}
static void
-lstcon_node_put(lstcon_node_t *nd)
+lstcon_node_put(struct lstcon_node *nd)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
LASSERT(nd->nd_ref > 0);
if (--nd->nd_ref > 0)
return;
- ndl = (lstcon_ndlink_t *)(nd + 1);
+ ndl = (struct lstcon_ndlink *)(nd + 1);
LASSERT(!list_empty(&ndl->ndl_link));
LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -135,16 +135,16 @@ lstcon_node_put(lstcon_node_t *nd)
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+ LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
}
static int
lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
+ lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int rc;
if (id.nid == LNET_NID_ANY)
@@ -168,7 +168,7 @@ lstcon_ndlink_find(struct list_head *hash,
if (rc)
return rc;
- LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
+ LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink));
if (!ndl) {
lstcon_node_put(nd);
return -ENOMEM;
@@ -184,7 +184,7 @@ lstcon_ndlink_find(struct list_head *hash,
}
static void
-lstcon_ndlink_release(lstcon_ndlink_t *ndl)
+lstcon_ndlink_release(struct lstcon_ndlink *ndl)
{
LASSERT(list_empty(&ndl->ndl_link));
LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -196,12 +196,12 @@ lstcon_ndlink_release(lstcon_ndlink_t *ndl)
}
static int
-lstcon_group_alloc(char *name, lstcon_group_t **grpp)
+lstcon_group_alloc(char *name, struct lstcon_group **grpp)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int i;
- LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
+ LIBCFS_ALLOC(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
if (!grp)
return -ENOMEM;
@@ -209,7 +209,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
grp->grp_ref = 1;
if (name) {
if (strlen(name) > sizeof(grp->grp_name) - 1) {
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ LIBCFS_FREE(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
return -E2BIG;
}
@@ -229,18 +229,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
}
static void
-lstcon_group_addref(lstcon_group_t *grp)
+lstcon_group_addref(struct lstcon_group *grp)
{
grp->grp_ref++;
}
-static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
+static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
static void
-lstcon_group_drain(lstcon_group_t *grp, int keep)
+lstcon_group_drain(struct lstcon_group *grp, int keep)
{
- lstcon_ndlink_t *ndl;
- lstcon_ndlink_t *tmp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_ndlink *tmp;
list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
if (!(ndl->ndl_node->nd_state & keep))
@@ -249,7 +249,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
}
static void
-lstcon_group_decref(lstcon_group_t *grp)
+lstcon_group_decref(struct lstcon_group *grp)
{
int i;
@@ -264,20 +264,20 @@ lstcon_group_decref(lstcon_group_t *grp)
for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ LIBCFS_FREE(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
}
static int
-lstcon_group_find(const char *name, lstcon_group_t **grpp)
+lstcon_group_find(const char *name, struct lstcon_group **grpp)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
continue;
- lstcon_group_addref(grp); /* +1 ref for caller */
+ lstcon_group_addref(grp); /* +1 ref for caller */
*grpp = grp;
return 0;
}
@@ -286,8 +286,8 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
}
static int
-lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
- lstcon_ndlink_t **ndlpp, int create)
+lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id,
+ struct lstcon_ndlink **ndlpp, int create)
{
int rc;
@@ -305,7 +305,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
}
static void
-lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
+lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl)
{
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -313,8 +313,8 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
}
static void
-lstcon_group_ndlink_move(lstcon_group_t *old,
- lstcon_group_t *new, lstcon_ndlink_t *ndl)
+lstcon_group_ndlink_move(struct lstcon_group *old,
+ struct lstcon_group *new, struct lstcon_ndlink *ndl)
{
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
LST_NODE_HASHSIZE;
@@ -329,21 +329,21 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
}
static void
-lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
+lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
while (!list_empty(&old->grp_ndl_list)) {
ndl = list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
lstcon_group_ndlink_move(old, new, ndl);
}
}
static int
-lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
- lstcon_group_t *grp = (lstcon_group_t *)arg;
+ struct lstcon_group *grp = (struct lstcon_group *)arg;
switch (transop) {
case LST_TRANS_SESNEW:
@@ -370,10 +370,10 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_debug_reply_t *rep;
+ struct srpc_debug_reply *rep;
switch (transop) {
case LST_TRANS_SESNEW:
@@ -399,13 +399,13 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
}
static int
-lstcon_group_nodes_add(lstcon_group_t *grp,
+lstcon_group_nodes_add(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
unsigned *featp, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int i;
int rc;
@@ -466,13 +466,13 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
}
static int
-lstcon_group_nodes_remove(lstcon_group_t *grp,
+lstcon_group_nodes_remove(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int rc;
int i;
@@ -523,7 +523,7 @@ error:
int
lstcon_group_add(char *name)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
@@ -548,7 +548,7 @@ int
lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
unsigned *featp, struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
LASSERT(count > 0);
@@ -578,8 +578,8 @@ lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
int
lstcon_group_del(char *name)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -621,7 +621,7 @@ lstcon_group_del(char *name)
int
lstcon_group_clean(char *name, int args)
{
- lstcon_group_t *grp = NULL;
+ struct lstcon_group *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -654,7 +654,7 @@ int
lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
struct list_head __user *result_up)
{
- lstcon_group_t *grp = NULL;
+ struct lstcon_group *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -683,8 +683,8 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
int
lstcon_group_refresh(char *name, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -725,7 +725,7 @@ lstcon_group_refresh(char *name, struct list_head __user *result_up)
int
lstcon_group_list(int index, int len, char __user *name_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
LASSERT(index >= 0);
LASSERT(name_up);
@@ -733,7 +733,7 @@ lstcon_group_list(int index, int len, char __user *name_up)
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (!index--) {
return copy_to_user(name_up, grp->grp_name, len) ?
- -EFAULT : 0;
+ -EFAULT : 0;
}
}
@@ -744,8 +744,8 @@ static int
lstcon_nodes_getent(struct list_head *head, int *index_p,
int *count_p, lstcon_node_ent_t __user *dents_up)
{
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int count = 0;
int index = 0;
@@ -786,8 +786,8 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
lstcon_node_ent_t __user *dents_up)
{
lstcon_ndlist_ent_t *gentp;
- lstcon_group_t *grp;
- lstcon_ndlink_t *ndl;
+ struct lstcon_group *grp;
+ struct lstcon_ndlink *ndl;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -828,9 +828,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
}
static int
-lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
+lstcon_batch_find(const char *name, struct lstcon_batch **batpp)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
@@ -845,7 +845,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
int
lstcon_batch_add(char *name)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int i;
int rc;
@@ -855,7 +855,7 @@ lstcon_batch_add(char *name)
return rc;
}
- LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
+ LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
if (!bat) {
CERROR("Can't allocate descriptor for batch %s\n", name);
return -ENOMEM;
@@ -865,7 +865,7 @@ lstcon_batch_add(char *name)
sizeof(struct list_head) * LST_NODE_HASHSIZE);
if (!bat->bat_cli_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -ENOMEM;
}
@@ -875,7 +875,7 @@ lstcon_batch_add(char *name)
if (!bat->bat_srv_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -ENOMEM;
}
@@ -883,7 +883,7 @@ lstcon_batch_add(char *name)
if (strlen(name) > sizeof(bat->bat_name) - 1) {
LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -E2BIG;
}
strncpy(bat->bat_name, name, sizeof(bat->bat_name));
@@ -911,7 +911,7 @@ lstcon_batch_add(char *name)
int
lstcon_batch_list(int index, int len, char __user *name_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
LASSERT(name_up);
LASSERT(index >= 0);
@@ -934,9 +934,9 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
lstcon_test_batch_ent_t *entp;
struct list_head *clilst;
struct list_head *srvlst;
- lstcon_test_t *test = NULL;
- lstcon_batch_t *bat;
- lstcon_ndlink_t *ndl;
+ struct lstcon_test *test = NULL;
+ struct lstcon_batch *bat;
+ struct lstcon_ndlink *ndl;
int rc;
rc = lstcon_batch_find(name, &bat);
@@ -977,7 +977,6 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
if (!test) {
entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
entp->u.tbe_batch.bae_state = bat->bat_state;
-
} else {
entp->u.tbe_test.tse_type = test->tes_type;
entp->u.tbe_test.tse_loop = test->tes_loop;
@@ -999,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
}
static int
-lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
switch (transop) {
case LST_TRANS_TSBRUN:
@@ -1021,10 +1020,10 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_batch_op(lstcon_batch_t *bat, int transop,
+lstcon_batch_op(struct lstcon_batch *bat, int transop,
struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
@@ -1047,7 +1046,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
int
lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
if (lstcon_batch_find(name, &bat)) {
@@ -1069,7 +1068,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
int
lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
if (lstcon_batch_find(name, &bat)) {
@@ -1089,17 +1088,17 @@ lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
}
static void
-lstcon_batch_destroy(lstcon_batch_t *bat)
+lstcon_batch_destroy(struct lstcon_batch *bat)
{
- lstcon_ndlink_t *ndl;
- lstcon_test_t *test;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_test *test;
int i;
list_del(&bat->bat_link);
while (!list_empty(&bat->bat_test_list)) {
test = list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
+ struct lstcon_test, tes_link);
LASSERT(list_empty(&test->tes_trans_list));
list_del(&test->tes_link);
@@ -1107,7 +1106,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
lstcon_group_decref(test->tes_src_grp);
lstcon_group_decref(test->tes_dst_grp);
- LIBCFS_FREE(test, offsetof(lstcon_test_t,
+ LIBCFS_FREE(test, offsetof(struct lstcon_test,
tes_param[test->tes_paramlen]));
}
@@ -1115,7 +1114,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_cli_list)) {
ndl = list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1123,7 +1122,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_srv_list)) {
ndl = list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1138,19 +1137,19 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
sizeof(struct list_head) * LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_srv_hash,
sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
}
static int
-lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
- lstcon_test_t *test;
- lstcon_batch_t *batch;
- lstcon_ndlink_t *ndl;
+ struct lstcon_test *test;
+ struct lstcon_batch *batch;
+ struct lstcon_ndlink *ndl;
struct list_head *hash;
struct list_head *head;
- test = (lstcon_test_t *)arg;
+ test = (struct lstcon_test *)arg;
LASSERT(test);
batch = test->tes_batch;
@@ -1186,10 +1185,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int transop;
int rc;
@@ -1237,7 +1236,7 @@ again:
}
static int
-lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
+lstcon_verify_batch(const char *name, struct lstcon_batch **batch)
{
int rc;
@@ -1256,10 +1255,10 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
}
static int
-lstcon_verify_group(const char *name, lstcon_group_t **grp)
+lstcon_verify_group(const char *name, struct lstcon_group **grp)
{
int rc;
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
rc = lstcon_group_find(name, grp);
if (rc) {
@@ -1284,11 +1283,11 @@ lstcon_test_add(char *batch_name, int type, int loop,
void *param, int paramlen, int *retp,
struct list_head __user *result_up)
{
- lstcon_test_t *test = NULL;
+ struct lstcon_test *test = NULL;
int rc;
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_batch_t *batch = NULL;
+ struct lstcon_group *src_grp = NULL;
+ struct lstcon_group *dst_grp = NULL;
+ struct lstcon_batch *batch = NULL;
/*
* verify that a batch of the given name exists, and the groups
@@ -1310,7 +1309,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
if (dst_grp->grp_userland)
*retp = 1;
- LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ LIBCFS_ALLOC(test, offsetof(struct lstcon_test, tes_param[paramlen]));
if (!test) {
CERROR("Can't allocate test descriptor\n");
rc = -ENOMEM;
@@ -1357,7 +1356,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
return rc;
out:
if (test)
- LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[paramlen]));
if (dst_grp)
lstcon_group_decref(dst_grp);
@@ -1369,9 +1368,9 @@ out:
}
static int
-lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
{
- lstcon_test_t *test;
+ struct lstcon_test *test;
list_for_each_entry(test, &batch->bat_test_list, tes_link) {
if (idx == test->tes_hdr.tsb_index) {
@@ -1384,10 +1383,10 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
}
static int
-lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+ struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
LASSERT(transop == LST_TRANS_TSBCLIQRY ||
transop == LST_TRANS_TSBSRVQRY);
@@ -1404,12 +1403,12 @@ int
lstcon_test_batch_query(char *name, int testidx, int client,
int timeout, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
struct list_head *translist;
struct list_head *ndlist;
- lstcon_tsb_hdr_t *hdr;
- lstcon_batch_t *batch;
- lstcon_test_t *test = NULL;
+ struct lstcon_tsb_hdr *hdr;
+ struct lstcon_batch *batch;
+ struct lstcon_test *test = NULL;
int transop;
int rc;
@@ -1423,7 +1422,6 @@ lstcon_test_batch_query(char *name, int testidx, int client,
translist = &batch->bat_trans_list;
ndlist = &batch->bat_cli_list;
hdr = &batch->bat_hdr;
-
} else {
/* query specified test only */
rc = lstcon_test_find(batch, testidx, &test);
@@ -1448,7 +1446,8 @@ lstcon_test_batch_query(char *name, int testidx, int client,
lstcon_rpc_trans_postwait(trans, timeout);
- if (!testidx && /* query a batch, not a test */
+ /* query a batch, not a test */
+ if (!testidx &&
!lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
!lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
/* all RPCs finished, and no active test */
@@ -1463,10 +1462,10 @@ lstcon_test_batch_query(char *name, int testidx, int client,
}
static int
-lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
sfw_counters_t __user *sfwk_stat;
srpc_counters_t __user *srpc_stat;
lnet_counters_t __user *lnet_stat;
@@ -1491,7 +1490,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head __user *result_up)
{
struct list_head head;
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
INIT_LIST_HEAD(&head);
@@ -1516,7 +1515,7 @@ int
lstcon_group_stat(char *grp_name, int timeout,
struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(grp_name, &grp);
@@ -1536,8 +1535,8 @@ int
lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
int timeout, struct list_head __user *result_up)
{
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int i;
int rc;
@@ -1581,7 +1580,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
struct list_head *translist,
int timeout, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
@@ -1611,7 +1610,7 @@ int
lstcon_batch_debug(int timeout, char *name,
int client, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
rc = lstcon_batch_find(name, &bat);
@@ -1629,7 +1628,7 @@ int
lstcon_group_debug(int timeout, char *name,
struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -1649,8 +1648,8 @@ lstcon_nodes_debug(int timeout,
struct list_head __user *result_up)
{
lnet_process_id_t id;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *grp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *grp;
int i;
int rc;
@@ -1749,7 +1748,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
if (strlen(name) > sizeof(console_session.ses_name) - 1)
return -E2BIG;
- strncpy(console_session.ses_name, name,
+ strlcpy(console_session.ses_name, name,
sizeof(console_session.ses_name));
rc = lstcon_batch_add(LST_DEFAULT_BATCH);
@@ -1758,7 +1757,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
rc = lstcon_rpc_pinger_start();
if (rc) {
- lstcon_batch_t *bat = NULL;
+ struct lstcon_batch *bat = NULL;
lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
lstcon_batch_destroy(bat);
@@ -1782,7 +1781,7 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
char __user *name_up, int len)
{
lstcon_ndlist_ent_t *entp;
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
int rc = 0;
if (console_session.ses_state != LST_SESSION_ACTIVE)
@@ -1813,9 +1812,9 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
int
lstcon_session_end(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- lstcon_batch_t *bat;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
+ struct lstcon_batch *bat;
int rc = 0;
LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
@@ -1849,7 +1848,7 @@ lstcon_session_end(void)
/* destroy all batches */
while (!list_empty(&console_session.ses_bat_list)) {
bat = list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
+ struct lstcon_batch, bat_link);
lstcon_batch_destroy(bat);
}
@@ -1857,7 +1856,7 @@ lstcon_session_end(void)
/* destroy all groups */
while (!list_empty(&console_session.ses_grp_list)) {
grp = list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
+ struct lstcon_group, grp_link);
LASSERT(grp->grp_ref == 1);
lstcon_group_decref(grp);
@@ -1906,12 +1905,12 @@ lstcon_session_feats_check(unsigned feats)
static int
lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
{
- srpc_msg_t *rep = &rpc->srpc_replymsg;
- srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
- srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
- srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
- lstcon_group_t *grp = NULL;
- lstcon_ndlink_t *ndl;
+ struct srpc_msg *rep = &rpc->srpc_replymsg;
+ struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_join_reqst *jreq = &req->msg_body.join_reqst;
+ struct srpc_join_reply *jrep = &rep->msg_body.join_reply;
+ struct lstcon_group *grp = NULL;
+ struct lstcon_ndlink *ndl;
int rc = 0;
sfw_unpack_message(req);
@@ -1987,7 +1986,8 @@ out:
return rc;
}
-static srpc_service_t lstcon_acceptor_service;
+static struct srpc_service lstcon_acceptor_service;
+
static void lstcon_init_acceptor_service(void)
{
/* initialize selftest console acceptor service table */
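
The strncpy() to strlcpy() change in lstcon_session_new() above closes a
subtle hole: strncpy() leaves the destination unterminated whenever the
source fills the whole buffer, while strlcpy() always NUL-terminates. A
minimal userspace sketch of the difference (my_strlcpy is a local
stand-in for the kernel helper, for illustration only):

#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's strlcpy(): copy at most size-1 bytes and
 * always NUL-terminate; returns the length of src so callers can
 * detect truncation */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "name", sizeof(a));	  /* a is NOT NUL-terminated */
	my_strlcpy(b, "name", sizeof(b)); /* b == "nam", terminated   */

	printf("%.4s / %s\n", a, b);
	return 0;
}
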
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 554f58244..becd22e41 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -50,22 +50,25 @@
#include "selftest.h"
#include "conrpc.h"
-typedef struct lstcon_node {
+/* node descriptor */
+struct lstcon_node {
lnet_process_id_t nd_id; /* id of the node */
int nd_ref; /* reference count */
int nd_state; /* state of the node */
int nd_timeout; /* session timeout */
unsigned long nd_stamp; /* timestamp of last replied RPC */
struct lstcon_rpc nd_ping; /* ping rpc */
-} lstcon_node_t; /* node descriptor */
+};
-typedef struct {
+/* node link descriptor */
+struct lstcon_ndlink {
struct list_head ndl_link; /* chain on list */
struct list_head ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
-} lstcon_ndlink_t; /* node link descriptor */
+ struct lstcon_node *ndl_node; /* pointer to node */
+};
-typedef struct {
+/* (alias of nodes) group descriptor */
+struct lstcon_group {
struct list_head grp_link; /* chain on global group list
*/
int grp_ref; /* reference count */
@@ -76,18 +79,19 @@ typedef struct {
struct list_head grp_trans_list; /* transaction list */
struct list_head grp_ndl_list; /* nodes list */
struct list_head grp_ndl_hash[0]; /* hash table for nodes */
-} lstcon_group_t; /* (alias of nodes) group descriptor */
+};
#define LST_BATCH_IDLE 0xB0 /* idle batch */
#define LST_BATCH_RUNNING 0xB1 /* running batch */
-typedef struct lstcon_tsb_hdr {
+struct lstcon_tsb_hdr {
lst_bid_t tsb_id; /* batch ID */
int tsb_index; /* test index */
-} lstcon_tsb_hdr_t;
+};
-typedef struct {
- lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
+/* (tests ) batch descriptor */
+struct lstcon_batch {
+ struct lstcon_tsb_hdr bat_hdr; /* test_batch header */
struct list_head bat_link; /* chain on session's batches list */
int bat_ntest; /* # of test */
int bat_state; /* state of the batch */
@@ -95,20 +99,21 @@ typedef struct {
* for run, force for stop */
char bat_name[LST_NAME_SIZE];/* name of batch */
- struct list_head bat_test_list; /* list head of tests (lstcon_test_t)
+ struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
*/
struct list_head bat_trans_list; /* list head of transaction */
struct list_head bat_cli_list; /* list head of client nodes
- * (lstcon_node_t) */
+ * (struct lstcon_node) */
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /* (tests ) batch descriptor */
+};
-typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
+/* a single test descriptor */
+struct lstcon_test {
+ struct lstcon_tsb_hdr tes_hdr; /* test batch header */
struct list_head tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
+ struct lstcon_batch *tes_batch; /* pointer to batch */
int tes_type; /* type of the test, i.e: bulk, ping */
int tes_stop_onerr; /* stop on error */
@@ -120,12 +125,12 @@ typedef struct lstcon_test {
int tes_cliidx; /* client index, used for RPC creating */
struct list_head tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
+ struct lstcon_group *tes_src_grp; /* group run the test */
+ struct lstcon_group *tes_dst_grp; /* target group */
int tes_paramlen; /* test parameter length */
char tes_param[0]; /* test parameter */
-} lstcon_test_t; /* a single test descriptor */
+};
#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
@@ -152,7 +157,7 @@ struct lstcon_session {
unsigned ses_expired:1; /* console is timedout */
__u64 ses_id_cookie; /* batch id cookie */
char ses_name[LST_NAME_SIZE];/* session name */
- lstcon_rpc_trans_t *ses_ping; /* session pinger */
+ struct lstcon_rpc_trans *ses_ping; /* session pinger */
struct stt_timer ses_ping_timer; /* timer for pinger */
lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
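
All of the header edits above apply one mechanical rule from the kernel
coding style: do not hide structs behind typedefs. Each typedef and its
trailing comment are dropped, the struct keeps (or gains) a proper tag,
and the comment moves above the definition. A schematic before/after
using a hypothetical descriptor:

/* before: anonymous struct hidden behind a typedef */
typedef struct {
	int	foo_refs;	/* reference count */
} foo_desc_t;			/* foo descriptor */

/* after: a plain tagged struct; call sites say "struct foo_desc" */
/* foo descriptor */
struct foo_desc {
	int	foo_refs;	/* reference count */
};
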
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index e2c532399..30e4f71f1 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -109,19 +109,19 @@ static struct smoketest_framework {
struct list_head fw_tests; /* registered test cases */
atomic_t fw_nzombies; /* # zombie sessions */
spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
+ struct sfw_session *fw_session; /* _the_ session */
int fw_shuttingdown; /* shutdown in progress */
struct srpc_server_rpc *fw_active_srpc;/* running RPC */
} sfw_data;
/* forward ref's */
-int sfw_stop_batch(sfw_batch_t *tsb, int force);
-void sfw_destroy_session(sfw_session_t *sn);
+int sfw_stop_batch(struct sfw_batch *tsb, int force);
+void sfw_destroy_session(struct sfw_session *sn);
-static inline sfw_test_case_t *
+static inline struct sfw_test_case *
sfw_find_test_case(int id)
{
- sfw_test_case_t *tsc;
+ struct sfw_test_case *tsc;
LASSERT(id <= SRPC_SERVICE_MAX_ID);
LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -135,9 +135,9 @@ sfw_find_test_case(int id)
}
static int
-sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
+sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
{
- sfw_test_case_t *tsc;
+ struct sfw_test_case *tsc;
if (sfw_find_test_case(service->sv_id)) {
CERROR("Failed to register test %s (%d)\n",
@@ -145,7 +145,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
return -EEXIST;
}
- LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
+ LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
if (!tsc)
return -ENOMEM;
@@ -159,7 +159,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
static void
sfw_add_session_timer(void)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
struct stt_timer *timer = &sn->sn_timer;
LASSERT(!sfw_data.fw_shuttingdown);
@@ -177,7 +177,7 @@ sfw_add_session_timer(void)
static int
sfw_del_session_timer(void)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (!sn || !sn->sn_timer_active)
return 0;
@@ -196,10 +196,10 @@ static void
sfw_deactivate_session(void)
__must_hold(&sfw_data.fw_lock)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
int nactive = 0;
- sfw_batch_t *tsb;
- sfw_test_case_t *tsc;
+ struct sfw_batch *tsb;
+ struct sfw_test_case *tsc;
if (!sn)
return;
@@ -226,7 +226,7 @@ __must_hold(&sfw_data.fw_lock)
}
if (nactive)
- return; /* wait for active batches to stop */
+ return; /* wait for active batches to stop */
list_del_init(&sn->sn_list);
spin_unlock(&sfw_data.fw_lock);
@@ -239,7 +239,7 @@ __must_hold(&sfw_data.fw_lock)
static void
sfw_session_expired(void *data)
{
- sfw_session_t *sn = data;
+ struct sfw_session *sn = data;
spin_lock(&sfw_data.fw_lock);
@@ -257,12 +257,12 @@ sfw_session_expired(void *data)
}
static inline void
-sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
+sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
unsigned features, const char *name)
{
struct stt_timer *timer = &sn->sn_timer;
- memset(sn, 0, sizeof(sfw_session_t));
+ memset(sn, 0, sizeof(struct sfw_session));
INIT_LIST_HEAD(&sn->sn_list);
INIT_LIST_HEAD(&sn->sn_batches);
atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
@@ -298,7 +298,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
}
static void
-sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
+sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
{
LASSERT(!rpc->crpc_bulk.bk_niov);
LASSERT(list_empty(&rpc->crpc_list));
@@ -318,11 +318,11 @@ sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
spin_unlock(&sfw_data.fw_lock);
}
-static sfw_batch_t *
+static struct sfw_batch *
sfw_find_batch(lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct sfw_batch *bat;
LASSERT(sn);
@@ -334,11 +334,11 @@ sfw_find_batch(lst_bid_t bid)
return NULL;
}
-static sfw_batch_t *
+static struct sfw_batch *
sfw_bid2batch(lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct sfw_batch *bat;
LASSERT(sn);
@@ -346,7 +346,7 @@ sfw_bid2batch(lst_bid_t bid)
if (bat)
return bat;
- LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
+ LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
if (!bat)
return NULL;
@@ -361,11 +361,11 @@ sfw_bid2batch(lst_bid_t bid)
}
static int
-sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
+sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
sfw_counters_t *cnt = &reply->str_fw;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -402,10 +402,10 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
}
int
-sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
+sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_msg_t *msg = container_of(request, srpc_msg_t,
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct srpc_msg *msg = container_of(request, struct srpc_msg,
msg_body.mksn_reqst);
int cplen = 0;
@@ -438,7 +438,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
/*
* reject the request if it requires unknown features
* NB: old version will always accept all features because it's not
- * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also
+ * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
* harmless because it will return zero feature to console, and it's
* console's responsibility to make sure all nodes in a session have
* same feature mask.
@@ -449,7 +449,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
/* brand new or create by force */
- LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
+ LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
if (!sn) {
CERROR("dropping RPC mksn under memory pressure\n");
return -ENOMEM;
@@ -473,9 +473,9 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
static int
-sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -505,9 +505,9 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
}
static int
-sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
+sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (!sn) {
reply->dbg_status = ESRCH;
@@ -526,10 +526,10 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
}
static void
-sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
+sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct sfw_test_unit *tsu = rpc->crpc_priv;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
/* Called with hold of tsi->tsi_lock */
LASSERT(list_empty(&rpc->crpc_list));
@@ -537,7 +537,7 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
}
static inline int
-sfw_test_buffers(sfw_test_instance_t *tsi)
+sfw_test_buffers(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc;
struct srpc_service *svc;
@@ -614,10 +614,10 @@ sfw_unload_test(struct sfw_test_instance *tsi)
}
static void
-sfw_destroy_test_instance(sfw_test_instance_t *tsi)
+sfw_destroy_test_instance(struct sfw_test_instance *tsi)
{
- srpc_client_rpc_t *rpc;
- sfw_test_unit_t *tsu;
+ struct srpc_client_rpc *rpc;
+ struct sfw_test_unit *tsu;
if (!tsi->tsi_is_client)
goto clean;
@@ -630,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
while (!list_empty(&tsi->tsi_units)) {
tsu = list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
+ struct sfw_test_unit, tsu_list);
list_del(&tsu->tsu_list);
LIBCFS_FREE(tsu, sizeof(*tsu));
}
while (!list_empty(&tsi->tsi_free_rpcs)) {
rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
}
@@ -648,34 +648,34 @@ clean:
}
static void
-sfw_destroy_batch(sfw_batch_t *tsb)
+sfw_destroy_batch(struct sfw_batch *tsb)
{
- sfw_test_instance_t *tsi;
+ struct sfw_test_instance *tsi;
LASSERT(!sfw_batch_active(tsb));
LASSERT(list_empty(&tsb->bat_list));
while (!list_empty(&tsb->bat_tests)) {
tsi = list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
+ struct sfw_test_instance, tsi_list);
list_del_init(&tsi->tsi_list);
sfw_destroy_test_instance(tsi);
}
- LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
+ LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
}
void
-sfw_destroy_session(sfw_session_t *sn)
+sfw_destroy_session(struct sfw_session *sn)
{
- sfw_batch_t *batch;
+ struct sfw_batch *batch;
LASSERT(list_empty(&sn->sn_list));
LASSERT(sn != sfw_data.fw_session);
while (!list_empty(&sn->sn_batches)) {
batch = list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
+ struct sfw_batch, bat_list);
list_del_init(&batch->bat_list);
sfw_destroy_batch(batch);
}
@@ -685,28 +685,28 @@ sfw_destroy_session(sfw_session_t *sn)
}
static void
-sfw_unpack_addtest_req(srpc_msg_t *msg)
+sfw_unpack_addtest_req(struct srpc_msg *msg)
{
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
LASSERT(req->tsr_is_client);
if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
+ return; /* no flipping needed */
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (req->tsr_service == SRPC_SERVICE_BRW) {
if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
+ struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
__swab32s(&bulk->blk_opc);
__swab32s(&bulk->blk_npg);
__swab32s(&bulk->blk_flags);
} else {
- test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1;
+ struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
__swab16s(&bulk->blk_opc);
__swab16s(&bulk->blk_flags);
@@ -718,7 +718,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
if (req->tsr_service == SRPC_SERVICE_PING) {
- test_ping_req_t *ping = &req->tsr_u.ping;
+ struct test_ping_req *ping = &req->tsr_u.ping;
__swab32s(&ping->png_size);
__swab32s(&ping->png_flags);
@@ -729,14 +729,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
static int
-sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
+sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
{
- srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
int ndest = req->tsr_ndest;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
+ struct sfw_test_unit *tsu;
+ struct sfw_test_instance *tsi;
int i;
int rc;
@@ -789,13 +789,13 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
int j;
dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT(dests); /* my pages are within KVM always */
+ LASSERT(dests); /* my pages are within KVM always */
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
sfw_unpack_id(id);
for (j = 0; j < tsi->tsi_concur; j++) {
- LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
+ LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
if (!tsu) {
rc = -ENOMEM;
CERROR("Can't allocate tsu for %d\n",
@@ -824,11 +824,11 @@ error:
}
static void
-sfw_test_unit_done(sfw_test_unit_t *tsu)
+sfw_test_unit_done(struct sfw_test_unit *tsu)
{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_batch_t *tsb = tsi->tsi_batch;
- sfw_session_t *sn = tsb->bat_session;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_batch *tsb = tsi->tsi_batch;
+ struct sfw_session *sn = tsb->bat_session;
LASSERT(sfw_test_active(tsi));
@@ -844,8 +844,8 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
spin_lock(&sfw_data.fw_lock);
- if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
+ if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
+ sn == sfw_data.fw_session) { /* sn also active */
spin_unlock(&sfw_data.fw_lock);
return;
}
@@ -866,10 +866,10 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
}
static void
-sfw_test_rpc_done(srpc_client_rpc_t *rpc)
+sfw_test_rpc_done(struct srpc_client_rpc *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct sfw_test_unit *tsu = rpc->crpc_priv;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
int done = 0;
tsi->tsi_ops->tso_done_rpc(tsu, rpc);
@@ -900,19 +900,19 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
}
int
-sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
+sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
unsigned features, int nblk, int blklen,
- srpc_client_rpc_t **rpcpp)
+ struct srpc_client_rpc **rpcpp)
{
- srpc_client_rpc_t *rpc = NULL;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct srpc_client_rpc *rpc = NULL;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
spin_lock(&tsi->tsi_lock);
LASSERT(sfw_test_active(tsi));
/* pick request from buffer */
rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
if (rpc) {
LASSERT(nblk == rpc->crpc_bulk.bk_niov);
list_del_init(&rpc->crpc_list);
@@ -942,11 +942,11 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
}
static int
-sfw_run_test(swi_workitem_t *wi)
+sfw_run_test(struct swi_workitem *wi)
{
- sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- srpc_client_rpc_t *rpc = NULL;
+ struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct srpc_client_rpc *rpc = NULL;
LASSERT(wi == &tsu->tsu_worker);
@@ -991,11 +991,11 @@ test_done:
}
static int
-sfw_run_batch(sfw_batch_t *tsb)
+sfw_run_batch(struct sfw_batch *tsb)
{
- swi_workitem_t *wi;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
+ struct swi_workitem *wi;
+ struct sfw_test_unit *tsu;
+ struct sfw_test_instance *tsi;
if (sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
@@ -1026,10 +1026,10 @@ sfw_run_batch(sfw_batch_t *tsb)
}
int
-sfw_stop_batch(sfw_batch_t *tsb, int force)
+sfw_stop_batch(struct sfw_batch *tsb, int force)
{
- sfw_test_instance_t *tsi;
- srpc_client_rpc_t *rpc;
+ struct sfw_test_instance *tsi;
+ struct srpc_client_rpc *rpc;
if (!sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
@@ -1068,9 +1068,9 @@ sfw_stop_batch(sfw_batch_t *tsb, int force)
}
static int
-sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
{
- sfw_test_instance_t *tsi;
+ struct sfw_test_instance *tsi;
if (testidx < 0)
return -EINVAL;
@@ -1115,11 +1115,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
static int
sfw_add_test(struct srpc_server_rpc *rpc)
{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
- srpc_test_reqst_t *request;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
+ struct srpc_test_reqst *request;
int rc;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1183,11 +1183,11 @@ sfw_add_test(struct srpc_server_rpc *rpc)
}
static int
-sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
+sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
int rc = 0;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1227,8 +1227,8 @@ static int
sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reply = &rpc->srpc_replymsg;
- srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_msg *reply = &rpc->srpc_replymsg;
+ struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
unsigned features = LST_FEATS_MASK;
int rc = 0;
@@ -1244,7 +1244,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
/* Remove timer to avoid racing with it or expiring active session */
if (sfw_del_session_timer()) {
- CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+ CERROR("dropping RPC %s from %s: racing with expiry timer\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
spin_unlock(&sfw_data.fw_lock);
return -EAGAIN;
@@ -1261,7 +1261,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
sv->sv_id != SRPC_SERVICE_DEBUG) {
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (sn &&
sn->sn_features != request->msg_ses_feats) {
@@ -1273,7 +1273,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
}
} else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
- /**
+ /*
* NB: at this point, old version will ignore features and
* create new session anyway, so console should be able
* to handle this
@@ -1377,12 +1377,12 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
return rc;
}
-srpc_client_rpc_t *
+struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv)
+ void (*done)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc = NULL;
+ struct srpc_client_rpc *rpc = NULL;
spin_lock(&sfw_data.fw_lock);
@@ -1391,7 +1391,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1415,7 +1415,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
}
void
-sfw_unpack_message(srpc_msg_t *msg)
+sfw_unpack_message(struct srpc_msg *msg)
{
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
@@ -1424,7 +1424,7 @@ sfw_unpack_message(srpc_msg_t *msg)
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (msg->msg_type == SRPC_MSG_STAT_REQST) {
- srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst;
+ struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
__swab32s(&req->str_type);
__swab64s(&req->str_rpyid);
@@ -1433,7 +1433,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
__swab32s(&rep->str_status);
sfw_unpack_sid(rep->str_sid);
@@ -1444,7 +1444,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
- srpc_mksn_reqst_t *req = &msg->msg_body.mksn_reqst;
+ struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
__swab64s(&req->mksn_rpyid);
__swab32s(&req->mksn_force);
@@ -1453,7 +1453,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
- srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply;
+ struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
__swab32s(&rep->mksn_status);
__swab32s(&rep->mksn_timeout);
@@ -1462,7 +1462,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
- srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst;
+ struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
__swab64s(&req->rmsn_rpyid);
sfw_unpack_sid(req->rmsn_sid);
@@ -1470,7 +1470,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
- srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply;
+ struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
__swab32s(&rep->rmsn_status);
sfw_unpack_sid(rep->rmsn_sid);
@@ -1478,7 +1478,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
- srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst;
+ struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
__swab64s(&req->dbg_rpyid);
__swab32s(&req->dbg_flags);
@@ -1487,7 +1487,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
- srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply;
+ struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
__swab32s(&rep->dbg_nbatch);
__swab32s(&rep->dbg_timeout);
@@ -1496,7 +1496,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
- srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst;
+ struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
__swab32s(&req->bar_opc);
__swab64s(&req->bar_rpyid);
@@ -1508,7 +1508,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+ struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
__swab32s(&rep->bar_status);
sfw_unpack_sid(rep->bar_sid);
@@ -1516,7 +1516,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_TEST_REQST) {
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
__swab64s(&req->tsr_rpyid);
__swab64s(&req->tsr_bulkid);
@@ -1530,7 +1530,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
- srpc_test_reply_t *rep = &msg->msg_body.tes_reply;
+ struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
__swab32s(&rep->tsr_status);
sfw_unpack_sid(rep->tsr_sid);
@@ -1538,7 +1538,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
- srpc_join_reqst_t *req = &msg->msg_body.join_reqst;
+ struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
__swab64s(&req->join_rpyid);
sfw_unpack_sid(req->join_sid);
@@ -1546,7 +1546,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
- srpc_join_reply_t *rep = &msg->msg_body.join_reply;
+ struct srpc_join_reply *rep = &msg->msg_body.join_reply;
__swab32s(&rep->join_status);
__swab32s(&rep->join_timeout);
@@ -1558,7 +1558,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
void
-sfw_abort_rpc(srpc_client_rpc_t *rpc)
+sfw_abort_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -1569,7 +1569,7 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
}
void
-sfw_post_rpc(srpc_client_rpc_t *rpc)
+sfw_post_rpc(struct srpc_client_rpc *rpc)
{
spin_lock(&rpc->crpc_lock);
@@ -1584,7 +1584,7 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
spin_unlock(&rpc->crpc_lock);
}
-static srpc_service_t sfw_services[] = {
+static struct srpc_service sfw_services[] = {
{
/* sv_id */ SRPC_SERVICE_DEBUG,
/* sv_name */ "debug",
@@ -1628,8 +1628,8 @@ sfw_startup(void)
int i;
int rc;
int error;
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
+ struct srpc_service *sv;
+ struct sfw_test_case *tsc;
if (session_timeout < 0) {
CERROR("Session timeout must be non-negative: %d\n",
@@ -1721,8 +1721,8 @@ sfw_startup(void)
void
sfw_shutdown(void)
{
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
+ struct srpc_service *sv;
+ struct sfw_test_case *tsc;
int i;
spin_lock(&sfw_data.fw_lock);
@@ -1759,10 +1759,10 @@ sfw_shutdown(void)
}
while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1778,7 +1778,7 @@ sfw_shutdown(void)
while (!list_empty(&sfw_data.fw_tests)) {
tsc = list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
+ struct sfw_test_case, tsc_list);
srpc_wait_service_shutdown(tsc->tsc_srv_service);
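
sfw_unpack_message() above byte-swaps a message only when msg_magic
arrives flipped, i.e. when the peer has the opposite endianness; a
matching magic means the wire layout already agrees with the host. A
self-contained sketch of that convention (the demo_* names are
hypothetical; DEMO_MAGIC reuses the SRPC magic value):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAGIC 0xeeb0f00dU

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

static void demo_unpack(uint32_t *magic, uint32_t *field)
{
	if (*magic == DEMO_MAGIC)
		return;			/* no flipping needed */

	*magic = swab32(*magic);	/* peer is opposite-endian */
	*field = swab32(*field);
}

int main(void)
{
	/* message as it would arrive from an opposite-endian peer */
	uint32_t magic = swab32(DEMO_MAGIC);
	uint32_t field = swab32(7);

	demo_unpack(&magic, &field);
	printf("%" PRIx32 " %" PRIu32 "\n", magic, field); /* eeb0f00d 7 */
	return 0;
}
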
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 81a45045e..ad26fe9dd 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -56,9 +56,9 @@ struct lst_ping_data {
static struct lst_ping_data lst_ping_data;
static int
-ping_client_init(sfw_test_instance_t *tsi)
+ping_client_init(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
LASSERT(tsi->tsi_is_client);
LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
@@ -70,9 +70,9 @@ ping_client_init(sfw_test_instance_t *tsi)
}
static void
-ping_client_fini(sfw_test_instance_t *tsi)
+ping_client_fini(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
int errors;
LASSERT(sn);
@@ -86,12 +86,12 @@ ping_client_fini(sfw_test_instance_t *tsi)
}
static int
-ping_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpc)
+ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest,
+ struct srpc_client_rpc **rpc)
{
- srpc_ping_reqst_t *req;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct srpc_ping_reqst *req;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
struct timespec64 ts;
int rc;
@@ -118,18 +118,18 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
}
static void
-ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
- srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
+ struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
struct timespec64 ts;
LASSERT(sn);
if (rpc->crpc_status) {
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_ping_errors);
CERROR("Unable to ping %s (%d): %d\n",
libcfs_id2str(rpc->crpc_dest),
@@ -171,10 +171,10 @@ static int
ping_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
- srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
+ struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_msg *replymsg = &rpc->srpc_replymsg;
+ struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst;
+ struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
LASSERT(sv->sv_id == SRPC_SERVICE_PING);
@@ -210,7 +210,8 @@ ping_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
-sfw_test_client_ops_t ping_test_client;
+struct sfw_test_client_ops ping_test_client;
+
void ping_init_test_client(void)
{
ping_test_client.tso_init = ping_client_init;
@@ -219,7 +220,8 @@ void ping_init_test_client(void)
ping_test_client.tso_done_rpc = ping_client_done_rpc;
}
-srpc_service_t ping_test_service;
+struct srpc_service ping_test_service;
+
void ping_init_test_service(void)
{
ping_test_service.sv_id = SRPC_SERVICE_PING;
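
ping_test_client and ping_test_service above were converted from
typedef'd globals to plain structs; both are ops/parameter tables whose
members are filled in at init time rather than with static
initializers. A minimal sketch of the pattern (all demo_* names are
hypothetical):

#include <stdio.h>

/* hypothetical ops table, shaped like sfw_test_client_ops */
struct demo_test_ops {
	int  (*tso_init)(void);
	void (*tso_fini)(void);
};

static int demo_init(void) { puts("init"); return 0; }
static void demo_fini(void) { puts("fini"); }

static struct demo_test_ops demo_ops;

/* mirrors ping_init_test_client(): wire up the table at startup */
static void demo_init_ops(void)
{
	demo_ops.tso_init = demo_init;
	demo_ops.tso_fini = demo_fini;
}

int main(void)
{
	demo_init_ops();
	demo_ops.tso_init();
	demo_ops.tso_fini();
	return 0;
}
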
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 7d7748d96..3c45a7cfa 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -46,19 +46,19 @@
#include "selftest.h"
-typedef enum {
+enum srpc_state {
SRPC_STATE_NONE,
SRPC_STATE_NI_INIT,
SRPC_STATE_EQ_INIT,
SRPC_STATE_RUNNING,
SRPC_STATE_STOPPING,
-} srpc_state_t;
+};
static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
- srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+ struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
- srpc_state_t rpc_state;
+ enum srpc_state rpc_state;
srpc_counters_t rpc_counters;
__u64 rpc_matchbits; /* matchbits counter */
} srpc_data;
@@ -71,7 +71,7 @@ srpc_serv_portal(int svc_id)
}
/* forward ref's */
-int srpc_handle_rpc(swi_workitem_t *wi);
+int srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(srpc_counters_t *cnt)
{
@@ -88,7 +88,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
}
static int
-srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
{
nob = min_t(int, nob, PAGE_SIZE);
@@ -102,7 +102,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
}
void
-srpc_free_bulk(srpc_bulk_t *bk)
+srpc_free_bulk(struct srpc_bulk *bk)
{
int i;
struct page *pg;
@@ -117,25 +117,25 @@ srpc_free_bulk(srpc_bulk_t *bk)
__free_page(pg);
}
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
+ LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}
-srpc_bulk_t *
+struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
- srpc_bulk_t *bk;
+ struct srpc_bulk *bk;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
if (!bk) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
}
- memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
@@ -256,7 +256,7 @@ srpc_service_init(struct srpc_service *svc)
svc->sv_shuttingdown = 0;
svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct srpc_service_cd));
+ sizeof(*svc->sv_cpt_data));
if (!svc->sv_cpt_data)
return -ENOMEM;
@@ -338,7 +338,7 @@ srpc_add_service(struct srpc_service *sv)
}
int
-srpc_remove_service(srpc_service_t *sv)
+srpc_remove_service(struct srpc_service *sv)
{
int id = sv->sv_id;
@@ -357,7 +357,7 @@ srpc_remove_service(srpc_service_t *sv)
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
int len, int options, lnet_process_id_t peer,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
int rc;
lnet_md_t md;
@@ -396,7 +396,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int options, lnet_process_id_t peer, lnet_nid_t self,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
int rc;
lnet_md_t md;
@@ -449,7 +449,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
lnet_process_id_t any = { 0 };
@@ -697,7 +697,7 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -755,11 +755,11 @@ srpc_abort_service(struct srpc_service *sv)
}
void
-srpc_shutdown_service(srpc_service_t *sv)
+srpc_shutdown_service(struct srpc_service *sv)
{
struct srpc_service_cd *scd;
struct srpc_server_rpc *rpc;
- srpc_buffer_t *buf;
+ struct srpc_buffer *buf;
int i;
CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
@@ -792,9 +792,9 @@ srpc_shutdown_service(srpc_service_t *sv)
}
static int
-srpc_send_request(srpc_client_rpc_t *rpc)
+srpc_send_request(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_reqstev;
+ struct srpc_event *ev = &rpc->crpc_reqstev;
int rc;
ev->ev_fired = 0;
@@ -803,7 +803,7 @@ srpc_send_request(srpc_client_rpc_t *rpc)
rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
rpc->crpc_service, &rpc->crpc_reqstmsg,
- sizeof(srpc_msg_t), LNET_MD_OP_PUT,
+ sizeof(struct srpc_msg), LNET_MD_OP_PUT,
rpc->crpc_dest, LNET_NID_ANY,
&rpc->crpc_reqstmdh, ev);
if (rc) {
@@ -814,9 +814,9 @@ srpc_send_request(srpc_client_rpc_t *rpc)
}
static int
-srpc_prepare_reply(srpc_client_rpc_t *rpc)
+srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_replyev;
+ struct srpc_event *ev = &rpc->crpc_replyev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
int rc;
@@ -827,7 +827,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
*id = srpc_next_id();
rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &rpc->crpc_replymsg, sizeof(srpc_msg_t),
+ &rpc->crpc_replymsg,
+ sizeof(struct srpc_msg),
LNET_MD_OP_PUT, rpc->crpc_dest,
&rpc->crpc_replymdh, ev);
if (rc) {
@@ -838,10 +839,10 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
}
static int
-srpc_prepare_bulk(srpc_client_rpc_t *rpc)
+srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
- srpc_bulk_t *bk = &rpc->crpc_bulk;
- srpc_event_t *ev = &rpc->crpc_bulkev;
+ struct srpc_bulk *bk = &rpc->crpc_bulk;
+ struct srpc_event *ev = &rpc->crpc_bulkev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
int rc;
int opt;
@@ -873,8 +874,8 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc)
static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_event *ev = &rpc->srpc_ev;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
int rc;
int opt;
@@ -903,7 +904,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_buffer_t *buffer;
+ struct srpc_buffer *buffer;
LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
@@ -948,7 +949,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
buffer = list_entry(scd->scd_buf_blocked.next,
- srpc_buffer_t, buf_list);
+ struct srpc_buffer, buf_list);
list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, scd, buffer);
@@ -963,12 +964,12 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
/* handles an incoming RPC */
int
-srpc_handle_rpc(swi_workitem_t *wi)
+srpc_handle_rpc(struct swi_workitem *wi)
{
struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
int rc = 0;
LASSERT(wi == &rpc->srpc_wi);
@@ -995,8 +996,8 @@ srpc_handle_rpc(swi_workitem_t *wi)
default:
LBUG();
case SWI_STATE_NEWBORN: {
- srpc_msg_t *msg;
- srpc_generic_reply_t *reply;
+ struct srpc_msg *msg;
+ struct srpc_generic_reply *reply;
msg = &rpc->srpc_reqstbuf->buf_msg;
reply = &rpc->srpc_replymsg.msg_body.reply;
@@ -1077,7 +1078,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
static void
srpc_client_rpc_expired(void *data)
{
- srpc_client_rpc_t *rpc = data;
+ struct srpc_client_rpc *rpc = data;
CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
@@ -1096,7 +1097,7 @@ srpc_client_rpc_expired(void *data)
}
static void
-srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
+srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
struct stt_timer *timer = &rpc->crpc_timer;
@@ -1117,7 +1118,7 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
* running on any CPU.
*/
static void
-srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
+srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
/* timer not planted or already exploded */
if (!rpc->crpc_timeout)
@@ -1138,9 +1139,9 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
}
static void
-srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
+srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
- swi_workitem_t *wi = &rpc->crpc_wi;
+ struct swi_workitem *wi = &rpc->crpc_wi;
LASSERT(status || wi->swi_state == SWI_STATE_DONE);
@@ -1175,11 +1176,11 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
/* sends an outgoing RPC */
int
-srpc_send_rpc(swi_workitem_t *wi)
+srpc_send_rpc(struct swi_workitem *wi)
{
int rc = 0;
- srpc_client_rpc_t *rpc;
- srpc_msg_t *reply;
+ struct srpc_client_rpc *rpc;
+ struct srpc_msg *reply;
int do_bulk;
LASSERT(wi);
@@ -1237,7 +1238,7 @@ srpc_send_rpc(swi_workitem_t *wi)
wi->swi_state = SWI_STATE_REQUEST_SENT;
/* perhaps more events, fall thru */
case SWI_STATE_REQUEST_SENT: {
- srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
+ enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
if (!rpc->crpc_replyev.ev_fired)
break;
@@ -1308,15 +1309,15 @@ abort:
return 0;
}
-srpc_client_rpc_t *
+struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
- LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
+ LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
if (!rpc)
return NULL;
@@ -1328,12 +1329,12 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
/* called with rpc->crpc_lock held */
void
-srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
+srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
LASSERT(why);
- if (rpc->crpc_aborted || /* already aborted */
- rpc->crpc_closed) /* callback imminent */
+ if (rpc->crpc_aborted || /* already aborted */
+ rpc->crpc_closed) /* callback imminent */
return;
CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
@@ -1347,7 +1348,7 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
/* called with rpc->crpc_lock held */
void
-srpc_post_rpc(srpc_client_rpc_t *rpc)
+srpc_post_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(!rpc->crpc_aborted);
LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
@@ -1363,7 +1364,7 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
struct srpc_msg *msg = &rpc->srpc_replymsg;
struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1401,7 +1402,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
rpc->srpc_peer, rpc->srpc_self,
&rpc->srpc_replymdh, ev);
if (rc)
- ev->ev_fired = 1; /* no more event expected */
+ ev->ev_fired = 1; /* no more event expected */
return rc;
}
@@ -1410,13 +1411,13 @@ static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
- srpc_event_t *rpcev = ev->md.user_ptr;
- srpc_client_rpc_t *crpc;
+ struct srpc_event *rpcev = ev->md.user_ptr;
+ struct srpc_client_rpc *crpc;
struct srpc_server_rpc *srpc;
- srpc_buffer_t *buffer;
- srpc_service_t *sv;
- srpc_msg_t *msg;
- srpc_msg_type_t type;
+ struct srpc_buffer *buffer;
+ struct srpc_service *sv;
+ struct srpc_msg *msg;
+ enum srpc_msg_type type;
LASSERT(!in_interrupt());
@@ -1486,7 +1487,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
LASSERT(ev->type != LNET_EVENT_UNLINK ||
sv->sv_shuttingdown);
- buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
+ buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
buffer->buf_peer = ev->initiator;
buffer->buf_self = ev->target.nid;
@@ -1509,7 +1510,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
scd->scd_buf_err = 0;
}
- if (!scd->scd_buf_err && /* adding buffer is enabled */
+ if (!scd->scd_buf_err && /* adding buffer is enabled */
!scd->scd_buf_adjust &&
scd->scd_buf_nposted < scd->scd_buf_low) {
scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
@@ -1663,7 +1664,7 @@ srpc_shutdown(void)
spin_lock(&srpc_data.rpc_glock);
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
- srpc_service_t *sv = srpc_data.rpc_services[i];
+ struct srpc_service *sv = srpc_data.rpc_services[i];
LASSERTF(!sv, "service not empty: id %d, name %s\n",
i, sv->sv_name);
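
Several allocations in this file size a variable-length object with
offsetof(type, array[n]), for example offsetof(struct srpc_bulk,
bk_iovs[bulk_npg]), which yields the fixed header plus exactly n
trailing elements in one allocation. A userspace sketch of the idiom
(demo names are hypothetical; the [0]-sized array is the older GNU
spelling of a C99 flexible array member, and offsetof with a runtime
index relies on the GCC builtin, as the kernel does):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical descriptor with a trailing variable-length array,
 * shaped like srpc_bulk's bk_iovs[0] */
struct demo_bulk {
	int niov;
	int iovs[0];
};

int main(void)
{
	int n = 8;
	/* header plus n trailing elements, in one allocation */
	struct demo_bulk *bk = calloc(1, offsetof(struct demo_bulk, iovs[n]));

	if (!bk)
		return 1;
	bk->niov = n;
	printf("%zu bytes for %d iovs\n",
	       offsetof(struct demo_bulk, iovs[n]), bk->niov);
	free(bk);
	return 0;
}
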
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index a79c315f2..c9b904cad 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -44,7 +44,7 @@
*
* XXX: *REPLY == *REQST + 1
*/
-typedef enum {
+enum srpc_msg_type {
SRPC_MSG_MKSN_REQST = 0,
SRPC_MSG_MKSN_REPLY = 1,
SRPC_MSG_RMSN_REQST = 2,
@@ -63,7 +63,7 @@ typedef enum {
SRPC_MSG_PING_REPLY = 15,
SRPC_MSG_JOIN_REQST = 16,
SRPC_MSG_JOIN_REPLY = 17,
-} srpc_msg_type_t;
+};
/* CAVEAT EMPTOR:
* All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
@@ -72,122 +72,122 @@ typedef enum {
* All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
* session id if needed.
*/
-typedef struct {
+struct srpc_generic_reqst {
__u64 rpyid; /* reply buffer matchbits */
__u64 bulkid; /* bulk buffer matchbits */
-} WIRE_ATTR srpc_generic_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_generic_reply {
__u32 status;
lst_sid_t sid;
-} WIRE_ATTR srpc_generic_reply_t;
+} WIRE_ATTR;
/* FRAMEWORK RPCs */
-typedef struct {
+struct srpc_mksn_reqst {
__u64 mksn_rpyid; /* reply buffer matchbits */
lst_sid_t mksn_sid; /* session id */
__u32 mksn_force; /* use brute force */
char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */
+} WIRE_ATTR; /* make session request */
-typedef struct {
+struct srpc_mksn_reply {
__u32 mksn_status; /* session status */
lst_sid_t mksn_sid; /* session id */
__u32 mksn_timeout; /* session timeout */
char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
+} WIRE_ATTR; /* make session reply */
-typedef struct {
+struct srpc_rmsn_reqst {
__u64 rmsn_rpyid; /* reply buffer matchbits */
lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
+} WIRE_ATTR; /* remove session request */
-typedef struct {
+struct srpc_rmsn_reply {
__u32 rmsn_status;
lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
+} WIRE_ATTR; /* remove session reply */
-typedef struct {
+struct srpc_join_reqst {
__u64 join_rpyid; /* reply buffer matchbits */
lst_sid_t join_sid; /* session id to join */
char join_group[LST_NAME_SIZE]; /* group name */
-} WIRE_ATTR srpc_join_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_join_reply {
__u32 join_status; /* returned status */
lst_sid_t join_sid; /* session id */
__u32 join_timeout; /* # seconds' inactivity to
* expire */
char join_session[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_join_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_debug_reqst {
__u64 dbg_rpyid; /* reply buffer matchbits */
lst_sid_t dbg_sid; /* session id */
__u32 dbg_flags; /* bitmap of debug */
-} WIRE_ATTR srpc_debug_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_debug_reply {
__u32 dbg_status; /* returned code */
lst_sid_t dbg_sid; /* session id */
__u32 dbg_timeout; /* session timeout */
__u32 dbg_nbatch; /* # of batches in the node */
char dbg_name[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_debug_reply_t;
+} WIRE_ATTR;
#define SRPC_BATCH_OPC_RUN 1
#define SRPC_BATCH_OPC_STOP 2
#define SRPC_BATCH_OPC_QUERY 3
-typedef struct {
+struct srpc_batch_reqst {
__u64 bar_rpyid; /* reply buffer matchbits */
lst_sid_t bar_sid; /* session id */
lst_bid_t bar_bid; /* batch id */
__u32 bar_opc; /* create/start/stop batch */
__u32 bar_testidx; /* index of test */
__u32 bar_arg; /* parameters */
-} WIRE_ATTR srpc_batch_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_batch_reply {
__u32 bar_status; /* status of request */
lst_sid_t bar_sid; /* session id */
__u32 bar_active; /* # of active tests in batch/test */
__u32 bar_time; /* remained time */
-} WIRE_ATTR srpc_batch_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_stat_reqst {
__u64 str_rpyid; /* reply buffer matchbits */
lst_sid_t str_sid; /* session id */
__u32 str_type; /* type of stat */
-} WIRE_ATTR srpc_stat_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_stat_reply {
__u32 str_status;
lst_sid_t str_sid;
sfw_counters_t str_fw;
srpc_counters_t str_rpc;
lnet_counters_t str_lnet;
-} WIRE_ATTR srpc_stat_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_bulk_req {
__u32 blk_opc; /* bulk operation code */
__u32 blk_npg; /* # of pages */
__u32 blk_flags; /* reserved flags */
-} WIRE_ATTR test_bulk_req_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_bulk_req_v1 {
__u16 blk_opc; /* bulk operation code */
__u16 blk_flags; /* data check flags */
__u32 blk_len; /* data length */
__u32 blk_offset; /* reserved: offset */
-} WIRE_ATTR test_bulk_req_v1_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_ping_req {
__u32 png_size; /* size of ping message */
__u32 png_flags; /* reserved flags */
-} WIRE_ATTR test_ping_req_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_test_reqst {
__u64 tsr_rpyid; /* reply buffer matchbits */
__u64 tsr_bulkid; /* bulk buffer matchbits */
lst_sid_t tsr_sid; /* session id */
@@ -201,82 +201,82 @@ typedef struct {
__u32 tsr_ndest; /* # of dest nodes */
union {
- test_ping_req_t ping;
- test_bulk_req_t bulk_v0;
- test_bulk_req_v1_t bulk_v1;
- } tsr_u;
-} WIRE_ATTR srpc_test_reqst_t;
+ struct test_ping_req ping;
+ struct test_bulk_req bulk_v0;
+ struct test_bulk_req_v1 bulk_v1;
+ } tsr_u;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_test_reply {
__u32 tsr_status; /* returned code */
lst_sid_t tsr_sid;
-} WIRE_ATTR srpc_test_reply_t;
+} WIRE_ATTR;
/* TEST RPCs */
-typedef struct {
+struct srpc_ping_reqst {
__u64 pnr_rpyid;
__u32 pnr_magic;
__u32 pnr_seq;
__u64 pnr_time_sec;
__u64 pnr_time_usec;
-} WIRE_ATTR srpc_ping_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_ping_reply {
__u32 pnr_status;
__u32 pnr_magic;
__u32 pnr_seq;
-} WIRE_ATTR srpc_ping_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_brw_reqst {
__u64 brw_rpyid; /* reply buffer matchbits */
__u64 brw_bulkid; /* bulk buffer matchbits */
__u32 brw_rw; /* read or write */
__u32 brw_len; /* bulk data len */
__u32 brw_flags; /* bulk data patterns */
-} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
+} WIRE_ATTR; /* bulk r/w request */
-typedef struct {
+struct srpc_brw_reply {
__u32 brw_status;
-} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
+} WIRE_ATTR; /* bulk r/w reply */
#define SRPC_MSG_MAGIC 0xeeb0f00d
#define SRPC_MSG_VERSION 1
-typedef struct srpc_msg {
+struct srpc_msg {
__u32 msg_magic; /* magic number */
__u32 msg_version; /* message version number */
- __u32 msg_type; /* type of message body: srpc_msg_type_t */
+ __u32 msg_type; /* type of message body: srpc_msg_type */
__u32 msg_reserved0;
__u32 msg_reserved1;
__u32 msg_ses_feats; /* test session features */
union {
- srpc_generic_reqst_t reqst;
- srpc_generic_reply_t reply;
-
- srpc_mksn_reqst_t mksn_reqst;
- srpc_mksn_reply_t mksn_reply;
- srpc_rmsn_reqst_t rmsn_reqst;
- srpc_rmsn_reply_t rmsn_reply;
- srpc_debug_reqst_t dbg_reqst;
- srpc_debug_reply_t dbg_reply;
- srpc_batch_reqst_t bat_reqst;
- srpc_batch_reply_t bat_reply;
- srpc_stat_reqst_t stat_reqst;
- srpc_stat_reply_t stat_reply;
- srpc_test_reqst_t tes_reqst;
- srpc_test_reply_t tes_reply;
- srpc_join_reqst_t join_reqst;
- srpc_join_reply_t join_reply;
-
- srpc_ping_reqst_t ping_reqst;
- srpc_ping_reply_t ping_reply;
- srpc_brw_reqst_t brw_reqst;
- srpc_brw_reply_t brw_reply;
+ struct srpc_generic_reqst reqst;
+ struct srpc_generic_reply reply;
+
+ struct srpc_mksn_reqst mksn_reqst;
+ struct srpc_mksn_reply mksn_reply;
+ struct srpc_rmsn_reqst rmsn_reqst;
+ struct srpc_rmsn_reply rmsn_reply;
+ struct srpc_debug_reqst dbg_reqst;
+ struct srpc_debug_reply dbg_reply;
+ struct srpc_batch_reqst bat_reqst;
+ struct srpc_batch_reply bat_reply;
+ struct srpc_stat_reqst stat_reqst;
+ struct srpc_stat_reply stat_reply;
+ struct srpc_test_reqst tes_reqst;
+ struct srpc_test_reply tes_reply;
+ struct srpc_join_reqst join_reqst;
+ struct srpc_join_reply join_reply;
+
+ struct srpc_ping_reqst ping_reqst;
+ struct srpc_ping_reply ping_reply;
+ struct srpc_brw_reqst brw_reqst;
+ struct srpc_brw_reply brw_reply;
} msg_body;
-} WIRE_ATTR srpc_msg_t;
+} WIRE_ATTR;
static inline void
-srpc_unpack_msg_hdr(srpc_msg_t *msg)
+srpc_unpack_msg_hdr(struct srpc_msg *msg)
{
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
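
The unpack helper relies on the usual wire-protocol trick: the sender writes SRPC_MSG_MAGIC in its native byte order, so a receiver that reads the byte-swapped constant knows every multi-byte field must be flipped. A minimal userspace sketch of the idea, modeling only the three leading header fields (not the kernel implementation):

#include <stdint.h>

#define SRPC_MSG_MAGIC 0xeeb0f00d

static uint32_t swab32(uint32_t v)
{
	return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8) |
	       ((v & 0x00ff0000U) >> 8)  | ((v & 0xff000000U) >> 24);
}

struct hdr {
	uint32_t magic;
	uint32_t version;
	uint32_t type;
};

/* 0 on success; -1 if the magic matches in neither byte order. */
static int unpack_hdr(struct hdr *h)
{
	if (h->magic == SRPC_MSG_MAGIC)
		return 0;			/* same byte order, nothing to do */
	if (swab32(h->magic) != SRPC_MSG_MAGIC)
		return -1;			/* not an SRPC-style message */
	h->magic   = swab32(h->magic);		/* peer is opposite-endian: flip all */
	h->version = swab32(h->version);
	h->type    = swab32(h->type);
	return 0;
}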
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index e689ca184..4eac1c9e6 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -93,7 +93,7 @@ struct sfw_test_instance;
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL 52
-static inline srpc_msg_type_t
+static inline enum srpc_msg_type
srpc_service2request(int service)
{
switch (service) {
@@ -128,13 +128,13 @@ srpc_service2request(int service)
}
}
-static inline srpc_msg_type_t
+static inline enum srpc_msg_type
srpc_service2reply(int service)
{
return srpc_service2request(service) + 1;
}
-typedef enum {
+enum srpc_event_type {
	SRPC_BULK_REQ_RCVD = 1, /* passive bulk request (PUT sink/GET source)
* received */
SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
@@ -143,57 +143,58 @@ typedef enum {
SRPC_REPLY_SENT = 5, /* outgoing reply sent */
SRPC_REQUEST_RCVD = 6, /* incoming request received */
SRPC_REQUEST_SENT = 7, /* outgoing request sent */
-} srpc_event_type_t;
+};
/* RPC event */
-typedef struct {
- srpc_event_type_t ev_type; /* what's up */
+struct srpc_event {
+ enum srpc_event_type ev_type; /* what's up */
lnet_event_kind_t ev_lnet; /* LNet event type */
int ev_fired; /* LNet event fired? */
int ev_status; /* LNet event status */
void *ev_data; /* owning server/client RPC */
-} srpc_event_t;
+};
-typedef struct {
+/* bulk descriptor */
+struct srpc_bulk {
int bk_len; /* len of bulk data */
lnet_handle_md_t bk_mdh;
int bk_sink; /* sink/source */
int bk_niov; /* # iov in bk_iovs */
lnet_kiov_t bk_iovs[0];
-} srpc_bulk_t; /* bulk descriptor */
+};
/* message buffer descriptor */
-typedef struct srpc_buffer {
+struct srpc_buffer {
struct list_head buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
+ struct srpc_msg buf_msg;
lnet_handle_md_t buf_mdh;
lnet_nid_t buf_self;
lnet_process_id_t buf_peer;
-} srpc_buffer_t;
+};
struct swi_workitem;
typedef int (*swi_action_t) (struct swi_workitem *);
-typedef struct swi_workitem {
+struct swi_workitem {
struct cfs_wi_sched *swi_sched;
- cfs_workitem_t swi_workitem;
+ struct cfs_workitem swi_workitem;
swi_action_t swi_action;
int swi_state;
-} swi_workitem_t;
+};
/* server-side state of a RPC */
struct srpc_server_rpc {
/* chain on srpc_service::*_rpcq */
struct list_head srpc_list;
struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
+ struct swi_workitem srpc_wi;
+ struct srpc_event srpc_ev; /* bulk/reply event */
lnet_nid_t srpc_self;
lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
+ struct srpc_msg srpc_replymsg;
lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
+ struct srpc_buffer *srpc_reqstbuf;
+ struct srpc_bulk *srpc_bulk;
unsigned int srpc_aborted; /* being given up */
int srpc_status;
@@ -201,14 +202,14 @@ struct srpc_server_rpc {
};
/* client-side state of a RPC */
-typedef struct srpc_client_rpc {
+struct srpc_client_rpc {
struct list_head crpc_list; /* chain on user's lists */
spinlock_t crpc_lock; /* serialize */
int crpc_service;
atomic_t crpc_refcount;
int crpc_timeout; /* # seconds to wait for reply */
struct stt_timer crpc_timer;
- swi_workitem_t crpc_wi;
+ struct swi_workitem crpc_wi;
lnet_process_id_t crpc_dest;
void (*crpc_done)(struct srpc_client_rpc *);
@@ -221,20 +222,20 @@ typedef struct srpc_client_rpc {
unsigned int crpc_closed:1; /* completed */
/* RPC events */
- srpc_event_t crpc_bulkev; /* bulk event */
- srpc_event_t crpc_reqstev; /* request event */
- srpc_event_t crpc_replyev; /* reply event */
+ struct srpc_event crpc_bulkev; /* bulk event */
+ struct srpc_event crpc_reqstev; /* request event */
+ struct srpc_event crpc_replyev; /* reply event */
/* bulk, request(reqst), and reply exchanged on wire */
- srpc_msg_t crpc_reqstmsg;
- srpc_msg_t crpc_replymsg;
+ struct srpc_msg crpc_reqstmsg;
+ struct srpc_msg crpc_replymsg;
lnet_handle_md_t crpc_reqstmdh;
lnet_handle_md_t crpc_replymdh;
- srpc_bulk_t crpc_bulk;
-} srpc_client_rpc_t;
+ struct srpc_bulk crpc_bulk;
+};
#define srpc_client_rpc_size(rpc) \
-offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
+offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
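
srpc_client_rpc_size() is the standard sizing idiom for a struct that ends in a zero-length array: offsetof() taken at one element past the last gives exactly the number of bytes to allocate. A standalone sketch with made-up types; note that a runtime index inside offsetof() is the common GCC extension the kernel relies on:

#include <stddef.h>
#include <stdlib.h>

struct iov { void *base; size_t len; };

struct bulk {
	int niov;
	struct iov iovs[];		/* flexible array member */
};

static struct bulk *bulk_alloc(int niov)
{
	/* One allocation covers the header plus exactly niov elements. */
	struct bulk *bk = calloc(1, offsetof(struct bulk, iovs[niov]));

	if (bk)
		bk->niov = niov;
	return bk;
}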
#define srpc_client_rpc_addref(rpc) \
do { \
@@ -266,13 +267,13 @@ struct srpc_service_cd {
/** backref to service */
struct srpc_service *scd_svc;
/** event buffer */
- srpc_event_t scd_ev;
+ struct srpc_event scd_ev;
/** free RPC descriptors */
struct list_head scd_rpc_free;
/** in-flight RPCs */
struct list_head scd_rpc_active;
/** workitem for posting buffer */
- swi_workitem_t scd_buf_wi;
+ struct swi_workitem scd_buf_wi;
/** CPT id */
int scd_cpt;
/** error code for scd_buf_wi */
@@ -306,7 +307,7 @@ struct srpc_service_cd {
#define SFW_FRWK_WI_MIN 16
#define SFW_FRWK_WI_MAX 256
-typedef struct srpc_service {
+struct srpc_service {
int sv_id; /* service id */
const char *sv_name; /* human readable name */
int sv_wi_total; /* total server workitems */
@@ -320,9 +321,9 @@ typedef struct srpc_service {
*/
int (*sv_handler)(struct srpc_server_rpc *);
int (*sv_bulk_ready)(struct srpc_server_rpc *, int);
-} srpc_service_t;
+};
-typedef struct {
+struct sfw_session {
struct list_head sn_list; /* chain on fw_zombie_sessions */
lst_sid_t sn_id; /* unique identifier */
unsigned int sn_timeout; /* # seconds' inactivity to expire */
@@ -335,37 +336,37 @@ typedef struct {
atomic_t sn_brw_errors;
atomic_t sn_ping_errors;
unsigned long sn_started;
-} sfw_session_t;
+};
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
(sid0).ses_stamp == (sid1).ses_stamp)
-typedef struct {
+struct sfw_batch {
struct list_head bat_list; /* chain on sn_batches */
lst_bid_t bat_id; /* batch id */
int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
+ struct sfw_session *bat_session; /* batch's session */
atomic_t bat_nactive; /* # of active tests */
struct list_head bat_tests; /* test instances */
-} sfw_batch_t;
+};
-typedef struct {
+struct sfw_test_client_ops {
int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
* client */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
* client */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
- srpc_client_rpc_t **rpc); /* prep a tests rpc */
+			    struct srpc_client_rpc **rpc); /* prep a test rpc */
void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- srpc_client_rpc_t *rpc); /* done a test rpc */
-} sfw_test_client_ops_t;
+ struct srpc_client_rpc *rpc); /* done a test rpc */
+};
-typedef struct sfw_test_instance {
+struct sfw_test_instance {
struct list_head tsi_list; /* chain on batch */
int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operation
+ struct sfw_batch *tsi_batch; /* batch */
+ struct sfw_test_client_ops *tsi_ops; /* test client operation
*/
/* public parameter for all test units */
@@ -384,11 +385,11 @@ typedef struct sfw_test_instance {
struct list_head tsi_active_rpcs; /* active rpcs */
union {
- test_ping_req_t ping; /* ping parameter */
- test_bulk_req_t bulk_v0; /* bulk parameter */
- test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */
+ struct test_ping_req ping; /* ping parameter */
+ struct test_bulk_req bulk_v0; /* bulk parameter */
+ struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */
} tsi_u;
-} sfw_test_instance_t;
+};
/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
* pages are not used */
@@ -397,57 +398,58 @@ typedef struct sfw_test_instance {
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
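
sfw_id_pages() is plain round-up division; restated with hypothetical numbers:

/* Ceiling division: pages needed for n ids at per_page ids each,
 * e.g. 513 ids with 512 per page -> 2 pages. */
static inline unsigned long ids_to_pages(unsigned long n, unsigned long per_page)
{
	return (n + per_page - 1) / per_page;
}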
-typedef struct sfw_test_unit {
+struct sfw_test_unit {
struct list_head tsu_list; /* chain on lst_test_instance */
lnet_process_id_t tsu_dest; /* id of dest node */
int tsu_loop; /* loop count of the test */
- sfw_test_instance_t *tsu_instance; /* pointer to test instance */
+ struct sfw_test_instance *tsu_instance; /* pointer to test instance */
void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
-} sfw_test_unit_t;
+ struct swi_workitem tsu_worker; /* workitem of the test unit */
+};
-typedef struct sfw_test_case {
+struct sfw_test_case {
struct list_head tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
-} sfw_test_case_t;
+ struct srpc_service *tsc_srv_service; /* test service */
+ struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */
+};
-srpc_client_rpc_t *
+struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv);
-int sfw_create_test_rpc(sfw_test_unit_t *tsu,
+ void (*done)(struct srpc_client_rpc *), void *priv);
+int sfw_create_test_rpc(struct sfw_test_unit *tsu,
lnet_process_id_t peer, unsigned features,
- int nblk, int blklen, srpc_client_rpc_t **rpc);
-void sfw_abort_rpc(srpc_client_rpc_t *rpc);
-void sfw_post_rpc(srpc_client_rpc_t *rpc);
-void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
-void sfw_unpack_message(srpc_msg_t *msg);
+ int nblk, int blklen, struct srpc_client_rpc **rpc);
+void sfw_abort_rpc(struct srpc_client_rpc *rpc);
+void sfw_post_rpc(struct srpc_client_rpc *rpc);
+void sfw_client_rpc_done(struct srpc_client_rpc *rpc);
+void sfw_unpack_message(struct srpc_msg *msg);
void sfw_free_pages(struct srpc_server_rpc *rpc);
-void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
+void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i);
int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
int sink);
-int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(struct srpc_mksn_reqst *request,
+ struct srpc_mksn_reply *reply);
-srpc_client_rpc_t *
+struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
-void srpc_post_rpc(srpc_client_rpc_t *rpc);
-void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
-void srpc_free_bulk(srpc_bulk_t *bk);
-srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len,
- int sink);
-int srpc_send_rpc(swi_workitem_t *wi);
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
+void srpc_post_rpc(struct srpc_client_rpc *rpc);
+void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
+void srpc_free_bulk(struct srpc_bulk *bk);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
+ unsigned bulk_len, int sink);
+int srpc_send_rpc(struct swi_workitem *wi);
int srpc_send_reply(struct srpc_server_rpc *rpc);
-int srpc_add_service(srpc_service_t *sv);
-int srpc_remove_service(srpc_service_t *sv);
-void srpc_shutdown_service(srpc_service_t *sv);
-void srpc_abort_service(srpc_service_t *sv);
-int srpc_finish_service(srpc_service_t *sv);
-int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
-void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
+int srpc_add_service(struct srpc_service *sv);
+int srpc_remove_service(struct srpc_service *sv);
+void srpc_shutdown_service(struct srpc_service *sv);
+void srpc_abort_service(struct srpc_service *sv);
+int srpc_finish_service(struct srpc_service *sv);
+int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer);
+void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);
@@ -461,15 +463,17 @@ srpc_serv_is_framework(struct srpc_service *svc)
}
static inline int
-swi_wi_action(cfs_workitem_t *wi)
+swi_wi_action(struct cfs_workitem *wi)
{
- swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem);
+ struct swi_workitem *swi;
+
+ swi = container_of(wi, struct swi_workitem, swi_workitem);
return swi->swi_action(swi);
}
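
swi_wi_action() is the usual container_of() bounce: the workitem framework hands back a pointer to the embedded member, and the wrapper recovers the enclosing object to dispatch on its own callback. A self-contained userspace restatement (names are illustrative):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct workitem { int state; };

struct swi {
	struct workitem item;		/* embedded; this is what callbacks get */
	int (*action)(struct swi *);
};

static int swi_dispatch(struct workitem *wi)
{
	struct swi *swi = container_of(wi, struct swi, item);

	return swi->action(swi);	/* back in terms of the outer object */
}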
static inline void
-swi_init_workitem(swi_workitem_t *swi, void *data,
+swi_init_workitem(struct swi_workitem *swi, void *data,
swi_action_t action, struct cfs_wi_sched *sched)
{
swi->swi_sched = sched;
@@ -479,19 +483,19 @@ swi_init_workitem(swi_workitem_t *swi, void *data,
}
static inline void
-swi_schedule_workitem(swi_workitem_t *wi)
+swi_schedule_workitem(struct swi_workitem *wi)
{
cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
}
static inline void
-swi_exit_workitem(swi_workitem_t *swi)
+swi_exit_workitem(struct swi_workitem *swi)
{
cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
}
static inline int
-swi_deschedule_workitem(swi_workitem_t *swi)
+swi_deschedule_workitem(struct swi_workitem *swi)
{
return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
}
@@ -502,7 +506,7 @@ void sfw_shutdown(void);
void srpc_shutdown(void);
static inline void
-srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(rpc);
LASSERT(!srpc_event_pending(rpc));
@@ -515,14 +519,14 @@ srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
}
static inline void
-srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
LASSERT(nbulkiov <= LNET_MAX_IOV);
- memset(rpc, 0, offsetof(srpc_client_rpc_t,
+ memset(rpc, 0, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
INIT_LIST_HEAD(&rpc->crpc_list);
@@ -592,7 +596,7 @@ do { \
} while (0)
static inline void
-srpc_wait_service_shutdown(srpc_service_t *sv)
+srpc_wait_service_shutdown(struct srpc_service *sv)
{
int i = 2;
@@ -607,16 +611,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
}
}
-extern sfw_test_client_ops_t brw_test_client;
+extern struct sfw_test_client_ops brw_test_client;
void brw_init_test_client(void);
-extern srpc_service_t brw_test_service;
+extern struct srpc_service brw_test_service;
void brw_init_test_service(void);
-extern sfw_test_client_ops_t ping_test_client;
+extern struct sfw_test_client_ops ping_test_client;
void ping_init_test_client(void);
-extern srpc_service_t ping_test_service;
+extern struct srpc_service ping_test_service;
void ping_init_test_service(void);
#endif /* __SELFTEST_SELFTEST_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 8be52526a..b6c4aae00 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -49,7 +49,7 @@
* sorted by increasing expiry time. The number of slots is 2**7 (128),
* to cover a time period of 1024 seconds into the future before wrapping.
*/
-#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
+#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
#define STTIMER_NSLOTS (1 << 7)
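
For reference, the arithmetic these constants encode: 2**7 = 128 slots of 8 seconds each cover 1024 seconds before the wheel wraps, and an expiry time maps to a slot by shifting out the 8-second granularity. A sketch of that mapping only; how the driver actually indexes its hash is not shown in this hunk:

#define MINPOLL 3			/* log2 of the 8 s slot granularity */
#define NSLOTS	(1 << 7)		/* 128 slots -> 1024 s of coverage */

static inline unsigned int expiry_to_slot(unsigned long expiry_sec)
{
	return (expiry_sec >> MINPOLL) & (NSLOTS - 1);
}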
@@ -170,20 +170,22 @@ stt_check_timers(unsigned long *last)
static int
stt_timer_main(void *arg)
{
+ int rc = 0;
+
cfs_block_allsigs();
while (!stt_data.stt_shuttingdown) {
stt_check_timers(&stt_data.stt_prev_slot);
- wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
+ rc = wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
}
spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads--;
spin_unlock(&stt_data.stt_lock);
- return 0;
+ return rc;
}
static int
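
The change above starts propagating wait_event_timeout()'s return value instead of discarding it: the macro returns 0 when the full timeout elapsed with the condition still false, and otherwise the remaining jiffies (at least 1). A hypothetical poller illustrating the convention:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

static void poll_until(wait_queue_head_t *wq, bool *stop)
{
	long rem;

	while (!*stop) {
		rem = wait_event_timeout(*wq, *stop, 8 * HZ);
		if (!rem)
			continue;	/* full timeout elapsed, poll again */
		/* rem > 0: *stop went true with 'rem' jiffies to spare */
	}
}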
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 39269c3c5..3a4df6264 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -66,6 +66,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
unsigned int debug_mask;
int rc;
+ LASSERT(exp && !IS_ERR(exp));
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
if (!req)
@@ -101,19 +102,22 @@ static int seq_client_rpc(struct lu_client_seq *seq,
req->rq_no_delay = req->rq_no_resend = 1;
debug_mask = D_CONSOLE;
} else {
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
+ req->rq_reply_portal = MDC_REPLY_PORTAL;
req->rq_request_portal = SEQ_METADATA_PORTAL;
- else
+ } else {
+ req->rq_reply_portal = OSC_REPLY_PORTAL;
req->rq_request_portal = SEQ_DATA_PORTAL;
+ }
debug_mask = D_INFO;
}
ptlrpc_at_set_req_timeout(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
rc = ptlrpc_queue_wait(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 062f388cf..5a04e99d9 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -178,8 +178,9 @@ restart_fixup:
if (n_range->lsr_end <= c_range->lsr_end) {
*n_range = *c_range;
fld_cache_entry_delete(cache, f_curr);
- } else
+ } else {
n_range->lsr_start = c_range->lsr_end;
+ }
}
/* we could have overlap over next
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index e8a3caf20..75d6a4863 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -101,12 +101,6 @@ struct fld_cache {
unsigned int fci_no_shrink:1;
};
-enum fld_op {
- FLD_CREATE = 0,
- FLD_DELETE = 1,
- FLD_LOOKUP = 2
-};
-
enum {
/* 4M of FLD cache will not hurt client a lot. */
FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
@@ -126,7 +120,8 @@ enum {
extern struct lu_fld_hash fld_hash[];
int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op);
+ struct lu_seq_range *range, __u32 fld_op,
+ struct ptlrpc_request **reqp);
extern struct lprocfs_vars fld_client_debugfs_list[];
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index a3d122d85..304c0ec26 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -64,9 +64,9 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
};
@@ -75,15 +75,15 @@ static void fld_enter_request(struct client_obd *cli)
struct mdc_cache_waiter mcw;
struct l_wait_info lwi = { 0 };
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
} else {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
}
@@ -92,10 +92,9 @@ static void fld_exit_request(struct client_obd *cli)
struct list_head *l, *tmp;
struct mdc_cache_waiter *mcw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
/* No free request slots anymore */
break;
@@ -106,7 +105,7 @@ static void fld_exit_request(struct client_obd *cli)
cli->cl_r_in_flight++;
wake_up(&mcw->mcw_waitq);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
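
fld_enter_request()/fld_exit_request() form a small admission throttle, now under a plain spinlock: at most cl_max_rpcs_in_flight RPCs run concurrently, and excess callers park on a per-caller waitqueue until a slot frees. A stripped-down restatement of the exit path (types and names are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct waiter {
	struct list_head entry;
	wait_queue_head_t wq;
};

struct throttle {
	spinlock_t lock;
	int in_flight, max_in_flight;
	struct list_head waiters;	/* FIFO of struct waiter */
};

static void throttle_exit(struct throttle *t)
{
	struct waiter *w;

	spin_lock(&t->lock);
	t->in_flight--;
	if (!list_empty(&t->waiters) && t->in_flight < t->max_in_flight) {
		/* Hand the freed slot straight to the oldest waiter. */
		w = list_first_entry(&t->waiters, struct waiter, entry);
		list_del_init(&w->entry);
		t->in_flight++;
		wake_up(&w->wq);
	}
	spin_unlock(&t->lock);
}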
static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
@@ -392,55 +391,82 @@ void fld_client_fini(struct lu_client_fld *fld)
EXPORT_SYMBOL(fld_client_fini);
int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op)
+ struct lu_seq_range *range, __u32 fld_op,
+ struct ptlrpc_request **reqp)
{
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
struct lu_seq_range *prange;
__u32 *op;
- int rc;
+ int rc = 0;
struct obd_import *imp;
LASSERT(exp);
imp = class_exp2cliimp(exp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
- FLD_QUERY);
- if (!req)
- return -ENOMEM;
-
- op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
- *op = fld_op;
+ switch (fld_op) {
+ case FLD_QUERY:
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY,
+ LUSTRE_MDS_VERSION, FLD_QUERY);
+ if (!req)
+ return -ENOMEM;
+
+ /*
+	 * XXX: only needed when talking to an old server (< 2.6); it
+	 * should be removed once < 2.6 servers are no longer supported
+ */
+ op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
+ *op = FLD_LOOKUP;
+
+ if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
+ req->rq_allow_replay = 1;
+ break;
+ case FLD_READ:
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ,
+ LUSTRE_MDS_VERSION, FLD_READ);
+ if (!req)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA,
+ RCL_SERVER, PAGE_SIZE);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return rc;
prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
*prange = *range;
-
ptlrpc_request_set_replen(req);
req->rq_request_portal = FLD_REQUEST_PORTAL;
req->rq_reply_portal = MDC_REPLY_PORTAL;
ptlrpc_at_set_req_timeout(req);
- if (fld_op == FLD_LOOKUP &&
- imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
- req->rq_allow_replay = 1;
-
- if (fld_op != FLD_LOOKUP)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
fld_enter_request(&exp->exp_obd->u.cli);
rc = ptlrpc_queue_wait(req);
fld_exit_request(&exp->exp_obd->u.cli);
- if (fld_op != FLD_LOOKUP)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
- prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (!prange) {
- rc = -EFAULT;
- goto out_req;
+ if (fld_op == FLD_QUERY) {
+ prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
+ if (!prange) {
+ rc = -EFAULT;
+ goto out_req;
+ }
+ *range = *prange;
}
- *range = *prange;
+
out_req:
- ptlrpc_req_finished(req);
+ if (rc || !reqp) {
+ ptlrpc_req_finished(req);
+ req = NULL;
+ }
+
+ if (reqp)
+ *reqp = req;
+
return rc;
}
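
The new reqp out-parameter lets a caller keep the reply buffer alive past the RPC: passing NULL preserves the old fire-and-free behaviour (as fld_client_lookup() below does), while a non-NULL pointer receives the request on success and the caller releases it. A hedged sketch of the second convention, assuming exp and seq are already in scope:

struct lu_seq_range range = { .lsr_start = seq };
struct ptlrpc_request *req = NULL;
int rc;

rc = fld_client_rpc(exp, &range, FLD_READ, &req);
if (!rc && req) {
	/* ... consume the RMF_GENERIC_DATA reply buffer here ... */
	ptlrpc_req_finished(req);
}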
@@ -468,7 +494,7 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
res.lsr_start = seq;
fld_range_set_type(&res, flags);
- rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
+ rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
if (rc == 0) {
*mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index fb971ded5..d4c33dd11 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -82,7 +82,6 @@
* - i_mutex
* - PG_locked
* - cl_object_header::coh_page_guard
- * - cl_object_header::coh_lock_guard
* - lu_site::ls_guard
*
* See the top comment in cl_object.c for the description of overall locking and
@@ -98,9 +97,12 @@
* super-class definitions.
*/
#include "lu_object.h"
+#include <linux/atomic.h>
#include "linux/lustre_compat25.h"
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
struct inode;
@@ -138,7 +140,7 @@ struct cl_device_operations {
* cl_req_slice_add().
*
* \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see ccc_req_init()
+ * \see vvp_req_init()
*/
int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
struct cl_req *req);
@@ -147,7 +149,7 @@ struct cl_device_operations {
/**
* Device in the client stack.
*
- * \see ccc_device, lov_device, lovsub_device, osc_device
+ * \see vvp_device, lov_device, lovsub_device, osc_device
*/
struct cl_device {
/** Super-class. */
@@ -243,7 +245,7 @@ enum cl_attr_valid {
* be discarded from the memory, all its sub-objects are torn-down and
* destroyed too.
*
- * \see ccc_object, lov_object, lovsub_object, osc_object
+ * \see vvp_object, lov_object, lovsub_object, osc_object
*/
struct cl_object {
/** super class */
@@ -322,7 +324,7 @@ struct cl_object_operations {
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
@@ -383,11 +385,17 @@ struct cl_object_operations {
* object. Layers are supposed to fill parts of \a lvb that will be
* shipped to the glimpse originator as a glimpse result.
*
- * \see ccc_object_glimpse(), lovsub_object_glimpse(),
+ * \see vvp_object_glimpse(), lovsub_object_glimpse(),
* \see osc_object_glimpse()
*/
int (*coo_glimpse)(const struct lu_env *env,
const struct cl_object *obj, struct ost_lvb *lvb);
+ /**
+ * Object prune method. Called when the layout is going to change on
+	 * this object, so each layer has to clean up its cache,
+ * mainly pages and locks.
+ */
+ int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
};
/**
@@ -398,22 +406,6 @@ struct cl_object_header {
* here.
*/
struct lu_object_header coh_lu;
- /** \name locks
- * \todo XXX move locks below to the separate cache-lines, they are
- * mostly useless otherwise.
- */
- /** @{ */
- /** Lock protecting page tree. */
- spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** Radix tree of cl_page's, cached for this object. */
- struct radix_tree_root coh_tree;
- /** # of pages in radix tree. */
- unsigned long coh_pages;
- /** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
@@ -460,10 +452,6 @@ struct cl_object_header {
co_lu.lo_linkage)
/** @} cl_object */
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
@@ -727,16 +715,10 @@ struct cl_page {
atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
- /** Logical page index within the object. Immutable after creation. */
- pgoff_t cp_index;
/** List of slices. Immutable after creation. */
struct list_head cp_layers;
- /** Parent page, NULL for top-level page. Immutable after creation. */
- struct cl_page *cp_parent;
- /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation.
- */
- struct cl_page *cp_child;
+ /** vmpage */
+ struct page *cp_vmpage;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
@@ -787,10 +769,11 @@ struct cl_page {
/**
* Per-layer part of cl_page.
*
- * \see ccc_page, lov_page, osc_page
+ * \see vvp_page, lov_page, osc_page
*/
struct cl_page_slice {
struct cl_page *cpl_page;
+ pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
@@ -804,16 +787,9 @@ struct cl_page_slice {
/**
* Lock mode. For the client extent locks.
*
- * \warning: cl_lock_mode_match() assumes particular ordering here.
* \ingroup cl_lock
*/
enum cl_lock_mode {
- /**
- * Mode of a lock that protects no data, and exists only as a
- * placeholder. This is used for `glimpse' requests. A phantom lock
- * might get promoted to real lock at some point.
- */
- CLM_PHANTOM,
CLM_READ,
CLM_WRITE,
CLM_GROUP
@@ -846,11 +822,6 @@ struct cl_page_operations {
*/
/**
- * \return the underlying VM page. Optional.
- */
- struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
* Called when \a io acquires this page into the exclusive
	 * ownership. When this method returns, it is guaranteed that the page is
* not owned by other io, and no transfer is going on against
@@ -897,14 +868,6 @@ struct cl_page_operations {
void (*cpo_export)(const struct lu_env *env,
const struct cl_page_slice *slice, int uptodate);
/**
- * Unmaps page from the user space (if it is mapped).
- *
- * \see cl_page_unmap()
- * \see vvp_page_unmap()
- */
- int (*cpo_unmap)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
* Checks whether underlying VM page is locked (in the suitable
* sense). Used for assertions.
*
@@ -957,7 +920,7 @@ struct cl_page_operations {
*/
int (*cpo_is_under_lock)(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *io);
+ struct cl_io *io, pgoff_t *max);
/**
* Optional debugging helper. Prints given page slice.
@@ -1027,26 +990,6 @@ struct cl_page_operations {
*/
int (*cpo_make_ready)(const struct lu_env *env,
const struct cl_page_slice *slice);
- /**
- * Announce that this page is to be written out
- * opportunistically, that is, page is dirty, it is not
- * necessary to start write-out transfer right now, but
- * eventually page has to be written out.
- *
- * Main caller of this is the write path (see
- * vvp_io_commit_write()), using this method to build a
- * "transfer cache" from which large transfers are then
- * constructed by the req-formation engine.
- *
- * \todo XXX it would make sense to add page-age tracking
- * semantics here, and to oblige the req-formation engine to
- * send the page out not later than it is too old.
- *
- * \see cl_page_cache_add()
- */
- int (*cpo_cache_add)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
@@ -1098,9 +1041,8 @@ struct cl_page_operations {
*/
#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -1111,9 +1053,8 @@ do { \
*/
#define CL_PAGE_HEADER(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -1130,6 +1071,12 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
#define cl_page_in_use(pg) __page_in_use(pg, 1)
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+static inline struct page *cl_page_vmpage(struct cl_page *page)
+{
+ LASSERT(page->cp_vmpage);
+ return page->cp_vmpage;
+}
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
@@ -1150,12 +1097,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
* cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
*
- * All locks for a given object are linked into cl_object_header::coh_locks
- * list (protected by cl_object_header::coh_lock_guard spin-lock) through
- * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can
- * sort it in starting lock offset, or use altogether different data structure
- * like a tree.
- *
* Typical cl_lock consists of the two layers:
*
* - vvp_lock (vvp specific data), and
@@ -1177,111 +1118,29 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
*
* LIFE CYCLE
*
- * cl_lock is reference counted. When reference counter drops to 0, lock is
- * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
- * lock is destroyed when last reference is released. Referencing between
- * top-lock and its sub-locks is described in the lov documentation module.
- *
- * STATE MACHINE
- *
- * Also, cl_lock is a state machine. This requires some clarification. One of
- * the goals of client IO re-write was to make IO path non-blocking, or at
- * least to make it easier to make it non-blocking in the future. Here
- * `non-blocking' means that when a system call (read, write, truncate)
- * reaches a situation where it has to wait for a communication with the
- * server, it should --instead of waiting-- remember its current state and
- * switch to some other work. E.g,. instead of waiting for a lock enqueue,
- * client should proceed doing IO on the next stripe, etc. Obviously this is
- * rather radical redesign, and it is not planned to be fully implemented at
- * this time, instead we are putting some infrastructure in place, that would
- * make it easier to do asynchronous non-blocking IO easier in the
- * future. Specifically, where old locking code goes to sleep (waiting for
- * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
- * enqueue reply comes, its completion handler signals that lock state-machine
- * is ready to transit to the next state. There is some generic code in
- * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
- * this cl_lock.c code, it looks like locking is done in normal blocking
- * fashion, and it the same time it is possible to switch to the non-blocking
- * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
- * functions).
- *
- * For a description of state machine states and transitions see enum
- * cl_lock_state.
- *
- * There are two ways to restrict a set of states which lock might move to:
- *
- * - placing a "hold" on a lock guarantees that lock will not be moved
- * into cl_lock_state::CLS_FREEING state until hold is released. Hold
- * can be only acquired on a lock that is not in
- * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
- * cl_lock::cll_holds. Hold protects lock from cancellation and
- * destruction. Requests to cancel and destroy a lock on hold will be
- * recorded, but only honored when last hold on a lock is released;
- *
- * - placing a "user" on a lock guarantees that lock will not leave
- * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
- * states, once it enters this set. That is, if a user is added onto a
- * lock in a state not from this set, it doesn't immediately enforce
- * lock to move to this set, but once lock enters this set it will
- * remain there until all users are removed. Lock users are counted in
- * cl_lock::cll_users.
- *
- * User is used to assure that lock is not canceled or destroyed while
- * it is being enqueued, or actively used by some IO.
- *
- * Currently, a user always comes with a hold (cl_lock_invariant()
- * checks that a number of holds is not less than a number of users).
- *
- * CONCURRENCY
- *
- * This is how lock state-machine operates. struct cl_lock contains a mutex
- * cl_lock::cll_guard that protects struct fields.
- *
- * - mutex is taken, and cl_lock::cll_state is examined.
- *
- * - for every state there are possible target states where lock can move
- * into. They are tried in order. Attempts to move into next state are
- * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
- *
- * - if the transition can be performed immediately, state is changed,
- * and mutex is released.
- *
- * - if the transition requires blocking, _try() function returns
- * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
- * sleep, waiting for possibility of lock state change. It is woken
- * up when some event occurs, that makes lock state change possible
- * (e.g., the reception of the reply from the server), and repeats
- * the loop.
- *
- * Top-lock and sub-lock has separate mutexes and the latter has to be taken
- * first to avoid dead-lock.
- *
- * To see an example of interaction of all these issues, take a look at the
- * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
- * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
- * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
- * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
- * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
- * done in parallel, rather than one after another (this is used for glimpse
- * locks, that cannot dead-lock).
+ * cl_lock is a cacheless data container describing the locks an I/O needs in
+ * order to complete. A cl_lock is created before the I/O starts and destroyed
+ * when the I/O is complete.
+ *
+ * cl_lock depends on an LDLM lock to fulfill lock semantics; the LDLM lock is
+ * attached to the cl_lock at the OSC layer and remains cacheable.
*
* INTERFACE AND USAGE
*
- * struct cl_lock_operations provide a number of call-backs that are invoked
- * when events of interest occurs. Layers can intercept and handle glimpse,
- * blocking, cancel ASTs and a reception of the reply from the server.
+ * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
+ * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
+ * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
+ * consists of multiple sub cl_locks, each sub lock will be enqueued
+ * correspondingly. At the OSC layer, the lock enqueue request will try to
+ * reuse a cached LDLM lock; otherwise a new LDLM lock has to be requested
+ * from the OST side.
*
- * One important difference with the old client locking model is that new
- * client has a representation for the top-lock, whereas in the old code only
- * sub-locks existed as real data structures and file-level locks are
- * represented by "request sets" that are created and destroyed on each and
- * every lock creation.
+ * cl_lock_cancel() must be called to release a cl_lock after use. The
+ * clo_cancel() method is called for each layer to release the resources held
+ * by this lock. At the OSC layer, the reference on the LDLM lock taken at
+ * clo_enqueue time is released.
*
- * Top-locks are cached, and can be found in the cache by the system calls. It
- * is possible that top-lock is in cache, but some of its sub-locks were
- * canceled and destroyed. In that case top-lock has to be enqueued again
- * before it can be used.
+ * LDLM lock can only be canceled if there is no cl_lock using it.
*
 * Overall process of the locking during an IO operation is as follows:
*
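
Putting the rewritten life cycle together, a sketch of the flow the comment describes; cl_lock_request() and cl_lock_cancel() are named in the text above, but their exact signatures and the perform_io() step here are assumptions:

static int do_locked_io(const struct lu_env *env, struct cl_io *io,
			struct cl_lock *lock)
{
	int rc;

	rc = cl_lock_request(env, io, lock);	/* clo_enqueue() per layer */
	if (rc)
		return rc;

	rc = perform_io(env, io);		/* hypothetical I/O step */

	cl_lock_cancel(env, lock);		/* clo_cancel() per layer */
	return rc;
}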
@@ -1294,7 +1153,7 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
*
* - when all locks are acquired, IO is performed;
*
- * - locks are released into cache.
+ * - locks are released after IO is complete.
*
* Striping introduces major additional complexity into locking. The
* fundamental problem is that it is generally unsafe to actively use (hold)
@@ -1316,16 +1175,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* buf is a part of memory mapped Lustre file, a lock or locks protecting buf
* has to be held together with the usual lock on [offset, offset + count].
*
- * As multi-stripe locks have to be allowed, it makes sense to cache them, so
- * that, for example, a sequence of O_APPEND writes can proceed quickly
- * without going down to the individual stripes to do lock matching. On the
- * other hand, multi-stripe locks shouldn't be used by normal read/write
- * calls. To achieve this, every layer can implement ->clo_fits_into() method,
- * that is called by lock matching code (cl_lock_lookup()), and that can be
- * used to selectively disable matching of certain locks for certain IOs. For
- * example, lov layer implements lov_lock_fits_into() that allow multi-stripe
- * locks to be matched only for truncates and O_APPEND writes.
- *
* Interaction with DLM
*
* In the expected setup, cl_lock is ultimately backed up by a collection of
@@ -1356,295 +1205,27 @@ struct cl_lock_descr {
__u32 cld_enq_flags;
};
-#define DDESCR "%s(%d):[%lu, %lu]"
+#define DDESCR "%s(%d):[%lu, %lu]:%x"
#define PDESCR(descr) \
cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
- (descr)->cld_start, (descr)->cld_end
+ (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
const char *cl_lock_mode_name(const enum cl_lock_mode mode);
/**
- * Lock state-machine states.
- *
- * \htmlonly
- * <pre>
- *
- * Possible state transitions:
- *
- * +------------------>NEW
- * | |
- * | | cl_enqueue_try()
- * | |
- * | cl_unuse_try() V
- * | +--------------QUEUING (*)
- * | | |
- * | | | cl_enqueue_try()
- * | | |
- * | | cl_unuse_try() V
- * sub-lock | +-------------ENQUEUED (*)
- * canceled | | |
- * | | | cl_wait_try()
- * | | |
- * | | (R)
- * | | |
- * | | V
- * | | HELD<---------+
- * | | | |
- * | | | | cl_use_try()
- * | | cl_unuse_try() | |
- * | | | |
- * | | V ---+
- * | +------------>INTRANSIT (D) <--+
- * | | |
- * | cl_unuse_try() | | cached lock found
- * | | | cl_use_try()
- * | | |
- * | V |
- * +------------------CACHED---------+
- * |
- * (C)
- * |
- * V
- * FREEING
- *
- * Legend:
- *
- * In states marked with (*) transition to the same state (i.e., a loop
- * in the diagram) is possible.
- *
- * (R) is the point where Receive call-back is invoked: it allows layers
- * to handle arrival of lock reply.
- *
- * (C) is the point where Cancellation call-back is invoked.
- *
- * (D) is the transit state which means the lock is changing.
- *
- * Transition to FREEING state is possible from any other state in the
- * diagram in case of unrecoverable error.
- * </pre>
- * \endhtmlonly
- *
- * These states are for individual cl_lock object. Top-lock and its sub-locks
- * can be in the different states. Another way to say this is that we have
- * nested state-machines.
- *
- * Separate QUEUING and ENQUEUED states are needed to support non-blocking
- * operation for locks with multiple sub-locks. Imagine lock on a file F, that
- * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send
- * enqueue to S0, wait for its completion, then send enqueue for S1, wait for
- * its completion and at last enqueue lock for S2, and wait for its
- * completion. In that case, top-lock is in QUEUING state while S0, S1 are
- * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
- * that in this case, sub-locks move from state to state, and top-lock remains
- * in the same state).
- */
-enum cl_lock_state {
- /**
- * Lock that wasn't yet enqueued
- */
- CLS_NEW,
- /**
- * Enqueue is in progress, blocking for some intermediate interaction
- * with the other side.
- */
- CLS_QUEUING,
- /**
- * Lock is fully enqueued, waiting for server to reply when it is
- * granted.
- */
- CLS_ENQUEUED,
- /**
- * Lock granted, actively used by some IO.
- */
- CLS_HELD,
- /**
- * This state is used to mark the lock is being used, or unused.
- * We need this state because the lock may have several sublocks,
- * so it's impossible to have an atomic way to bring all sublocks
- * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
- * at unuse case.
- * If a thread is referring to a lock, and it sees the lock is in this
- * state, it must wait for the lock.
- * See state diagram for details.
- */
- CLS_INTRANSIT,
- /**
- * Lock granted, not used.
- */
- CLS_CACHED,
- /**
- * Lock is being destroyed.
- */
- CLS_FREEING,
- CLS_NR
-};
-
-enum cl_lock_flags {
- /**
- * lock has been cancelled. This flag is never cleared once set (by
- * cl_lock_cancel0()).
- */
- CLF_CANCELLED = 1 << 0,
- /** cancellation is pending for this lock. */
- CLF_CANCELPEND = 1 << 1,
- /** destruction is pending for this lock. */
- CLF_DOOMED = 1 << 2,
- /** from enqueue RPC reply upcall. */
- CLF_FROM_UPCALL = 1 << 3,
-};
-
-/**
- * Lock closure.
- *
- * Lock closure is a collection of locks (both top-locks and sub-locks) that
- * might be updated in a result of an operation on a certain lock (which lock
- * this is a closure of).
- *
- * Closures are needed to guarantee dead-lock freedom in the presence of
- *
- * - nested state-machines (top-lock state-machine composed of sub-lock
- * state-machines), and
- *
- * - shared sub-locks.
- *
- * Specifically, many operations, such as lock enqueue, wait, unlock,
- * etc. start from a top-lock, and then operate on a sub-locks of this
- * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
- * of such operation, this change has to be propagated to all top-locks that
- * share this sub-lock. Obviously, no natural lock ordering (e.g.,
- * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
- * to be used. Lock closure systematizes this try-and-repeat logic.
- */
-struct cl_lock_closure {
- /**
- * Lock that is mutexed when closure construction is started. When
- * closure in is `wait' mode (cl_lock_closure::clc_wait), mutex on
- * origin is released before waiting.
- */
- struct cl_lock *clc_origin;
- /**
- * List of enclosed locks, so far. Locks are linked here through
- * cl_lock::cll_inclosure.
- */
- struct list_head clc_list;
- /**
- * True iff closure is in a `wait' mode. This determines what
- * cl_lock_enclosure() does when a lock L to be added to the closure
- * is currently mutexed by some other thread.
- *
- * If cl_lock_closure::clc_wait is not set, then closure construction
- * fails with CLO_REPEAT immediately.
- *
- * In wait mode, cl_lock_enclosure() waits until next attempt to build
- * a closure might succeed. To this end it releases an origin mutex
- * (cl_lock_closure::clc_origin), that has to be the only lock mutex
- * owned by the current thread, and then waits on L mutex (by grabbing
- * it and immediately releasing), before returning CLO_REPEAT to the
- * caller.
- */
- int clc_wait;
- /** Number of locks in the closure. */
- int clc_nr;
-};
-
-/**
* Layered client lock.
*/
struct cl_lock {
- /** Reference counter. */
- atomic_t cll_ref;
/** List of slices. Immutable after creation. */
struct list_head cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- struct list_head cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ /** lock attribute, extent, cl_object, etc. */
struct cl_lock_descr cll_descr;
- /** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- wait_queue_head_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
- struct mutex cll_guard;
- struct task_struct *cll_guarder;
- int cll_depth;
-
- /**
- * the owner for INTRANSIT state
- */
- struct task_struct *cll_intransit_owner;
- int cll_error;
- /**
- * Number of holds on a lock. A hold prevents a lock from being
- * canceled and destroyed. Protected by cl_lock::cll_guard.
- *
- * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
- */
- int cll_holds;
- /**
- * Number of lock users. Valid in cl_lock_state::CLS_HELD state
- * only. Lock user pins lock in CLS_HELD state. Protected by
- * cl_lock::cll_guard.
- *
- * \see cl_wait(), cl_unuse().
- */
- int cll_users;
- /**
- * Flag bit-mask. Values from enum cl_lock_flags. Updates are
- * protected by cl_lock::cll_guard.
- */
- unsigned long cll_flags;
- /**
- * A linkage into a list of locks in a closure.
- *
- * \see cl_lock_closure
- */
- struct list_head cll_inclosure;
- /**
- * Confict lock at queuing time.
- */
- struct cl_lock *cll_conflict;
- /**
- * A list of references to this lock, for debugging.
- */
- struct lu_ref cll_reference;
- /**
- * A list of holds on this lock, for debugging.
- */
- struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link cll_obj_ref;
-#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
-#endif
};
/**
* Per-layer part of cl_lock
*
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
@@ -1658,174 +1239,36 @@ struct cl_lock_slice {
};
/**
- * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
- *
- * NOTE: lov_subresult() depends on ordering here.
- */
-enum cl_lock_transition {
- /** operation cannot be completed immediately. Wait for state change. */
- CLO_WAIT = 1,
- /** operation had to release lock mutex, restart. */
- CLO_REPEAT = 2,
- /** lower layer re-enqueued. */
- CLO_REENQUEUED = 3,
-};
-
-/**
*
* \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
- /**
- * \name statemachine
- *
- * State machine transitions. These 3 methods are called to transfer
- * lock from one state to another, as described in the commentary
- * above enum #cl_lock_state.
- *
- * \retval 0 this layer has nothing more to do to before
- * transition to the target state happens;
- *
- * \retval CLO_REPEAT method had to release and re-acquire cl_lock
- * mutex, repeat invocation of transition method
- * across all layers;
- *
- * \retval CLO_WAIT this layer cannot move to the target state
- * immediately, as it has to wait for certain event
- * (e.g., the communication with the server). It
- * is guaranteed, that when the state transfer
- * becomes possible, cl_lock::cll_wq wait-queue
- * is signaled. Caller can wait for this event by
- * calling cl_lock_state_wait();
- *
- * \retval -ve failure, abort state transition, move the lock
- * into cl_lock_state::CLS_FREEING state, and set
- * cl_lock::cll_error.
- *
- * Once all layers voted to agree to transition (by returning 0), lock
- * is moved into corresponding target state. All state transition
- * methods are optional.
- */
/** @{ */
/**
* Attempts to enqueue the lock. Called top-to-bottom.
*
- * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+ * \retval 0 this layer has enqueued the lock successfully
+	 * \retval >0 this layer has enqueued the lock, but needs to wait on
+ * @anchor for resources
+ * \retval -ve failure
+ *
+ * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
* \see osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
+ struct cl_io *io, struct cl_sync_io *anchor);
/**
- * Attempts to wait for enqueue result. Called top-to-bottom.
- *
- * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
- */
- int (*clo_wait)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Attempts to unlock the lock. Called bottom-to-top. In addition to
- * usual return values of lock state-machine methods, this can return
- * -ESTALE to indicate that lock cannot be returned to the cache, and
- * has to be re-initialized.
- * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
- */
- int (*clo_unuse)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Notifies layer that cached lock is started being used.
- *
- * \pre lock->cll_state == CLS_CACHED
- *
- * \see lov_lock_use(), osc_lock_use()
- */
- int (*clo_use)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} statemachine */
- /**
- * A method invoked when lock state is changed (as a result of state
- * transition). This is used, for example, to track when the state of
- * a sub-lock changes, to propagate this change to the corresponding
- * top-lock. Optional
- *
- * \see lovsub_lock_state()
- */
- void (*clo_state)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state st);
- /**
- * Returns true, iff given lock is suitable for the given io, idea
- * being, that there are certain "unsafe" locks, e.g., ones acquired
- * for O_APPEND writes, that we don't want to re-use for a normal
- * write, to avoid the danger of cascading evictions. Optional. Runs
- * under cl_object_header::coh_lock_guard.
- *
- * XXX this should take more information about lock needed by
- * io. Probably lock description or something similar.
- *
- * \see lov_fits_into()
- */
- int (*clo_fits_into)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
- /**
- * \name ast
- * Asynchronous System Traps. All of then are optional, all are
- * executed bottom-to-top.
- */
- /** @{ */
-
- /**
- * Cancellation callback. Cancel a lock voluntarily, or under
- * the request of server.
+	 * Cancel a lock and release its DLM lock ref, without cancelling
+	 * the DLM lock itself
*/
void (*clo_cancel)(const struct lu_env *env,
const struct cl_lock_slice *slice);
- /**
- * Lock weighting ast. Executed to estimate how precious this lock
- * is. The sum of results across all layers is used to determine
- * whether lock worth keeping in cache given present memory usage.
- *
- * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
- */
- unsigned long (*clo_weigh)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} ast */
-
- /**
- * \see lovsub_lock_closure()
- */
- int (*clo_closure)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure);
- /**
- * Executed bottom-to-top when lock description changes (e.g., as a
- * result of server granting more generous lock than was requested).
- *
- * \see lovsub_lock_modify()
- */
- int (*clo_modify)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *updated);
- /**
- * Notifies layers (bottom-to-top) that lock is going to be
- * destroyed. Responsibility of layers is to prevent new references on
- * this lock from being acquired once this method returns.
- *
- * This can be called multiple times due to the races.
- *
- * \see cl_lock_delete()
- * \see osc_lock_delete(), lovsub_lock_delete()
- */
- void (*clo_delete)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
+ /** @} */
/**
* Destructor. Frees resources and the slice.
*
- * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+ * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
* \see osc_lock_fini()
*/
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
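
Given the clo_enqueue() return convention above (0 done, >0 parked on the anchor, negative failure), a top-level enqueue plausibly drives the slices like this; the cls_ops member name is an assumption, while cll_layers/cls_linkage come from the surrounding header:

static int lock_enqueue_all(const struct lu_env *env, struct cl_lock *lock,
			    struct cl_io *io, struct cl_sync_io *anchor)
{
	struct cl_lock_slice *slice;
	int rc = 0;

	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor);
		if (rc < 0)
			break;	/* failure aborts the whole enqueue */
		/* rc > 0: this layer parked work on @anchor; the caller
		 * waits on the anchor after all layers have been driven. */
	}
	return rc;
}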
@@ -2016,7 +1459,7 @@ enum cl_io_state {
* This is usually embedded into layer session data, rather than allocated
* dynamically.
*
- * \see vvp_io, lov_io, osc_io, ccc_io
+ * \see vvp_io, lov_io, osc_io
*/
struct cl_io_slice {
struct cl_io *cis_io;
@@ -2031,6 +1474,8 @@ struct cl_io_slice {
struct list_head cis_linkage;
};
+typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
+ struct cl_page *);
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -2114,7 +1559,7 @@ struct cl_io_operations {
void (*cio_fini)(const struct lu_env *env,
const struct cl_io_slice *slice);
} op[CIT_OP_NR];
- struct {
+
/**
* Submit pages from \a queue->c2_qin for IO, and move
* successfully submitted pages into \a queue->c2_qout. Return
@@ -2127,7 +1572,15 @@ struct cl_io_operations {
const struct cl_io_slice *slice,
enum cl_req_type crt,
struct cl_2queue *queue);
- } req_op[CRT_NR];
+ /**
+ * Queue async pages for write.
+ * The difference between cio_submit and cio_commit_async is that
+ * cio_submit is for urgent requests.
+ */
+ int (*cio_commit_async)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
/**
* Read missing page.
*
@@ -2140,31 +1593,6 @@ struct cl_io_operations {
const struct cl_io_slice *slice,
const struct cl_page_slice *page);
/**
- * Prepare write of a \a page. Called bottom-to-top by a top-level
- * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
- * get data from user-level buffer.
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_prepare_write(), lov_io_prepare_write(),
- * osc_io_prepare_write().
- */
- int (*cio_prepare_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_commit_write(), lov_io_commit_write(),
- * osc_io_commit_write().
- */
- int (*cio_commit_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
* Optional debugging helper. Print given io slice.
*/
int (*cio_print)(const struct lu_env *env, void *cookie,
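
/*
 * Editor's sketch (not part of this patch): how the new commit path is
 * meant to be driven through cl_io_commit_async(), whose prototype is
 * added further down in this header. demo_commit_cb/demo_commit are
 * hypothetical names; the [from, to) byte range covers whole pages here.
 */
static void demo_commit_cb(const struct lu_env *env, struct cl_io *io,
			   struct cl_page *page)
{
	/* per-page completion hook, e.g. mark the vmpage clean */
}

static int demo_commit(const struct lu_env *env, struct cl_io *io,
		       struct cl_page_list *queue)
{
	/* queue full pages for asynchronous write-out */
	return cl_io_commit_async(env, io, queue, 0, PAGE_SIZE,
				  demo_commit_cb);
}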
@@ -2216,9 +1644,13 @@ enum cl_enq_flags {
*/
CEF_AGL = 0x00000020,
/**
+ * enqueue a lock to test whether a matching DLM lock exists.
+ */
+ CEF_PEEK = 0x00000040,
+ /**
* mask of enq_flags.
*/
- CEF_MASK = 0x0000003f,
+ CEF_MASK = 0x0000007f,
};
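
/*
 * Illustration (ours): CEF_PEEK (0x40) adds a seventh flag bit, so
 * CEF_MASK grows from 0x3f to 0x7f. A compile-time check in the style
 * of this codebase's CLASSERT() makes the invariant explicit.
 */
static inline void cef_mask_check(void)
{
	CLASSERT(CEF_MASK == (CEF_PEEK << 1) - 1);
}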
/**
@@ -2228,12 +1660,12 @@ enum cl_enq_flags {
struct cl_io_lock_link {
/** linkage into one of cl_lockset lists. */
struct list_head cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
+ struct cl_lock cill_lock;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
struct cl_io_lock_link *link);
};
+#define cill_descr cill_lock.cll_descr
/**
* Lock-set represents a collection of locks, that io needs at a
@@ -2267,8 +1699,6 @@ struct cl_io_lock_link {
struct cl_lockset {
/** locks to be acquired. */
struct list_head cls_todo;
- /** locks currently being processed. */
- struct list_head cls_curr;
/** locks acquired. */
struct list_head cls_done;
};
@@ -2632,9 +2062,7 @@ struct cl_site {
* and top-locks (and top-pages) are accounted here.
*/
struct cache_stats cs_pages;
- struct cache_stats cs_locks;
atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
};
int cl_site_init(struct cl_site *s, struct cl_device *top);
@@ -2725,7 +2153,7 @@ static inline void cl_device_fini(struct cl_device *d)
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
+ struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
@@ -2758,7 +2186,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj);
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
/**
@@ -2772,7 +2200,7 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
static inline void cl_object_page_init(struct cl_object *clob, int size)
{
clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
- cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+ cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
}
static inline void *cl_object_page_slice(struct cl_object *clob,
@@ -2781,6 +2209,16 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
return (void *)((char *)page + clob->co_slice_off);
}
+/**
+ * Return refcount of cl_object.
+ */
+static inline int cl_object_refc(struct cl_object *clob)
+{
+ struct lu_object_header *header = clob->co_lu.lo_header;
+
+ return atomic_read(&header->loh_ref);
+}
+
/** @} cl_object */
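
/*
 * Worked example (hypothetical sizes, ours): cfs_size_round() rounds
 * up to a multiple of 8, so a 40-byte vvp page slice followed by a
 * 20-byte lov slice gets offsets 0 and 40, and the per-page buffer
 * grows to 40 + cfs_size_round(20) = 40 + 24 = 64 bytes.
 */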
/** \defgroup cl_page cl_page
@@ -2794,28 +2232,20 @@ enum {
};
/* callback of cl_page_gang_lookup() */
-typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
- struct cl_page *, void *);
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
-struct cl_page *cl_page_find_sub(const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent);
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type);
void cl_page_get(struct cl_page *page);
void cl_page_put(const struct lu_env *env, struct cl_page *page);
void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
const struct cl_page *pg);
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg);
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top(struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
@@ -2872,12 +2302,10 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
void cl_page_discard(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
+ struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
int cl_page_size(const struct cl_object *obj);
@@ -2890,138 +2318,66 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
const struct cl_lock_descr *descr);
/* @} helper */
+/**
+ * Data structure managing a client's cached pages: it keeps a count
+ * of "unstable" pages and an LRU of clean pages. "Unstable" pages
+ * are pages pinned by the ptlrpc layer for recovery purposes.
+ */
+struct cl_client_cache {
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+ * # of threads currently shrinking the LRU
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * # of LRU entries available
+ */
+ atomic_t ccc_lru_left;
+ /**
+ * List of entities (OSCs) using this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_t ccc_unstable_nr;
+ /**
+ * Wait queue for the unstable page count to reach zero.
+ * Used at umount time and signaled on BRW commit
+ */
+ wait_queue_head_t ccc_unstable_waitq;
+};
+
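
/*
 * Usage sketch (assumed, not part of this patch): at umount time a
 * client can block until every unstable page has been committed,
 * relying on the BRW commit path to wake ccc_unstable_waitq.
 */
static inline void demo_wait_unstable(struct cl_client_cache *cache)
{
	wait_event(cache->ccc_unstable_waitq,
		   atomic_read(&cache->ccc_unstable_nr) == 0);
}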
/** @} cl_page */
/** \defgroup cl_lock cl_lock
* @{
*/
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except, int pending,
- int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- struct cl_lock *except,
- int pending, int canceld)
-{
- LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
- return cl_lock_at_pgoff(env, obj, page->cp_index, except,
- pending, canceld);
-}
-
+int cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock);
+int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_io *io);
+void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
-
-void cl_lock_get(struct cl_lock *lock);
-void cl_lock_get_trust(struct cl_lock *lock);
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock);
-
-int cl_lock_is_intransit(struct cl_lock *lock);
-
-int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
- int keep_mutex);
-
-/** \name statemachine statemachine
- * Interface to lock state machine consists of 3 parts:
- *
- * - "try" functions that attempt to effect a state transition. If state
- * transition is not possible right now (e.g., if it has to wait for some
- * asynchronous event to occur), these functions return
- * cl_lock_transition::CLO_WAIT.
- *
- * - "non-try" functions that implement synchronous blocking interface on
- * top of non-blocking "try" functions. These functions repeatedly call
- * corresponding "try" versions, and if state transition is not possible
- * immediately, wait for lock state change.
- *
- * - methods from cl_lock_operations, called by "try" functions. Lock can
- * be advanced to the target state only when all layers voted that they
- * are ready for this transition. "Try" functions call methods under lock
- * mutex. If a layer had to release a mutex, it re-acquires it and returns
- * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
- * layers again.
- *
- * TRY NON-TRY METHOD FINAL STATE
- *
- * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
- *
- * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
- *
- * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
- *
- * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
- *
- * @{
- */
-
-int cl_wait(const struct lu_env *env, struct cl_lock *lock);
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock);
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags);
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock);
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic);
-
-/** @} statemachine */
-
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state);
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need);
-
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_is_mutexed(struct cl_lock *lock);
-int cl_lock_nr_mutexed(const struct lu_env *env);
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need);
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc);
-
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait);
-void cl_lock_closure_fini(struct cl_lock_closure *closure);
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure);
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock, struct cl_sync_io *anchor);
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error);
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
/** @} cl_lock */
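
/*
 * Sketch of the simplified lock lifecycle (our illustration; error
 * paths elided, and the exact split of work between cl_lock_request()
 * and cl_lock_init()/cl_lock_fini() is assumed from the declarations
 * above):
 */
static int demo_lock_cycle(const struct lu_env *env, struct cl_io *io,
			   struct cl_lock *lock)
{
	int rc;

	/* caller fills in lock->cll_descr before requesting */
	rc = cl_lock_request(env, io, lock);
	if (rc < 0)
		return rc;
	/* ... perform I/O under the lock ... */
	cl_lock_release(env, lock);
	return 0;
}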
@@ -3050,15 +2406,14 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr);
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
long timeout);
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
int cl_io_is_going(const struct lu_env *env);
/**
@@ -3114,6 +2469,12 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
}
+static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+}
+
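/*
 * Illustration (ours): together with cl_page_list_del() declared
 * below, the new cl_page_list_first() lets a caller drain a page
 * list from the head.
 */
static inline void demo_drain(const struct lu_env *env,
			      struct cl_page_list *plist)
{
	while (plist->pl_nr > 0)
		cl_page_list_del(env, plist, cl_page_list_first(plist));
}
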
/**
* Iterate over pages in a page list.
*/
@@ -3130,9 +2491,14 @@ void cl_page_list_init(struct cl_page_list *plist);
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
struct cl_page *page);
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
+void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
+ struct cl_page *page);
void cl_page_list_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
void cl_2queue_init(struct cl_2queue *queue);
void cl_2queue_disown(const struct lu_env *env,
@@ -3177,13 +2543,18 @@ struct cl_sync_io {
atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
+ /** callback to invoke when this IO is finished */
+ void (*csi_end_io)(const struct lu_env *,
+ struct cl_sync_io *);
};
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *));
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout);
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
/** @} cl_sync_io */
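
/*
 * Usage sketch (assumed from the declarations above, not part of the
 * patch): anchor `nr' transfers, let each completed transfer call
 * cl_sync_io_note(), and have the optional end callback fire once all
 * of them have finished. The demo_* names are hypothetical.
 */
static void demo_end_io(const struct lu_env *env, struct cl_sync_io *anchor)
{
	/* runs once, when the last of the nr transfers completes */
}

static int demo_sync(const struct lu_env *env, int nr, long timeout)
{
	struct cl_sync_io anchor;

	cl_sync_io_init(&anchor, nr, demo_end_io);
	/* ... submit nr transfers, each ending in cl_sync_io_note() ... */
	return cl_sync_io_wait(env, &anchor, timeout);
}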
@@ -3241,6 +2612,9 @@ void *cl_env_reenter(void);
void cl_env_reexit(void *cookie);
void cl_env_implant(struct lu_env *env, int *refcheck);
void cl_env_unplant(struct lu_env *env, int *refcheck);
+unsigned int cl_env_cache_purge(unsigned int nr);
+struct lu_env *cl_env_percpu_get(void);
+void cl_env_percpu_put(struct lu_env *env);
/** @} cl_env */
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
deleted file mode 100644
index 5d839a9f7..000000000
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Definitions shared between vvp and liblustre, and other clients in the
- * future.
- *
- * Author: Oleg Drokin <oleg.drokin@sun.com>
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#ifndef LCLIENT_H
-#define LCLIENT_H
-
-blkcnt_t dirty_cnt(struct inode *inode);
-
-int cl_glimpse_size0(struct inode *inode, int agl);
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl);
-
-static inline int cl_glimpse_size(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 0);
-}
-
-static inline int cl_agl(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 1);
-}
-
-/**
- * Locking policy for setattr.
- */
-enum ccc_setattr_lock_type {
- /** Locking is done by server */
- SETATTR_NOLOCK,
- /** Extent lock is enqueued */
- SETATTR_EXTENT_LOCK,
- /** Existing local extent lock is used */
- SETATTR_MATCH_LOCK
-};
-
-/**
- * IO state private to vvp or slp layers.
- */
-struct ccc_io {
- /** super class */
- struct cl_io_slice cui_cl;
- struct cl_io_lock_link cui_link;
- /**
- * I/O vector information to or from which read/write is going.
- */
- struct iov_iter *cui_iter;
- /**
- * Total size for the left IO.
- */
- size_t cui_tot_count;
-
- union {
- struct {
- enum ccc_setattr_lock_type cui_local_lock;
- } setattr;
- } u;
- /**
- * True iff io is processing glimpse right now.
- */
- int cui_glimpse;
- /**
- * Layout version when this IO is initialized
- */
- __u32 cui_layout_gen;
- /**
- * File descriptor against which IO is done.
- */
- struct ll_file_data *cui_fd;
- struct kiocb *cui_iocb;
-};
-
-/**
- * True, if \a io is a normal io, False for splice_{read,write}.
- * must be implemented in arch specific code.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
-
-extern struct lu_context_key ccc_key;
-extern struct lu_context_key ccc_session_key;
-
-struct ccc_thread_info {
- struct cl_lock_descr cti_descr;
- struct cl_io cti_io;
- struct cl_attr cti_attr;
-};
-
-static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
-{
- struct ccc_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &ccc_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
-{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
-
- memset(attr, 0, sizeof(*attr));
- return attr;
-}
-
-static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
-{
- struct cl_io *io = &ccc_env_info(env)->cti_io;
-
- memset(io, 0, sizeof(*io));
- return io;
-}
-
-struct ccc_session {
- struct ccc_io cs_ios;
-};
-
-static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
-{
- struct ccc_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &ccc_session_key);
- LASSERT(ses);
- return ses;
-}
-
-static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
-{
- return &ccc_env_session(env)->cs_ios;
-}
-
-/**
- * ccc-private object state.
- */
-struct ccc_object {
- struct cl_object_header cob_header;
- struct cl_object cob_cl;
- struct inode *cob_inode;
-
- /**
- * A list of dirty pages pending IO in the cache. Used by
- * SOM. Protected by ll_inode_info::lli_lock.
- *
- * \see ccc_page::cpg_pending_linkage
- */
- struct list_head cob_pending_list;
-
- /**
- * Access this counter is protected by inode->i_sem. Now that
- * the lifetime of transient pages must be covered by inode sem,
- * we don't need to hold any lock..
- */
- int cob_transient_pages;
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- atomic_t cob_mmap_cnt;
-
- /**
- * various flags
- * cob_discard_page_warned
- * if pages belonging to this object are discarded when a client
- * is evicted, some debug info will be printed, this flag will be set
- * during processing the first discarded page, then avoid flooding
- * debug message for lots of discarded pages.
- *
- * \see ll_dirty_page_discard_warn.
- */
- unsigned int cob_discard_page_warned:1;
-};
-
-/**
- * ccc-private page state.
- */
-struct ccc_page {
- struct cl_page_slice cpg_cl;
- int cpg_defer_uptodate;
- int cpg_ra_used;
- int cpg_write_queued;
- /**
- * Non-empty iff this page is already counted in
- * ccc_object::cob_pending_list. Protected by
- * ccc_object::cob_pending_guard. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- struct list_head cpg_pending_linkage;
- /** VM page */
- struct page *cpg_page;
-};
-
-static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
-{
- return container_of(slice, struct ccc_page, cpg_cl);
-}
-
-struct ccc_device {
- struct cl_device cdv_cl;
- struct super_block *cdv_sb;
- struct cl_device *cdv_next;
-};
-
-struct ccc_lock {
- struct cl_lock_slice clk_cl;
-};
-
-struct ccc_req {
- struct cl_req_slice crq_cl;
-};
-
-void *ccc_key_init (const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_key_fini (const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-
-int ccc_device_init (const struct lu_env *env,
- struct lu_device *d,
- const char *name, struct lu_device *next);
-struct lu_device *ccc_device_fini (const struct lu_env *env,
- struct lu_device *d);
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops);
-struct lu_device *ccc_device_free (const struct lu_env *env,
- struct lu_device *d);
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops);
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-void ccc_umount(const struct lu_env *env, struct cl_device *dev);
-int ccc_global_init(struct lu_device_type *device_type);
-void ccc_global_fini(struct lu_device_type *device_type);
-int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
- const struct cl_object_conf *conf);
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
-int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io,
- const struct cl_lock_operations *lkops);
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb);
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice);
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state);
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end);
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end);
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
-void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
- size_t nob);
-void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
- struct cl_io *io);
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed);
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *oa, u64 flags);
-
-struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
-struct lu_object *ccc2lu (struct ccc_object *vob);
-struct ccc_device *lu2ccc_dev (const struct lu_device *d);
-struct ccc_device *cl2ccc_dev (const struct cl_device *d);
-struct ccc_object *lu2ccc (const struct lu_object *obj);
-struct ccc_object *cl2ccc (const struct cl_object *obj);
-struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
-struct ccc_io *cl2ccc_io (const struct lu_env *env,
- const struct cl_io_slice *slice);
-struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-struct page *cl2vm_page (const struct cl_page_slice *slice);
-struct inode *ccc_object_inode(const struct cl_object *obj);
-struct ccc_object *cl_inode2ccc (struct inode *inode);
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
-
-int ccc_object_invariant(const struct cl_object *obj);
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
-void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
-
-__u16 ll_dirent_type_get(struct lu_dirent *ent);
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
-__u32 cl_fid_build_gen(const struct lu_fid *fid);
-
-# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
-
-struct ccc_grouplock {
- struct lu_env *cg_env;
- struct cl_io *cg_io;
- struct cl_lock *cg_lock;
- unsigned long cg_gid;
-};
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg);
-void cl_put_grouplock(struct ccc_grouplock *cg);
-
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering.
- */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
-int lov_read_and_clear_async_rc(struct cl_object *clob);
-
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
-/**
- * Data structure managing a client's cached clean pages. An LRU of
- * pages is maintained, along with other statistics.
- */
-struct cl_client_cache {
- atomic_t ccc_users; /* # of users (OSCs) of this data */
- struct list_head ccc_lru; /* LRU list of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries possible */
- unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
-};
-
-#endif /*LCLIENT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
deleted file mode 100644
index 3907bf4ce..000000000
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_OBD_H
-#define __LINUX_OBD_H
-
-#ifndef __OBD_H
-#error Do not #include this file directly. #include <obd.h> instead
-#endif
-
-#include "../obd_support.h"
-
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/sched.h> /* for struct task_struct, for current.h */
-#include <linux/mount.h>
-
-#include "../lustre_intent.h"
-
-struct ll_iattr {
- struct iattr iattr;
- unsigned int ia_attr_flags;
-};
-
-#define CLIENT_OBD_LIST_LOCK_DEBUG 1
-
-struct client_obd_lock {
- spinlock_t lock;
-
- unsigned long time;
- struct task_struct *task;
- const char *func;
- int line;
-};
-
-static inline void __client_obd_list_lock(struct client_obd_lock *lock,
- const char *func, int line)
-{
- unsigned long cur = jiffies;
-
- while (1) {
- if (spin_trylock(&lock->lock)) {
- LASSERT(!lock->task);
- lock->task = current;
- lock->func = func;
- lock->line = line;
- lock->time = jiffies;
- break;
- }
-
- if (time_before(cur + 5 * HZ, jiffies) &&
- time_before(lock->time + 5 * HZ, jiffies)) {
- struct task_struct *task = lock->task;
-
- if (!task)
- continue;
-
- LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
- current->comm, current->pid,
- lock, task->comm, task->pid,
- lock->func, lock->line,
- (jiffies - lock->time) / HZ);
- LCONSOLE_WARN("====== for current process =====\n");
- dump_stack();
- LCONSOLE_WARN("====== end =======\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1000 * HZ);
- }
- cpu_relax();
- }
-}
-
-#define client_obd_list_lock(lock) \
- __client_obd_list_lock(lock, __func__, __LINE__)
-
-static inline void client_obd_list_unlock(struct client_obd_lock *lock)
-{
- LASSERT(lock->task);
- lock->task = NULL;
- lock->time = jiffies;
- spin_unlock(&lock->lock);
-}
-
-static inline void client_obd_list_lock_init(struct client_obd_lock *lock)
-{
- spin_lock_init(&lock->lock);
-}
-
-static inline void client_obd_list_lock_done(struct client_obd_lock *lock)
-{}
-
-#endif /* __LINUX_OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 242bb1ef6..281651218 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -198,7 +198,6 @@ typedef int (*lu_printer_t)(const struct lu_env *env,
* Operations specific for particular lu_object.
*/
struct lu_object_operations {
-
/**
* Allocate lower-layer parts of the object by calling
* lu_device_operations::ldo_object_alloc() of the corresponding
@@ -656,21 +655,21 @@ static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
* @{
*/
-int lu_site_init (struct lu_site *s, struct lu_device *d);
-void lu_site_fini (struct lu_site *s);
-int lu_site_init_finish (struct lu_site *s);
-void lu_stack_fini (const struct lu_env *env, struct lu_device *top);
-void lu_device_get (struct lu_device *d);
-void lu_device_put (struct lu_device *d);
-int lu_device_init (struct lu_device *d, struct lu_device_type *t);
-void lu_device_fini (struct lu_device *d);
-int lu_object_header_init(struct lu_object_header *h);
+int lu_site_init(struct lu_site *s, struct lu_device *d);
+void lu_site_fini(struct lu_site *s);
+int lu_site_init_finish(struct lu_site *s);
+void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
+void lu_device_get(struct lu_device *d);
+void lu_device_put(struct lu_device *d);
+int lu_device_init(struct lu_device *d, struct lu_device_type *t);
+void lu_device_fini(struct lu_device *d);
+int lu_object_header_init(struct lu_object_header *h);
void lu_object_header_fini(struct lu_object_header *h);
-int lu_object_init (struct lu_object *o,
- struct lu_object_header *h, struct lu_device *d);
-void lu_object_fini (struct lu_object *o);
-void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
-void lu_object_add (struct lu_object *before, struct lu_object *o);
+int lu_object_init(struct lu_object *o,
+ struct lu_object_header *h, struct lu_device *d);
+void lu_object_fini(struct lu_object *o);
+void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
+void lu_object_add(struct lu_object *before, struct lu_object *o);
/**
* Helpers to initialize and finalize device types.
@@ -781,9 +780,8 @@ int lu_cdebug_printer(const struct lu_env *env,
*/
#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -794,9 +792,8 @@ do { \
*/
#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
(object)->lo_header); \
lu_cdebug_printer(env, &msgdata, "\n"); \
@@ -1007,6 +1004,10 @@ enum lu_context_tag {
*/
LCT_LOCAL = 1 << 7,
/**
+ * Session for server thread.
+ */
+ LCT_SERVER_SESSION = BIT(8),
+ /**
* Set when at least one of keys, having values in this context has
* non-NULL lu_context_key::lct_exit() method. This is used to
* optimize lu_context_exit() call.
@@ -1118,7 +1119,7 @@ struct lu_context_key {
{ \
type *value; \
\
- CLASSERT(PAGE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof(*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
@@ -1154,12 +1155,12 @@ do { \
(key)->lct_owner = THIS_MODULE; \
} while (0)
-int lu_context_key_register(struct lu_context_key *key);
-void lu_context_key_degister(struct lu_context_key *key);
-void *lu_context_key_get (const struct lu_context *ctx,
- const struct lu_context_key *key);
-void lu_context_key_quiesce (struct lu_context_key *key);
-void lu_context_key_revive (struct lu_context_key *key);
+int lu_context_key_register(struct lu_context_key *key);
+void lu_context_key_degister(struct lu_context_key *key);
+void *lu_context_key_get(const struct lu_context *ctx,
+ const struct lu_context_key *key);
+void lu_context_key_quiesce(struct lu_context_key *key);
+void lu_context_key_revive(struct lu_context_key *key);
/*
* LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
@@ -1216,21 +1217,21 @@ void lu_context_key_revive (struct lu_context_key *key);
LU_TYPE_START(mod, __VA_ARGS__); \
LU_TYPE_STOP(mod, __VA_ARGS__)
-int lu_context_init (struct lu_context *ctx, __u32 tags);
-void lu_context_fini (struct lu_context *ctx);
-void lu_context_enter (struct lu_context *ctx);
-void lu_context_exit (struct lu_context *ctx);
-int lu_context_refill(struct lu_context *ctx);
+int lu_context_init(struct lu_context *ctx, __u32 tags);
+void lu_context_fini(struct lu_context *ctx);
+void lu_context_enter(struct lu_context *ctx);
+void lu_context_exit(struct lu_context *ctx);
+int lu_context_refill(struct lu_context *ctx);
/*
* Helper functions to operate on multiple keys. These are used by the default
* device type operations, defined by LU_TYPE_INIT_FINI().
*/
-int lu_context_key_register_many(struct lu_context_key *k, ...);
+int lu_context_key_register_many(struct lu_context_key *k, ...);
void lu_context_key_degister_many(struct lu_context_key *k, ...);
-void lu_context_key_revive_many (struct lu_context_key *k, ...);
-void lu_context_key_quiesce_many (struct lu_context_key *k, ...);
+void lu_context_key_revive_many(struct lu_context_key *k, ...);
+void lu_context_key_quiesce_many(struct lu_context_key *k, ...);
/**
* Environment.
@@ -1246,9 +1247,9 @@ struct lu_env {
struct lu_context *le_ses;
};
-int lu_env_init (struct lu_env *env, __u32 tags);
-void lu_env_fini (struct lu_env *env);
-int lu_env_refill(struct lu_env *env);
+int lu_env_init(struct lu_env *env, __u32 tags);
+void lu_env_fini(struct lu_env *env);
+int lu_env_refill(struct lu_env *env);
/** @} lu_context */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5aae1d06a..9c53c1792 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -183,6 +183,12 @@ struct lu_seq_range {
__u32 lsr_flags;
};
+struct lu_seq_range_array {
+ __u32 lsra_count;
+ __u32 lsra_padding;
+ struct lu_seq_range lsra_lsr[0];
+};
+
#define LU_SEQ_RANGE_MDT 0x0
#define LU_SEQ_RANGE_OST 0x1
#define LU_SEQ_RANGE_ANY 0x3
@@ -578,7 +584,7 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
if (fid_seq_is_mdt0(ostid->oi.oi_seq))
return FID_SEQ_OST_MDT0;
- if (fid_seq_is_default(ostid->oi.oi_seq))
+ if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
return FID_SEQ_LOV_DEFAULT;
if (fid_is_idif(&ostid->oi_fid))
@@ -590,9 +596,12 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
- if (fid_seq_is_mdt0(ostid_seq(ostid)))
+ if (fid_seq_is_mdt0(ostid->oi.oi_seq))
return ostid->oi.oi_id & IDIF_OID_MASK;
+ if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
+ return ostid->oi.oi_id;
+
if (fid_is_idif(&ostid->oi_fid))
return fid_idif_id(fid_seq(&ostid->oi_fid),
fid_oid(&ostid->oi_fid), 0);
@@ -636,12 +645,22 @@ static inline void ostid_set_seq_llog(struct ost_id *oi)
*/
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
if (oid >= IDIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
+ } else if (fid_is_idif(&oi->oi_fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Bad %llu to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi_fid.f_seq = fid_idif_seq(oid,
+ fid_idif_ost_idx(&oi->oi_fid));
+ oi->oi_fid.f_oid = oid;
+ oi->oi_fid.f_ver = oid >> 48;
} else {
if (oid > OBIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
@@ -651,25 +670,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
}
}
-static inline void ostid_inc_id(struct ost_id *oi)
+static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
- CERROR("Bad inc "DOSTID"\n", POSTID(oi));
- return;
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
+
+ if (fid_is_idif(fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
}
- oi->oi.oi_id++;
+ fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
+ fid->f_oid = oid;
+ fid->f_ver = oid >> 48;
} else {
- oi->oi_fid.f_oid++;
+ if (oid > OBIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set REG "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
+ }
+ fid->f_oid = oid;
}
-}
-
-static inline void ostid_dec_id(struct ost_id *oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(oi)))
- oi->oi.oi_id--;
- else
- oi->oi_fid.f_oid--;
+ return 0;
}
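
/*
 * Worked example (ours): for an IDIF fid with oid = 0x123456789ab,
 * fid_set_id() stores
 *	f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid)),
 *	f_oid = 0x456789ab	(the low 32 bits of oid),
 *	f_ver = 0		(oid >> 48; non-zero only beyond 2^48).
 */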
/**
@@ -684,30 +709,34 @@ static inline void ostid_dec_id(struct ost_id *oi)
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
__u32 ost_idx)
{
+ __u64 seq = ostid_seq(ostid);
+
if (ost_idx > 0xffff) {
CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
ost_idx);
return -EBADF;
}
- if (fid_seq_is_mdt0(ostid_seq(ostid))) {
+ if (fid_seq_is_mdt0(seq)) {
+ __u64 oid = ostid_id(ostid);
+
/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
* that we map into the IDIF namespace. It allows up to 2^48
* objects per OST, as this is the object namespace that has
* been in production for years. This can handle create rates
* of 1M objects/s/OST for 9 years, or combinations thereof.
*/
- if (ostid_id(ostid) >= IDIF_MAX_OID) {
+ if (oid >= IDIF_MAX_OID) {
CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
POSTID(ostid), ost_idx);
return -EBADF;
}
- fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
+ fid->f_seq = fid_idif_seq(oid, ost_idx);
/* truncate to 32 bits by assignment */
- fid->f_oid = ostid_id(ostid);
+ fid->f_oid = oid;
/* in theory, not currently used */
- fid->f_ver = ostid_id(ostid) >> 48;
- } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
+ fid->f_ver = oid >> 48;
+ } else if (likely(!fid_seq_is_default(seq))) {
/* This is either an IDIF object, which identifies objects across
* all OSTs, or a regular FID. The IDIF namespace maps legacy
* OST objects into the FID namespace. In both cases, we just
@@ -1001,8 +1030,9 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr)
size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
size += sizeof(struct luda_type);
- } else
+ } else {
size = sizeof(struct lu_dirent) + namelen;
+ }
return (size + 7) & ~7;
}
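
/*
 * Worked example (ours): (size + 7) & ~7 rounds up to the next
 * multiple of 8, so a 19-byte entry occupies 24 bytes while a
 * 24-byte entry stays 24; every lu_dirent thus starts on an
 * 8-byte boundary.
 */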
@@ -1256,6 +1286,9 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
+#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
+ * name in request
+ */
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
@@ -1428,6 +1461,8 @@ enum obdo_flags {
*/
OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
+ OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
+ OBD_FL_SHORT_IO = 0x00400000, /* short io request */
/* Note that while these checksum values are currently separate bits,
* in 2.x we can actually allow all values from 1-31 if we wanted.
@@ -1525,6 +1560,11 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
oi->oi.oi_seq = seq;
}
+static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
+{
+ oi->oi.oi_id = oid;
+}
+
static inline __u64 lmm_oi_id(struct ost_id *oi)
{
return oi->oi.oi_id;
@@ -1732,6 +1772,11 @@ void lustre_swab_obd_statfs(struct obd_statfs *os);
#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
+#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
+ * that the client is running low on
+ * space for unstable pages, asking
+ * it to sync quickly
+ */
#define OBD_OBJECT_EOF 0xffffffffffffffffULL
@@ -2436,6 +2481,7 @@ struct mdt_rec_reint {
void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+/* lmv structures */
struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
__u32 ld_active_tgt_count; /* how many active */
@@ -2460,7 +2506,6 @@ struct lmv_stripe_md {
struct lu_fid mea_ids[0];
};
-/* lmv structures */
#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
#define MEA_MAGIC_ALL_CHARS 0xb222a11c
#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
@@ -2470,9 +2515,10 @@ struct lmv_stripe_md {
#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
enum fld_rpc_opc {
- FLD_QUERY = 900,
+ FLD_QUERY = 900,
+ FLD_READ = 901,
FLD_LAST_OPC,
- FLD_FIRST_OPC = FLD_QUERY
+ FLD_FIRST_OPC = FLD_QUERY
};
enum seq_rpc_opc {
@@ -2486,6 +2532,12 @@ enum seq_op {
SEQ_ALLOC_META = 1
};
+enum fld_op {
+ FLD_CREATE = 0,
+ FLD_DELETE = 1,
+ FLD_LOOKUP = 2,
+};
+
/*
* LOV data structures
*/
@@ -2582,6 +2634,8 @@ struct ldlm_extent {
__u64 gid;
};
+#define LDLM_GID_ANY ((__u64)-1)
+
static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
struct ldlm_extent *ex2)
{
@@ -3304,7 +3358,7 @@ struct getinfo_fid2path {
char gf_path[0];
} __packed;
-void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+void lustre_swab_fid2path(struct getinfo_fid2path *gf);
enum {
LAYOUT_INTENT_ACCESS = 0,
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 276906e64..59ba48ac3 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -193,37 +193,37 @@ struct ost_id {
* *INFO - set/get lov_user_mds_data
*/
/* see <lustre_lib.h> for ioctl numbers 101-150 */
-#define LL_IOC_GETFLAGS _IOR ('f', 151, long)
-#define LL_IOC_SETFLAGS _IOW ('f', 152, long)
-#define LL_IOC_CLRFLAGS _IOW ('f', 153, long)
+#define LL_IOC_GETFLAGS _IOR('f', 151, long)
+#define LL_IOC_SETFLAGS _IOW('f', 152, long)
+#define LL_IOC_CLRFLAGS _IOW('f', 153, long)
/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
-#define LL_IOC_LOV_SETSTRIPE _IOW ('f', 154, long)
+#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long)
/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
-#define LL_IOC_LOV_GETSTRIPE _IOW ('f', 155, long)
+#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long)
/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
-#define LL_IOC_LOV_SETEA _IOW ('f', 156, long)
-#define LL_IOC_RECREATE_OBJ _IOW ('f', 157, long)
-#define LL_IOC_RECREATE_FID _IOW ('f', 157, struct lu_fid)
-#define LL_IOC_GROUP_LOCK _IOW ('f', 158, long)
-#define LL_IOC_GROUP_UNLOCK _IOW ('f', 159, long)
+#define LL_IOC_LOV_SETEA _IOW('f', 156, long)
+#define LL_IOC_RECREATE_OBJ _IOW('f', 157, long)
+#define LL_IOC_RECREATE_FID _IOW('f', 157, struct lu_fid)
+#define LL_IOC_GROUP_LOCK _IOW('f', 158, long)
+#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long)
/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
-#define LL_IOC_QUOTACHECK _IOW ('f', 160, int)
+#define LL_IOC_QUOTACHECK _IOW('f', 160, int)
/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
-#define LL_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+#define LL_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
-#define LL_IOC_FLUSHCTX _IOW ('f', 166, long)
-#define LL_IOC_RMTACL _IOW ('f', 167, long)
-#define LL_IOC_GETOBDCOUNT _IOR ('f', 168, long)
+#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
+#define LL_IOC_RMTACL _IOW('f', 167, long)
+#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
-#define LL_IOC_PATH2FID _IOR ('f', 173, long)
+#define LL_IOC_PATH2FID _IOR('f', 173, long)
#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
-#define LL_IOC_GET_MDTIDX _IOR ('f', 175, int)
+#define LL_IOC_GET_MDTIDX _IOR('f', 175, int)
/* see <lustre_lib.h> for ioctl numbers 177-210 */
@@ -676,7 +676,12 @@ static inline const char *changelog_type2str(int type)
#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
/* HSM cleaning needed */
/* Flags for rename */
-#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */
+#define CLF_RENAME_LAST 0x0001 /* rename unlinked the last
+ * hardlink of target
+ */
+#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlinked the last hardlink
+ * of a target that has an archive in
+ * the backend
+ */
/* Flags for HSM */
/* 12b used (from high weight to low weight):
@@ -833,9 +838,8 @@ struct ioc_data_version {
__u64 idv_flags; /* See LL_DV_xxx */
};
-#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling
- * version. Dirty caches are left unchanged.
- */
+#define LL_DV_RD_FLUSH BIT(0) /* Flush dirty pages from clients */
+#define LL_DV_WR_FLUSH BIT(1) /* Flush all caching pages from clients */
#ifndef offsetof
# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -1095,12 +1099,12 @@ struct hsm_action_list {
__u32 padding1;
char hal_fsname[0]; /* null-terminated */
/* struct hsm_action_item[hal_count] follows, aligned on 8-byte
- * boundaries. See hai_zero
+ * boundaries. See hai_first
*/
} __packed;
#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round (int val)
+static inline int cfs_size_round(int val)
{
return (val + 7) & (~0x7);
}
@@ -1109,7 +1113,7 @@ static inline int cfs_size_round (int val)
#endif
/* Return pointer to first hai in action list */
-static inline struct hsm_action_item *hai_zero(struct hsm_action_list *hal)
+static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
{
return (struct hsm_action_item *)(hal->hal_fsname +
cfs_size_round(strlen(hal-> \
@@ -1131,7 +1135,7 @@ static inline int hal_size(struct hsm_action_list *hal)
struct hsm_action_item *hai;
sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
- hai = hai_zero(hal);
+ hai = hai_first(hal);
for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
sz += cfs_size_round(hai->hai_len);
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index bb16ae980..e229e91f7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -161,7 +161,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
int offset;
int bufcount;
- LASSERT (index >= 0);
+ LASSERT(index >= 0);
bufcount = lcfg->lcfg_bufcount;
if (index >= bufcount)
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 95fd36063..b36821ffb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -130,7 +130,6 @@ struct lustre_sb_info {
struct lustre_mount_data *lsi_lmd; /* mount command info */
struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
- struct vfsmount *lsi_srv_mnt; /* the one server mount */
atomic_t lsi_mounts; /* references to the srv_mnt */
char lsi_svname[MTI_NAME_MAXLEN];
char lsi_osd_obdname[64];
@@ -158,7 +157,6 @@ struct lustre_sb_info {
struct lustre_mount_info {
char *lmi_name;
struct super_block *lmi_sb;
- struct vfsmount *lmi_mnt;
struct list_head lmi_list_chain;
};
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 8b0364f71..9cade144f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -71,6 +71,7 @@ struct obd_device;
*/
enum ldlm_error {
ELDLM_OK = 0,
+ ELDLM_LOCK_MATCHED = 1,
ELDLM_LOCK_CHANGED = 300,
ELDLM_LOCK_ABORTED = 301,
@@ -269,7 +270,7 @@ struct ldlm_pool {
struct completion pl_kobj_unregister;
};
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
/**
* LVB operations.
@@ -446,8 +447,11 @@ struct ldlm_namespace {
/** Limit of parallel AST RPC count. */
unsigned ns_max_parallel_ast;
- /** Callback to cancel locks before replaying it during recovery. */
- ldlm_cancel_for_recovery ns_cancel_for_recovery;
+ /**
+ * Callback to check if a lock is good to be canceled by ELC or
+ * during recovery.
+ */
+ ldlm_cancel_cbt ns_cancel;
/** LDLM lock stats */
struct lprocfs_stats *ns_stats;
@@ -479,9 +483,9 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
}
static inline void ns_register_cancel(struct ldlm_namespace *ns,
- ldlm_cancel_for_recovery arg)
+ ldlm_cancel_cbt arg)
{
- ns->ns_cancel_for_recovery = arg;
+ ns->ns_cancel = arg;
}
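
/*
 * Registration sketch (hypothetical callback, ours): the renamed
 * ns_cancel hook now answers both "may ELC cancel this lock?" and
 * "may it be cancelled during recovery?".
 */
static int demo_cancel_cb(struct ldlm_lock *lock)
{
	return 1;	/* always permit cancellation in this sketch */
}
/* registered via: ns_register_cancel(ns, demo_cancel_cb); */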
struct ldlm_lock;
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 7f2ba2ffe..e7e0c21a9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -37,17 +37,11 @@
/** l_flags bits marked as "gone" bits */
#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
-/** l_flags bits marked as "hide_lock" bits */
-#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
-
/** l_flags bits marked as "inherit" bits */
#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
-/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
-
-/** l_flags bits marked as "on_wire" bits */
-#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
+/** l_flags bits marked as "off_wire" bits */
+#define LDLM_FL_OFF_WIRE_MASK 0x00FFFFFF00000000ULL
/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
@@ -204,7 +198,7 @@
#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
-/** whatever it might mean */
+/** meaning unclear -- apparently never transmitted */
#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
@@ -287,18 +281,18 @@
* has canceled this lock and is waiting for rpc_lock which is taken by
* the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
* the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * dropped to let ldlm_callback_handler() return EINVAL to the server. It
- * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC.
*/
#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
-/** whatever it might mean */
+/**
+ * Set by ldlm_cancel_callback() when the lock cache is dropped, to let
+ * ldlm_callback_handler() return EINVAL to the server. It is used when
+ * an ELC RPC is already prepared and waiting for rpc_lock, so it is too
+ * late to send a separate CANCEL RPC.
+ */
#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
@@ -381,104 +375,16 @@
/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK)
+
/** set a ldlm_lock flag bit */
#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
/** clear a ldlm_lock flag bit */
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
-
/** @} subgroup */
/** @} group */
-#ifdef WIRESHARK_COMPILE
-static int hf_lustre_ldlm_fl_lock_changed = -1;
-static int hf_lustre_ldlm_fl_block_granted = -1;
-static int hf_lustre_ldlm_fl_block_conv = -1;
-static int hf_lustre_ldlm_fl_block_wait = -1;
-static int hf_lustre_ldlm_fl_ast_sent = -1;
-static int hf_lustre_ldlm_fl_replay = -1;
-static int hf_lustre_ldlm_fl_intent_only = -1;
-static int hf_lustre_ldlm_fl_has_intent = -1;
-static int hf_lustre_ldlm_fl_flock_deadlock = -1;
-static int hf_lustre_ldlm_fl_discard_data = -1;
-static int hf_lustre_ldlm_fl_no_timeout = -1;
-static int hf_lustre_ldlm_fl_block_nowait = -1;
-static int hf_lustre_ldlm_fl_test_lock = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention = -1;
-static int hf_lustre_ldlm_fl_ast_discard_data = -1;
-static int hf_lustre_ldlm_fl_fail_loc = -1;
-static int hf_lustre_ldlm_fl_skipped = -1;
-static int hf_lustre_ldlm_fl_cbpending = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc = -1;
-static int hf_lustre_ldlm_fl_cancel = -1;
-static int hf_lustre_ldlm_fl_local_only = -1;
-static int hf_lustre_ldlm_fl_failed = -1;
-static int hf_lustre_ldlm_fl_canceling = -1;
-static int hf_lustre_ldlm_fl_local = -1;
-static int hf_lustre_ldlm_fl_lvb_ready = -1;
-static int hf_lustre_ldlm_fl_kms_ignore = -1;
-static int hf_lustre_ldlm_fl_cp_reqd = -1;
-static int hf_lustre_ldlm_fl_cleaned = -1;
-static int hf_lustre_ldlm_fl_atomic_cb = -1;
-static int hf_lustre_ldlm_fl_bl_ast = -1;
-static int hf_lustre_ldlm_fl_bl_done = -1;
-static int hf_lustre_ldlm_fl_no_lru = -1;
-static int hf_lustre_ldlm_fl_fail_notified = -1;
-static int hf_lustre_ldlm_fl_destroyed = -1;
-static int hf_lustre_ldlm_fl_server_lock = -1;
-static int hf_lustre_ldlm_fl_res_locked = -1;
-static int hf_lustre_ldlm_fl_waited = -1;
-static int hf_lustre_ldlm_fl_ns_srv = -1;
-static int hf_lustre_ldlm_fl_excl = -1;
-
-const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
- { 0, NULL }
-};
-#endif /* WIRESHARK_COMPILE */
+
#endif /* LDLM_ALL_FLAGS_MASK */
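A short usage sketch of the new multi-bit helper (caller context assumed); by the definition above, LDLM_HAVE_MASK(lock, GONE) expands to (lock)->l_flags & LDLM_FL_GONE_MASK:

	if (LDLM_HAVE_MASK(lock, GONE))
		/* Lock is failed or being destroyed; don't touch it. */
		return;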
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ab4a92390..12e8b585c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -308,10 +308,10 @@ static inline int fid_seq_in_fldb(__u64 seq)
fid_seq_is_root(seq) || fid_seq_is_dot(seq);
}
-static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
+static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
{
if (fid_seq_is_mdt0(seq)) {
- fid->f_seq = fid_idif_seq(0, 0);
+ fid->f_seq = fid_idif_seq(0, ost_idx);
} else {
LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
fid_seq_is_idif(seq), "%#llx\n", seq);
@@ -498,19 +498,6 @@ static inline void ostid_build_res_name(struct ost_id *oi,
}
}
-static inline void ostid_res_name_to_id(struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_SEQ_OFF])) {
- /* old resid */
- ostid_set_seq(oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
- ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- } else {
- /* new resid */
- fid_extract_from_res_name(&oi->oi_fid, name);
- }
-}
-
/**
* Return true if the resource is for the object identified by this id & group.
*/
@@ -546,7 +533,8 @@ static inline void ost_fid_build_resid(const struct lu_fid *fid,
}
static inline void ost_fid_from_resid(struct lu_fid *fid,
- const struct ldlm_res_id *name)
+ const struct ldlm_res_id *name,
+ int ost_idx)
{
if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
/* old resid */
@@ -554,7 +542,7 @@ static inline void ost_fid_from_resid(struct lu_fid *fid,
ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- ostid_to_fid(fid, &oi, 0);
+ ostid_to_fid(fid, &oi, ost_idx);
} else {
/* new resid */
fid_extract_from_res_name(fid, name);
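Both helpers now take the OST index so that IDIF sequences identify the target device instead of hard-coding index 0. A hedged sketch of the new calling convention; fid, res and ost_idx are assumed from the caller's context:

	struct lu_fid fid;

	/* Build the LAST_ID FID for OST ost_idx. */
	lu_last_id_fid(&fid, FID_SEQ_OST_MDT0, ost_idx);

	/* The reverse mapping from a resource name needs the index too. */
	ost_fid_from_resid(&fid, &res->lr_name, ost_idx);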
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index dac2d84d8..8325c82b3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -109,7 +109,7 @@ static inline char *ptlrpc_import_state_name(enum lustre_imp_state state)
"RECOVER", "FULL", "EVICTED",
};
- LASSERT (state <= LUSTRE_IMP_EVICTED);
+ LASSERT(state <= LUSTRE_IMP_EVICTED);
return import_state_names[state];
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index f2223d558..00b976766 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -280,16 +280,16 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_DATA_TYPE long
#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
@@ -308,13 +308,13 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PING_TARGET _IOW ('f', 136, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139)
-#define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
@@ -324,27 +324,27 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CHANGELOG_SEND _IOW ('f', 148, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
/* see also <lustre/lustre_user.h> for ioctls 151-153 */
/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
-#define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_SETSTRIPE _IOW('f', 154, OBD_IOC_DATA_TYPE)
/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
-#define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_GETSTRIPE _IOW('f', 155, OBD_IOC_DATA_TYPE)
/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
-#define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_SETEA _IOW('f', 156, OBD_IOC_DATA_TYPE)
/* see <lustre/lustre_user.h> for ioctls 157-159 */
/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
-#define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
+#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
-#define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
/* see also <lustre/lustre_user.h> for ioctls 163-176 */
-#define OBD_IOC_CHANGELOG_REG _IOW ('f', 177, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_DEREG _IOW ('f', 178, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_CLEAR _IOW ('f', 179, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
@@ -352,7 +352,7 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
@@ -522,6 +522,28 @@ struct l_wait_info {
sigmask(SIGTERM) | sigmask(SIGQUIT) | \
sigmask(SIGALRM))
+/**
+ * In Linux (version < 2.6.34), wait_queue_t keeps exclusively waiting
+ * threads in a FIFO list, which is not always desirable: all threads
+ * are woken up again and again even if the user only needs a few of
+ * them to be active most of the time. This is bad for performance
+ * because the cache can be polluted by different threads.
+ *
+ * A LIFO list resolves this problem because, by default, we always
+ * wake up the most recently active thread.
+ *
+ * NB: please don't mix non-exclusive and exclusive waits on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link) \
+{ \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&((waitq)->lock), flags); \
+ __add_wait_queue_exclusive(waitq, link); \
+ spin_unlock_irqrestore(&((waitq)->lock), flags); \
+}
+
/*
* wait for @condition to become true, but no longer than timeout, specified
* by @info.
@@ -578,7 +600,7 @@ do { \
\
if (condition) \
break; \
- if (cfs_signal_pending()) { \
+ if (signal_pending(current)) { \
if (info->lwi_on_signal && \
(__timeout == 0 || __allow_intr)) { \
if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
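A minimal usage sketch of the new macro, assuming a caller-provided wait_queue_head_t waitq and a condition; it follows the usual exclusive-wait pattern except that the waiter is queued at the head, making wakeups LIFO:

	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	add_wait_queue_exclusive_head(&waitq, &wait);
	while (!condition) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&waitq, &wait);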
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index af77eb359..f267ff8a6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -64,9 +64,27 @@ struct obd_export;
struct ptlrpc_request;
struct obd_device;
+/**
+ * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
+ *
+ * This mutex is used to implement execute-once semantics on the MDT.
+ * The MDT stores the last transaction ID and result for every client in
+ * its last_rcvd file. If the client doesn't get a reply, it can safely
+ * resend the request and the MDT will reconstruct the reply being aware
+ * that the request has already been executed. Without this lock,
+ * execution status of concurrent in-flight requests would be
+ * overwritten.
+ *
+ * This design limits the extent to which we can keep a full pipeline of
+ * in-flight requests from a single client. This limitation could be
+ * overcome by allowing multiple slots per client in the last_rcvd file.
+ */
struct mdc_rpc_lock {
+ /** Lock protecting in-flight RPC concurrency. */
struct mutex rpcl_mutex;
+ /** Intent associated with currently executing request. */
struct lookup_intent *rpcl_it;
+ /** Used for MDS/RPC load testing purposes. */
int rpcl_fakes;
};
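The existing mdc helpers mdc_get_rpc_lock()/mdc_put_rpc_lock() wrap this mutex around each modifying request; roughly, and with error handling elided:

	mdc_get_rpc_lock(rpc_lock, it);	/* take rpcl_mutex (unless faked) */
	rc = ptlrpc_queue_wait(req);	/* at most one modifying RPC in flight */
	mdc_put_rpc_lock(rpc_lock, it);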
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 69586a522..a7973d5de 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1327,7 +1327,9 @@ struct ptlrpc_request {
/* allow the req to be sent if the import is in recovery
* status
*/
- rq_allow_replay:1;
+ rq_allow_replay:1,
+ /* bulk request, sent to server, but uncommitted */
+ rq_unstable:1;
unsigned int rq_nr_resend;
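This hunk only adds the bit; a hedged sketch of the accounting it is meant to enable, using the obd_unstable_pages counter declared later in this patch (the exact helper shape is an assumption):

	spin_lock(&req->rq_lock);
	req->rq_unstable = 1;	/* bulk sent, not yet committed on server */
	spin_unlock(&req->rq_lock);
	atomic_add(page_count, &obd_unstable_pages);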
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index 383fe6feb..a42cf90c1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -89,6 +89,7 @@ int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
#define PARAM_OST "ost."
+#define PARAM_OSD "osd."
#define PARAM_OSC "osc."
#define PARAM_MDT "mdt."
#define PARAM_MDD "mdd."
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index b2e67fcf9..0aac4391e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -137,6 +137,7 @@ extern struct req_format RQF_MGS_CONFIG_READ;
/* fid/fld req_format */
extern struct req_format RQF_SEQ_QUERY;
extern struct req_format RQF_FLD_QUERY;
+extern struct req_format RQF_FLD_READ;
/* MDS req_format */
extern struct req_format RQF_MDS_CONNECT;
extern struct req_format RQF_MDS_DISCONNECT;
@@ -199,7 +200,7 @@ extern struct req_format RQF_OST_BRW_READ;
extern struct req_format RQF_OST_BRW_WRITE;
extern struct req_format RQF_OST_STATFS;
extern struct req_format RQF_OST_SET_GRANT_INFO;
-extern struct req_format RQF_OST_GET_INFO_GENERIC;
+extern struct req_format RQF_OST_GET_INFO;
extern struct req_format RQF_OST_GET_INFO_LAST_ID;
extern struct req_format RQF_OST_GET_INFO_LAST_FID;
extern struct req_format RQF_OST_SET_INFO_LAST_FID;
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4264d9765..2d926e0ee 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -37,7 +37,7 @@
#ifndef __OBD_H
#define __OBD_H
-#include "linux/obd.h"
+#include <linux/spinlock.h>
#define IOC_OSC_TYPE 'h'
#define IOC_OSC_MIN_NR 20
@@ -54,6 +54,7 @@
#include "lustre_export.h"
#include "lustre_fid.h"
#include "lustre_fld.h"
+#include "lustre_intent.h"
#define MAX_OBD_DEVICES 8192
@@ -165,9 +166,6 @@ struct obd_info {
obd_enqueue_update_f oi_cb_up;
};
-void lov_stripe_lock(struct lov_stripe_md *md);
-void lov_stripe_unlock(struct lov_stripe_md *md);
-
struct obd_type {
struct list_head typ_chain;
struct obd_ops *typ_dt_ops;
@@ -293,14 +291,10 @@ struct client_obd {
* blocking everywhere, but we don't want to slow down fast-path of
* our main platform.)
*
- * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
- * with client_obd_list_{un,}lock() and
- * client_obd_list_lock_{init,done}() functions.
- *
* NB by Jinshan: though field names are still _loi_, but actually
* osc_object{}s are in the list.
*/
- struct client_obd_lock cl_loi_list_lock;
+ spinlock_t cl_loi_list_lock;
struct list_head cl_loi_ready_list;
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
@@ -327,7 +321,8 @@ struct client_obd {
atomic_t cl_lru_shrinkers;
atomic_t cl_lru_in_list;
struct list_head cl_lru_list; /* lru page list */
- struct client_obd_lock cl_lru_list_lock; /* page list protector */
+ spinlock_t cl_lru_list_lock; /* page list protector */
+ atomic_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
@@ -364,6 +359,7 @@ struct client_obd {
/* ptlrpc work for writeback in ptlrpcd context */
void *cl_writeback_work;
+ void *cl_lru_work;
/* hash tables for osc_quota_info */
struct cfs_hash *cl_quota_hash[MAXQUOTAS];
};
@@ -391,45 +387,9 @@ struct ost_pool {
struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
-/* Round-robin allocator data */
-struct lov_qos_rr {
- __u32 lqr_start_idx; /* start index of new inode */
- __u32 lqr_offset_idx; /* aliasing for start_idx */
- int lqr_start_count; /* reseed counter */
- struct ost_pool lqr_pool; /* round-robin optimized list */
- unsigned long lqr_dirty:1; /* recalc round-robin list */
-};
-
/* allow statfs data caching for 1 second */
#define OBD_STATFS_CACHE_SECONDS 1
-struct lov_statfs_data {
- struct obd_info lsd_oi;
- struct obd_statfs lsd_statfs;
-};
-
-/* Stripe placement optimization */
-struct lov_qos {
- struct list_head lq_oss_list; /* list of OSSs that targets use */
- struct rw_semaphore lq_rw_sem;
- __u32 lq_active_oss_count;
- unsigned int lq_prio_free; /* priority for free space */
- unsigned int lq_threshold_rr;/* priority for rr */
- struct lov_qos_rr lq_rr; /* round robin qos data */
- unsigned long lq_dirty:1, /* recalc qos data */
- lq_same_space:1,/* the ost's all have approx.
- * the same space avail
- */
- lq_reset:1, /* zero current penalties */
- lq_statfs_in_progress:1; /* statfs op in
- progress */
- /* qos statfs data */
- struct lov_statfs_data *lq_statfs_data;
- wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
- * requests completion
- */
-};
-
struct lov_tgt_desc {
struct list_head ltd_kill;
struct obd_uuid ltd_uuid;
@@ -442,25 +402,6 @@ struct lov_tgt_desc {
ltd_reap:1; /* should this target be deleted */
};
-/* Pool metadata */
-#define pool_tgt_size(_p) _p->pool_obds.op_size
-#define pool_tgt_count(_p) _p->pool_obds.op_count
-#define pool_tgt_array(_p) _p->pool_obds.op_array
-#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
-
-struct pool_desc {
- char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
- struct ost_pool pool_obds; /* pool members */
- atomic_t pool_refcount; /* pool ref. counter */
- struct lov_qos_rr pool_rr; /* round robin qos */
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
- struct dentry *pool_debugfs_entry; /* file in debugfs */
- struct obd_device *pool_lobd; /* obd of the lov/lod to which
- * this pool belongs
- */
-};
-
struct lov_obd {
struct lov_desc desc;
struct lov_tgt_desc **lov_tgts; /* sparse array */
@@ -468,8 +409,6 @@ struct lov_obd {
struct mutex lov_lock;
struct obd_connect_data lov_ocd;
atomic_t lov_refcount;
- __u32 lov_tgt_count; /* how many OBD's */
- __u32 lov_active_tgt_count; /* how many active */
__u32 lov_death_row;/* tgts scheduled to be deleted */
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
@@ -479,7 +418,7 @@ struct lov_obd {
struct dentry *lov_pool_debugfs_entry;
enum lustre_sec_part lov_sp_me;
- /* Cached LRU pages from upper layer */
+ /* Cached LRU and unstable data from upper layer */
void *lov_cache;
struct rw_semaphore lov_notify_lock;
@@ -511,7 +450,7 @@ struct lmv_obd {
struct obd_uuid cluuid;
struct obd_export *exp;
- struct mutex init_mutex;
+ struct mutex lmv_init_mutex;
int connected;
int max_easize;
int max_def_easize;
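With the client_obd_list_lock wrappers gone, the cl_loi_list_lock and cl_lru_list_lock hunks above leave callers using the plain spinlock API directly; an assumed sketch (the osc field names are illustrative):

	spin_lock(&cli->cl_loi_list_lock);
	list_add_tail(&osc->oo_ready_item, &cli->cl_loi_ready_list);
	spin_unlock(&cli->cl_loi_list_lock);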
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index 637fa2211..f6c18df90 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -35,6 +35,7 @@
#ifndef __OBD_CKSUM
#define __OBD_CKSUM
#include "../../include/linux/libcfs/libcfs.h"
+#include "../../include/linux/libcfs/libcfs_crypto.h"
#include "lustre/lustre_idl.h"
static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 706869f8c..32863bcb3 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -477,7 +477,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
struct lu_context session_ctx;
struct lu_env env;
- lu_context_init(&session_ctx, LCT_SESSION);
+ lu_context_init(&session_ctx, LCT_SESSION | LCT_SERVER_SESSION);
session_ctx.lc_thread = NULL;
lu_context_enter(&session_ctx);
@@ -490,8 +490,9 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
obd->obd_lu_dev = d;
d->ld_obd = obd;
rc = 0;
- } else
+ } else {
rc = PTR_ERR(d);
+ }
}
lu_context_exit(&session_ctx);
lu_context_fini(&session_ctx);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index f8ee3a325..60034d39b 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -58,6 +58,7 @@ extern int at_early_margin;
extern int at_extra;
extern unsigned int obd_sync_filter;
extern unsigned int obd_max_dirty_pages;
+extern atomic_t obd_unstable_pages;
extern atomic_t obd_dirty_pages;
extern atomic_t obd_dirty_transit_pages;
extern char obd_jobid_var[];
@@ -289,6 +290,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OST_ENOINO 0x229
#define OBD_FAIL_OST_DQACQ_NET 0x230
#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
+#define OBD_FAIL_OST_SET_INFO_NET 0x232
#define OBD_FAIL_LDLM 0x300
#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
@@ -319,6 +321,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
#define OBD_FAIL_LDLM_OST_LVB 0x31c
+#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
/* LOCKLESS IO */
#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -426,6 +429,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_FLD 0x1100
#define OBD_FAIL_FLD_QUERY_NET 0x1101
+#define OBD_FAIL_FLD_READ_NET 0x1102
#define OBD_FAIL_SEC_CTX 0x1200
#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
deleted file mode 100644
index 96141d17d..000000000
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ /dev/null
@@ -1,1203 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../../include/linux/libcfs/libcfs.h"
-# include <linux/fs.h>
-# include <linux/sched.h>
-# include <linux/mm.h>
-# include <linux/quotaops.h>
-# include <linux/highmem.h>
-# include <linux/pagemap.h>
-# include <linux/rbtree.h>
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "../include/lustre_mdc.h"
-#include "../include/cl_object.h"
-
-#include "../include/lclient.h"
-
-#include "../llite/llite_internal.h"
-
-static const struct cl_req_operations ccc_req_ops;
-
-/*
- * ccc_ prefix stands for "Common Client Code".
- */
-
-static struct kmem_cache *ccc_lock_kmem;
-static struct kmem_cache *ccc_object_kmem;
-static struct kmem_cache *ccc_thread_kmem;
-static struct kmem_cache *ccc_session_kmem;
-static struct kmem_cache *ccc_req_kmem;
-
-static struct lu_kmem_descr ccc_caches[] = {
- {
- .ckd_cache = &ccc_lock_kmem,
- .ckd_name = "ccc_lock_kmem",
- .ckd_size = sizeof(struct ccc_lock)
- },
- {
- .ckd_cache = &ccc_object_kmem,
- .ckd_name = "ccc_object_kmem",
- .ckd_size = sizeof(struct ccc_object)
- },
- {
- .ckd_cache = &ccc_thread_kmem,
- .ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof(struct ccc_thread_info),
- },
- {
- .ckd_cache = &ccc_session_kmem,
- .ckd_name = "ccc_session_kmem",
- .ckd_size = sizeof(struct ccc_session)
- },
- {
- .ckd_cache = &ccc_req_kmem,
- .ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof(struct ccc_req)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
-{
- struct ccc_thread_info *info;
-
- info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-void ccc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_thread_info *info = data;
-
- kmem_cache_free(ccc_thread_kmem, info);
-}
-
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct ccc_session *session;
-
- session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS);
- if (!session)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_session *session = data;
-
- kmem_cache_free(ccc_session_kmem, session);
-}
-
-struct lu_context_key ccc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = ccc_key_init,
- .lct_fini = ccc_key_fini
-};
-
-struct lu_context_key ccc_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = ccc_session_key_init,
- .lct_fini = ccc_session_key_fini
-};
-
-/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
-/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
-
-int ccc_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct ccc_device *vdv;
- int rc;
-
- vdv = lu2ccc_dev(d);
- vdv->cdv_next = lu2cl_dev(next);
-
- LASSERT(d->ld_site && next->ld_type);
- next->ld_site = d->ld_site;
- rc = next->ld_type->ldt_ops->ldto_device_init(
- env, next, next->ld_type->ldt_name, NULL);
- if (rc == 0) {
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- }
- return rc;
-}
-
-struct lu_device *ccc_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
-}
-
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops)
-{
- struct ccc_device *vdv;
- struct lu_device *lud;
- struct cl_site *site;
- int rc;
-
- vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
- if (!vdv)
- return ERR_PTR(-ENOMEM);
-
- lud = &vdv->cdv_cl.cd_lu_dev;
- cl_device_init(&vdv->cdv_cl, t);
- ccc2lu_dev(vdv)->ld_ops = luops;
- vdv->cdv_cl.cd_ops = clops;
-
- site = kzalloc(sizeof(*site), GFP_NOFS);
- if (site) {
- rc = cl_site_init(site, &vdv->cdv_cl);
- if (rc == 0)
- rc = lu_site_init_finish(&site->cs_lu);
- else {
- LASSERT(!lud->ld_site);
- CERROR("Cannot init lu_site, rc %d.\n", rc);
- kfree(site);
- }
- } else
- rc = -ENOMEM;
- if (rc != 0) {
- ccc_device_free(env, lud);
- lud = ERR_PTR(rc);
- }
- return lud;
-}
-
-struct lu_device *ccc_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct ccc_device *vdv = lu2ccc_dev(d);
- struct cl_site *site = lu2cl_site(d->ld_site);
- struct lu_device *next = cl2lu_dev(vdv->cdv_next);
-
- if (d->ld_site) {
- cl_site_fini(site);
- kfree(site);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(vdv);
- return next;
-}
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct ccc_req *vrq;
- int result;
-
- vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS);
- if (vrq) {
- cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-/**
- * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
- * fails. Access to this environment is serialized by ccc_inode_fini_guard
- * mutex.
- */
-static struct lu_env *ccc_inode_fini_env;
-
-/**
- * A mutex serializing calls to slp_inode_fini() under extreme memory
- * pressure, when environments cannot be allocated.
- */
-static DEFINE_MUTEX(ccc_inode_fini_guard);
-static int dummy_refcheck;
-
-int ccc_global_init(struct lu_device_type *device_type)
-{
- int result;
-
- result = lu_kmem_init(ccc_caches);
- if (result)
- return result;
-
- result = lu_device_type_init(device_type);
- if (result)
- goto out_kmem;
-
- ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (IS_ERR(ccc_inode_fini_env)) {
- result = PTR_ERR(ccc_inode_fini_env);
- goto out_device;
- }
-
- ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
- return 0;
-out_device:
- lu_device_type_fini(device_type);
-out_kmem:
- lu_kmem_fini(ccc_caches);
- return result;
-}
-
-void ccc_global_fini(struct lu_device_type *device_type)
-{
- if (ccc_inode_fini_env) {
- cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
- ccc_inode_fini_env = NULL;
- }
- lu_device_type_fini(device_type);
- lu_kmem_fini(ccc_caches);
-}
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops)
-{
- struct ccc_object *vob;
- struct lu_object *obj;
-
- vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS);
- if (vob) {
- struct cl_object_header *hdr;
-
- obj = ccc2lu(vob);
- hdr = &vob->cob_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- vob->cob_cl.co_ops = clops;
- obj->lo_ops = luops;
- } else
- obj = NULL;
- return obj;
-}
-
-int ccc_object_init0(const struct lu_env *env,
- struct ccc_object *vob,
- const struct cl_object_conf *conf)
-{
- vob->cob_inode = conf->coc_inode;
- vob->cob_transient_pages = 0;
- cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
- return 0;
-}
-
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
- struct ccc_object *vob = lu2ccc(obj);
- struct lu_object *below;
- struct lu_device *under;
- int result;
-
- under = &dev->cdv_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below) {
- const struct cl_object_conf *cconf;
-
- cconf = lu2cl_conf(conf);
- INIT_LIST_HEAD(&vob->cob_pending_list);
- lu_object_add(obj, below);
- result = ccc_object_init0(env, vob, cconf);
- } else
- result = -ENOMEM;
- return result;
-}
-
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct ccc_object *vob = lu2ccc(obj);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
- kmem_cache_free(ccc_object_kmem, vob);
-}
-
-int ccc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused,
- const struct cl_lock_operations *lkops)
-{
- struct ccc_lock *clk;
- int result;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS);
- if (clk) {
- cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- lvb->lvb_mtime = cl_inode_mtime(inode);
- lvb->lvb_atime = cl_inode_atime(inode);
- lvb->lvb_ctime = cl_inode_ctime(inode);
- /*
- * LU-417: Add dirty pages block count lest i_blocks reports 0, some
- * "cp" or "tar" on remote node may think it's a completely sparse file
- * and skip it.
- */
- if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
- lvb->lvb_blocks = dirty_cnt(inode);
- return 0;
-}
-
-static void ccc_object_size_lock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- ll_inode_size_lock(inode);
- cl_object_attr_lock(obj);
-}
-
-static void ccc_object_size_unlock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_object_attr_unlock(obj);
- ll_inode_size_unlock(inode);
-}
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2vm_page(slice);
-}
-
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
- struct cl_page *page = slice->cpl_page;
-
- int result;
-
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
- result = -EBUSY;
- else {
- desc->cld_start = page->cp_index;
- desc->cld_end = page->cp_index;
- desc->cld_obj = page->cp_obj;
- desc->cld_mode = CLM_READ;
- result = cl_queue_match(&io->ci_lockset.cls_done,
- desc) ? -EBUSY : 0;
- }
- } else
- result = 0;
- return result;
-}
-
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* transient page should always be sent. */
- return 0;
-}
-
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
-}
-
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
- struct ccc_lock *clk = cl2ccc_lock(slice);
-
- kmem_cache_free(ccc_lock_kmem, clk);
-}
-
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-/**
- * Implementation of cl_lock_operations::clo_fits_into() methods for ccc
- * layer. This function is executed every time io finds an existing lock in
- * the lock cache while creating new lock. This function has to decide whether
- * cached lock "fits" into io.
- *
- * \param slice lock to be checked
- * \param io IO that wants a lock.
- *
- * \see lov_lock_fits_into().
- */
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock *lock = slice->cls_lock;
- const struct cl_lock_descr *descr = &lock->cll_descr;
- const struct ccc_io *cio = ccc_env_io(env);
- int result;
-
- /*
- * Work around DLM peculiarity: it assumes that glimpse
- * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns reads lock
- * when asked for LCK_PW lock with LDLM_FL_HAS_INTENT flag set. Make
- * sure that glimpse doesn't get CLM_WRITE top-lock, so that it
- * doesn't enqueue CLM_WRITE sub-locks.
- */
- if (cio->cui_glimpse)
- result = descr->cld_mode != CLM_WRITE;
-
- /*
- * Also, don't match incomplete write locks for read, otherwise read
- * would enqueue missing sub-locks in the write mode.
- */
- else if (need->cld_mode != descr->cld_mode)
- result = lock->cll_state >= CLS_ENQUEUED;
- else
- result = 1;
- return result;
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for ccc layer, invoked
- * whenever lock state changes. Transfers object attributes, that might be
- * updated as a result of lock acquiring into inode.
- */
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct cl_lock *lock = slice->cls_lock;
-
- /*
- * Refresh inode attributes when the lock is moving into CLS_HELD
- * state, and only when this is a result of real enqueue, rather than
- * of finding lock in the cache.
- */
- if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
- struct cl_object *obj;
- struct inode *inode;
-
- obj = slice->cls_obj;
- inode = ccc_object_inode(obj);
-
- /* vmtruncate() sets the i_size
- * under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order.
- */
- if (lock->cll_descr.cld_start == 0 &&
- lock->cll_descr.cld_end == CL_PAGE_EOF)
- cl_merge_lvb(env, inode);
- }
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
- struct cl_object *obj = io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
-
- memset(&cio->cui_link, 0, sizeof(cio->cui_link));
-
- if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- descr->cld_mode = CLM_GROUP;
- descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
- } else {
- descr->cld_mode = mode;
- }
- descr->cld_obj = obj;
- descr->cld_start = start;
- descr->cld_end = end;
- descr->cld_enq_flags = enqflags;
-
- cl_io_lock_add(env, io, &cio->cui_link);
- return 0;
-}
-
-void ccc_io_update_iov(const struct lu_env *env,
- struct ccc_io *cio, struct cl_io *io)
-{
- size_t size = io->u.ci_rw.crw_count;
-
- if (!cl_is_normalio(env, io) || !cio->cui_iter)
- return;
-
- iov_iter_truncate(cio->cui_iter, size);
-}
-
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end)
-{
- struct cl_object *obj = io->ci_obj;
-
- return ccc_io_one_lock_index(env, io, enqflags, mode,
- cl_index(obj, start), cl_index(obj, end));
-}
-
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- ccc_object_invariant(ios->cis_io->ci_obj));
-}
-
-void ccc_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios,
- size_t nob)
-{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = ios->cis_io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- if (!cl_is_normalio(env, io))
- return;
-
- iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
-}
-
-/**
- * Helper function that if necessary adjusts file size (inode->i_size), when
- * position at the offset \a pos is accessed. File size can be arbitrary stale
- * on a Lustre client, but client at least knows KMS. If accessed area is
- * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
- *
- * Locking: cl_isize_lock is used to serialize changes to inode size and to
- * protect consistency between inode size and cl_object
- * attributes. cl_object_size_lock() protects consistency between cl_attr's of
- * top-object and sub-objects.
- */
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct inode *inode = ccc_object_inode(obj);
- loff_t pos = start + count - 1;
- loff_t kms;
- int result;
-
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being accessed and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock already acquired by
- * the caller, because to change the class, other client has to take
- * DLM lock conflicting with our lock. Also, any updates to ->i_size
- * by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- ccc_object_size_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- kms = attr->cat_kms;
- if (pos > kms) {
- /*
- * A glimpse is necessary to determine whether we
- * return a short read (B) or some zeroes at the end
- * of the buffer (C)
- */
- ccc_object_size_unlock(obj);
- result = cl_glimpse_lock(env, io, inode, obj, 0);
- if (result == 0 && exceed) {
- /* If objective page index exceed end-of-file
- * page index, return directly. Do not expect
- * kernel will check such case correctly.
- * linux-2.6.18-128.1.1 miss to do that.
- * --bug 17336
- */
- loff_t size = cl_isize_read(inode);
- loff_t cur_index = start >> PAGE_SHIFT;
- loff_t size_index = (size - 1) >>
- PAGE_SHIFT;
-
- if ((size == 0 && cur_index != 0) ||
- size_index < cur_index)
- *exceed = 1;
- }
- return result;
- }
- /*
- * region is within kms and, hence, within real file
- * size (A). We need to increase i_size to cover the
- * read region so that generic_file_read() will do its
- * job, but that doesn't mean the kms size is
- * _correct_, it is only the _minimum_ size. If
- * someone does a stat they will get the correct size
- * which will always be >= the kms value here.
- * b=11081
- */
- if (cl_isize_read(inode) < kms) {
- cl_isize_write_nolock(inode, kms);
- CDEBUG(D_VFSTRACE,
- DFID" updating i_size %llu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (__u64)cl_isize_read(inode));
-
- }
- }
- ccc_object_size_unlock(obj);
- return result;
-}
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
-
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct ccc_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2ccc_req(slice);
- kmem_cache_free(ccc_req_kmem, vrq);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for ccc
- * layer. ccc is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- */
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = ccc_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
- memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
- JOBSTATS_JOBID_SIZE);
-}
-
-static const struct cl_req_operations ccc_req_ops = {
- .cro_attr_set = ccc_req_attr_set,
- .cro_completion = ccc_req_completion
-};
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
-{
- struct lu_env *env;
- struct cl_io *io;
- int result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
-
- io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
- io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
- io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
- io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
- io->u.ci_setattr.sa_valid = attr->ia_valid;
-
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
-
- if (attr->ia_valid & ATTR_FILE)
- /* populate the file descriptor for ftruncate to honor
- * group lock - see LU-787
- */
- cio->cui_fd = cl_iattr2fd(inode, attr);
-
- result = cl_io_loop(env, io);
- } else {
- result = io->ci_result;
- }
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
- /* HSM import case: file is released, cannot be restored
- * no need to fail except if restore registration failed
- * with -ENODATA
- */
- if (result == -ENODATA && io->ci_restore_needed &&
- io->ci_result != -ENODATA)
- result = 0;
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
-{
- return &vdv->cdv_cl.cd_lu_dev;
-}
-
-struct ccc_device *lu2ccc_dev(const struct lu_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
-}
-
-struct ccc_device *cl2ccc_dev(const struct cl_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl);
-}
-
-struct lu_object *ccc2lu(struct ccc_object *vob)
-{
- return &vob->cob_cl.co_lu;
-}
-
-struct ccc_object *lu2ccc(const struct lu_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl.co_lu);
-}
-
-struct ccc_object *cl2ccc(const struct cl_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl);
-}
-
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
-{
- return container_of(slice, struct ccc_lock, clk_cl);
-}
-
-struct ccc_io *cl2ccc_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct ccc_io *cio;
-
- cio = container_of(slice, struct ccc_io, cui_cl);
- LASSERT(cio == ccc_env_io(env));
- return cio;
-}
-
-struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct ccc_req, crq_cl);
-}
-
-struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
- return cl2ccc_page(slice)->cpg_page;
-}
-
-/*****************************************************************************
- *
- * Accessors.
- *
- */
-int ccc_object_invariant(const struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
- struct cl_inode_info *lli = cl_i2info(inode);
-
- return (S_ISREG(cl_inode_mode(inode)) ||
- /* i_mode of unlinked inode is zeroed. */
- cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
-}
-
-struct inode *ccc_object_inode(const struct cl_object *obj)
-{
- return cl2ccc(obj)->cob_inode;
-}
-
-/**
- * Initialize or update CLIO structures for regular files when new
- * meta-data arrives from the server.
- *
- * \param inode regular file inode
- * \param md new file metadata from MDS
- * - allocates cl_object if necessary,
- * - updated layout, if object was already here.
- */
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
-{
- struct lu_env *env;
- struct cl_inode_info *lli;
- struct cl_object *clob;
- struct lu_site *site;
- struct lu_fid *fid;
- struct cl_object_conf conf = {
- .coc_inode = inode,
- .u = {
- .coc_md = md
- }
- };
- int result = 0;
- int refcheck;
-
- LASSERT(md->body->valid & OBD_MD_FLID);
- LASSERT(S_ISREG(cl_inode_mode(inode)));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- site = cl_i2sbi(inode)->ll_site;
- lli = cl_i2info(inode);
- fid = &lli->lli_fid;
- LASSERT(fid_is_sane(fid));
-
- if (!lli->lli_clob) {
- /* clob is slave of inode, empty lli_clob means for new inode,
- * there is no clob in cache with the given fid, so it is
- * unnecessary to perform lookup-alloc-lookup-insert, just
- * alloc and insert directly.
- */
- LASSERT(inode->i_state & I_NEW);
- conf.coc_lu.loc_flags = LOC_F_NEW;
- clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
- fid, &conf);
- if (!IS_ERR(clob)) {
- /*
- * No locking is necessary, as new inode is
- * locked by I_NEW bit.
- */
- lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
- lu_object_ref_add(&clob->co_lu, "inode", inode);
- } else
- result = PTR_ERR(clob);
- } else {
- result = cl_conf_set(env, lli->lli_clob, &conf);
- }
-
- cl_env_put(env, &refcheck);
-
- if (result != 0)
- CERROR("Failure to initialize cl object "DFID": %d\n",
- PFID(fid), result);
- return result;
-}
-
-/**
- * Wait for others drop their references of the object at first, then we drop
- * the last one, which will lead to the object be destroyed immediately.
- * Must be called after cl_object_kill() against this object.
- *
- * The reason we want to do this is: destroying top object will wait for sub
- * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
- * to initiate top object destroying which may deadlock. See bz22520.
- */
-static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
-{
- struct lu_object_header *header = obj->co_lu.lo_header;
- wait_queue_t waiter;
-
- if (unlikely(atomic_read(&header->loh_ref) != 1)) {
- struct lu_site *site = obj->co_lu.lo_dev->ld_site;
- struct lu_site_bkt_data *bkt;
-
- bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
-
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&header->loh_ref) == 1)
- break;
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
- }
-
- cl_object_put(env, obj);
-}
-
-void cl_inode_fini(struct inode *inode)
-{
- struct lu_env *env;
- struct cl_inode_info *lli = cl_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- int refcheck;
- int emergency;
-
- if (clob) {
- void *cookie;
-
- cookie = cl_env_reenter();
- env = cl_env_get(&refcheck);
- emergency = IS_ERR(env);
- if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
- LASSERT(ccc_inode_fini_env);
- cl_env_implant(ccc_inode_fini_env, &refcheck);
- env = ccc_inode_fini_env;
- }
- /*
- * cl_object cache is a slave to inode cache (which, in turn
- * is a slave to dentry cache), don't keep cl_object in memory
- * when its master is evicted.
- */
- cl_object_kill(env, clob);
- lu_object_ref_del(&clob->co_lu, "inode", inode);
- cl_object_put_last(env, clob);
- lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
- } else
- cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- }
-}
-
-/**
- * return IF_* type for given lu_dirent entry.
- * IF_* flag shld be converted to particular OS file type in
- * platform llite module.
- */
-__u16 ll_dirent_type_get(struct lu_dirent *ent)
-{
- __u16 type = 0;
- struct luda_type *lt;
- int len = 0;
-
- if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
-
- len = le16_to_cpu(ent->lde_namelen);
- len = (len + align) & ~align;
- lt = (void *)ent->lde_name + len;
- type = IFTODT(le16_to_cpu(lt->lt_type));
- }
- return type;
-}
-
-/**
- * build inode number from passed @fid
- */
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
-{
- if (BITS_PER_LONG == 32 || api32)
- return fid_flatten32(fid);
- else
- return fid_flatten(fid);
-}
-
-/**
- * build inode generation from passed @fid. If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them.
- */
-__u32 cl_fid_build_gen(const struct lu_fid *fid)
-{
- __u32 gen;
-
- if (fid_is_igif(fid)) {
- gen = lu_igif_gen(fid);
- return gen;
- }
-
- gen = fid_flatten(fid) >> 32;
- return gen;
-}
-
-/* lsm is unreliable after hsm implementation as layout can be changed at
- * any time. This is only to support old, non-clio-ized interfaces. It will
- * cause deadlock if clio operations are called with this extra layout refcount
- * because in case the layout changed during the IO, ll_layout_refresh() will
- * have to wait for the refcount to become zero to destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT.
- */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(cl_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
-}
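The deleted cl_fid_build_ino()/cl_fid_build_gen() helpers above map a Lustre FID onto the inode number and generation the VFS expects; when the flattened 64-bit value must be squeezed into a 32-bit inode number, the overflowed high bits become the generation so (ino, gen) pairs stay unique. A minimal standalone sketch of that idea, assuming a simplified two-field FID rather than the real struct lu_fid (whose fid_flatten()/fid_flatten32() fold f_seq/f_oid differently):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified FID for illustration only. */
struct sfid {
	uint64_t seq;	/* sequence number */
	uint32_t oid;	/* object id within the sequence */
};

/* Stand-in for fid_flatten(): fold the FID into one 64-bit value;
 * the 24-bit OID space is an assumption of this sketch. */
static uint64_t sfid_flatten(const struct sfid *fid)
{
	return (fid->seq << 24) | fid->oid;
}

/* 32-bit API: the inode number must fit in 32 bits. */
static uint32_t sfid_build_ino32(const struct sfid *fid)
{
	return (uint32_t)sfid_flatten(fid);
}

/* The truncated high bits become the generation, mirroring the
 * deleted cl_fid_build_gen(). */
static uint32_t sfid_build_gen(const struct sfid *fid)
{
	return (uint32_t)(sfid_flatten(fid) >> 32);
}

int main(void)
{
	struct sfid fid = { .seq = 0x200000401ULL, .oid = 5 };

	printf("ino32=%u gen=%u\n", sfid_build_ino32(&fid),
	       sfid_build_gen(&fid));
	return 0;
}

ll_dirent_type_get() is not lost either: the dir.c hunk further down re-adds it to llite as a static helper.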
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index e5d1344e8..621323f6e 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -54,7 +54,7 @@ struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
lock_res(lock->l_resource);
- lock->l_flags |= LDLM_FL_RES_LOCKED;
+ ldlm_set_res_locked(lock);
return lock->l_resource;
}
EXPORT_SYMBOL(lock_res_and_lock);
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(lock_res_and_lock);
void unlock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+ ldlm_clear_res_locked(lock);
unlock_res(lock->l_resource);
spin_unlock(&lock->l_lock);
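These two hunks are the first of many in this patch that replace open-coded l_flags bit twiddling with per-flag accessors such as ldlm_set_res_locked() and ldlm_clear_res_locked(). The accessor pattern in isolation, as a sketch assuming a single 64-bit flags word and an invented bit value (the real ldlm_is_*/ldlm_set_*/ldlm_clear_* helpers live in lustre_dlm_flags.h):

#include <stdint.h>

#define XFL_RES_LOCKED	(1ULL << 23)	/* assumed bit, for illustration */

struct xlock {
	uint64_t l_flags;
};

static inline int xlock_is_res_locked(const struct xlock *lock)
{
	return !!(lock->l_flags & XFL_RES_LOCKED);
}

static inline void xlock_set_res_locked(struct xlock *lock)
{
	lock->l_flags |= XFL_RES_LOCKED;
}

static inline void xlock_clear_res_locked(struct xlock *lock)
{
	lock->l_flags &= ~XFL_RES_LOCKED;
}

Besides readability, each flag now has a single grep-able access point, which matters in the hunks below where a flag's handling changes.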
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index a803e200f..cf1f17836 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -75,12 +75,12 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
* just after we finish and take our lock into account in its
* calculation of the kms
*/
- lock->l_flags |= LDLM_FL_KMS_IGNORE;
+ ldlm_set_kms_ignore(lock);
list_for_each(tmp, &res->lr_granted) {
lck = list_entry(tmp, struct ldlm_lock, l_res_link);
- if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+ if (ldlm_is_kms_ignore(lck))
continue;
if (lck->l_policy_data.l_extent.end >= old_kms)
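The kms ("known minimum size") recalculation above flags the departing lock with KMS_IGNORE and then scans the remaining granted extents for the largest end offset. The core of that scan, as a standalone sketch with stand-in types (the real ldlm_extent_shift_kms() walks the resource's lr_granted list):

#include <stddef.h>
#include <stdint.h>

struct xext_lock {
	uint64_t end;		/* last byte covered by the extent */
	int kms_ignore;		/* set on locks to leave out, as above */
};

static uint64_t xshift_kms(const struct xext_lock *granted, size_t n,
			   uint64_t old_kms)
{
	uint64_t kms = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (granted[i].kms_ignore)
			continue;
		/* another lock still covers old_kms: nothing shrinks */
		if (granted[i].end >= old_kms)
			return old_kms;
		if (granted[i].end + 1 > kms)
			kms = granted[i].end + 1;
	}
	return kms;
}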
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b88b78606..349bfcc9b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -101,8 +101,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
+ if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -436,7 +435,7 @@ ldlm_flock_interrupted_wait(void *data)
lock_res_and_lock(lock);
/* client side - set flag to prevent lock from being put on LRU list */
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
}
@@ -520,30 +519,29 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- return 0;
- }
-
- if (lock->l_flags & LDLM_FL_FAILED) {
+ if (ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
return -EIO;
}
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
LDLM_DEBUG(lock, "client-side enqueue granted");
lock_res_and_lock(lock);
+ /*
+ * Protect against race where lock could have been just destroyed
+ * due to overlap in ldlm_process_flock_lock().
+ */
+ if (ldlm_is_destroyed(lock)) {
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+ return 0;
+ }
+
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
- if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+ if (ldlm_is_flock_deadlock(lock)) {
LDLM_DEBUG(lock, "client-side enqueue deadlock received");
rc = -EDEADLK;
} else if (flags & LDLM_FL_TEST_LOCK) {
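The reshuffling in ldlm_flock_completion_ast() is a check-under-lock fix: the destroyed test used to run before lock_res_and_lock(), leaving a window in which ldlm_process_flock_lock() could destroy the lock due to an overlap. The shape of the fix, sketched with a pthread mutex standing in for the ldlm resource lock:

#include <pthread.h>

struct xflock {
	pthread_mutex_t res_lock;
	int destroyed;
};

/* Returns 0 if the lock vanished while the waiter was waking up,
 * 1 if it is still valid and was handled under the resource lock. */
static int xcomplete_granted(struct xflock *lock)
{
	pthread_mutex_lock(&lock->res_lock);
	/* Testing "destroyed" before taking res_lock would be a
	 * time-of-check/time-of-use race against a concurrent destroy. */
	if (lock->destroyed) {
		pthread_mutex_unlock(&lock->res_lock);
		return 0;
	}
	/* ... list_del_init() and grant processing in the real code ... */
	pthread_mutex_unlock(&lock->res_lock);
	return 1;
}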
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e21373e73..32f227f37 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -95,9 +95,10 @@ enum {
LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs)
- */
+ LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any RPCs)
+ */
+ LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -145,7 +146,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
enum ldlm_desc_ast_t ast_type);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
+#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
@@ -216,8 +218,6 @@ enum ldlm_policy_res {
LDLM_POLICY_SKIP_LOCK
};
-typedef enum ldlm_policy_res ldlm_policy_res_t;
-
#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
@@ -305,9 +305,10 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
int ret = 0;
lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+ if ((lock->l_req_mode == lock->l_granted_mode) &&
+ !ldlm_is_cp_reqd(lock))
+ ret = 1;
+ else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
unlock_res_and_lock(lock);
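The ldlm_lock_remove_from_lru() change is a compatibility shim: the function grows a last_use parameter, and a macro keeps every existing caller compiling with the old semantics by passing 0 ("skip the staleness check"). The shim pattern with hypothetical names:

#include <time.h>

struct ylock;	/* opaque in this sketch */

/* New, extended API; last_use == 0 disables the comparison. */
int ylock_remove_from_lru_check(struct ylock *lock, time_t last_use);

/* Old API preserved as a macro so untouched call sites keep working. */
#define ylock_remove_from_lru(lock) ylock_remove_from_lru_check(lock, 0)

The matching definition, and the reason the check exists, appear in the ldlm_lock.c hunks below.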
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7dd7df59a..b4ffbe2fc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -314,7 +314,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_write_list);
INIT_LIST_HEAD(&cli->cl_loi_read_list);
- client_obd_list_lock_init(&cli->cl_loi_list_lock);
+ spin_lock_init(&cli->cl_loi_list_lock);
atomic_set(&cli->cl_pending_w_pages, 0);
atomic_set(&cli->cl_pending_r_pages, 0);
cli->cl_r_in_flight = 0;
@@ -333,7 +333,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
atomic_set(&cli->cl_lru_busy, 0);
atomic_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
- client_obd_list_lock_init(&cli->cl_lru_list_lock);
+ spin_lock_init(&cli->cl_lru_list_lock);
+ atomic_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -355,6 +356,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
LNET_MTU >> PAGE_SHIFT);
+ /*
+ * set cl_chunkbits default value to PAGE_SHIFT,
+ * it will be updated at OSC connection time.
+ */
+ cli->cl_chunkbits = PAGE_SHIFT;
+
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
@@ -429,7 +436,6 @@ err_ldlm:
ldlm_put_ref();
err:
return rc;
-
}
EXPORT_SYMBOL(client_obd_setup);
@@ -438,6 +444,7 @@ int client_obd_cleanup(struct obd_device *obddev)
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
+ obd_cleanup_client_import(obddev);
LASSERT(!obddev->u.cli.cl_import);
ldlm_put_ref();
@@ -748,6 +755,7 @@ int ldlm_error2errno(enum ldlm_error error)
switch (error) {
case ELDLM_OK:
+ case ELDLM_LOCK_MATCHED:
result = 0;
break;
case ELDLM_LOCK_CHANGED:
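cl_chunkbits is a shift, not a byte count: the client I/O chunk size is 1 << cl_chunkbits, so the PAGE_SHIFT default above makes the chunk one page until the OSC connect response supplies the negotiated value. A tiny worked example, assuming 4 KiB pages:

#include <stdio.h>

#define XPAGE_SHIFT 12			/* assumption: 4 KiB pages */

int main(void)
{
	unsigned int cl_chunkbits = XPAGE_SHIFT; /* default at setup time */

	printf("default chunk: %u bytes\n", 1u << cl_chunkbits);

	cl_chunkbits = 16;		/* e.g. updated at OSC connect */
	printf("negotiated chunk: %u bytes\n", 1u << cl_chunkbits);
	return 0;
}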
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index ecd65a7a3..bff94ea12 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -185,7 +185,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+ LASSERT(ldlm_is_destroyed(lock));
LASSERT(list_empty(&lock->l_res_link));
LASSERT(list_empty(&lock->l_pending_chain));
@@ -229,15 +229,25 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, it will remove the lock from LRU only if
+ * it matches lock's l_last_used.
+ *
+ * \retval 0 the lock was not removed: either it was not on the LRU list,
+ * or \a last_use was non-zero and did not match the lock's
+ * l_last_used.
+ * \retval 1 the lock was on the LRU list and has been removed.
*/
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
+ int rc = 0;
spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
+ if (last_use == 0 || last_use == lock->l_last_used)
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
+
return rc;
}
@@ -252,8 +262,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- if (lock->l_flags & LDLM_FL_SKIPPED)
- lock->l_flags &= ~LDLM_FL_SKIPPED;
+ ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
}
@@ -318,11 +327,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
LBUG();
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
LASSERT(list_empty(&lock->l_lru));
return 0;
}
- lock->l_flags |= LDLM_FL_DESTROYED;
+ ldlm_set_destroyed(lock);
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -544,7 +553,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it
*/
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
+ if (flags == 0 && !ldlm_is_destroyed(lock)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
}
@@ -554,21 +563,22 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(lock->l_resource);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+ if (unlikely(ldlm_is_destroyed(lock))) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
return NULL;
}
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
+ if (flags) {
+ if (lock->l_flags & flags) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
- if (flags)
lock->l_flags |= flags;
+ }
unlock_res_and_lock(lock);
return lock;
@@ -599,14 +609,14 @@ EXPORT_SYMBOL(ldlm_lock2desc);
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
+ if (!ldlm_is_ast_sent(lock)) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- lock->l_flags |= LDLM_FL_AST_SENT;
+ ldlm_set_ast_sent(lock);
/* If the enqueuing client said so, tell the AST recipient to
* discard dirty data, rather than writing back.
*/
- if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
- lock->l_flags |= LDLM_FL_DISCARD_DATA;
+ if (ldlm_is_ast_discard_data(new))
+ ldlm_set_discard_data(lock);
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
@@ -621,8 +631,8 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
- lock->l_flags |= LDLM_FL_CP_REQD;
+ if (!ldlm_is_cp_reqd(lock)) {
+ ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
LASSERT(list_empty(&lock->l_cp_ast));
list_add(&lock->l_cp_ast, work_list);
@@ -657,7 +667,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
struct ldlm_lock *lock;
lock = ldlm_handle2lock(lockh);
- LASSERT(lock);
+ LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie);
ldlm_lock_addref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
}
@@ -704,7 +714,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
if (lock) {
lock_res_and_lock(lock);
if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ !ldlm_is_cbpending(lock)) {
ldlm_lock_addref_internal_nolock(lock, mode);
result = 0;
}
@@ -770,17 +780,17 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_decref_internal_nolock(lock, mode);
- if (lock->l_flags & LDLM_FL_LOCAL &&
+ if (ldlm_is_local(lock) &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
* the last reference, cancel the lock.
*/
CDEBUG(D_INFO, "forcing cancel of local lock\n");
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
}
if (!lock->l_readers && !lock->l_writers &&
- (lock->l_flags & LDLM_FL_CBPENDING)) {
+ ldlm_is_cbpending(lock)) {
/* If we received a blocked AST and this was the last reference,
* run the callback.
*/
@@ -791,16 +801,14 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_remove_from_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
- if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+ if (ldlm_is_atomic_cb(lock) ||
ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
ldlm_handle_bl_callback(ns, NULL, lock);
} else if (!lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
-
+ !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) {
LDLM_DEBUG(lock, "add lock into lru list");
/* If this is a client-side namespace and this was the last
@@ -809,7 +817,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -853,7 +861,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
@@ -971,7 +979,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1073,10 +1081,9 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* whose parents already hold a lock so forward progress
* can still happen.
*/
- if (lock->l_flags & LDLM_FL_CBPENDING &&
- !(flags & LDLM_FL_CBPENDING))
+ if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
continue;
- if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+ if (!unref && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
continue;
@@ -1092,6 +1099,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
if (unlikely(match == LCK_GROUP) &&
lock->l_resource->lr_type == LDLM_EXTENT &&
+ policy->l_extent.gid != LDLM_GID_ANY &&
lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
continue;
@@ -1104,11 +1112,10 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
policy->l_inodebits.bits))
continue;
- if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+ if (!unref && LDLM_HAVE_MASK(lock, GONE))
continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !(lock->l_flags & LDLM_FL_LOCAL))
+ if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
continue;
if (flags & LDLM_FL_TEST_LOCK) {
@@ -1142,7 +1149,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
*/
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
- lock->l_flags |= LDLM_FL_LVB_READY;
+ ldlm_set_lvb_ready(lock);
wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1243,8 +1250,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
if (lock) {
ldlm_lock2handle(lock, lockh);
- if ((flags & LDLM_FL_LVB_READY) &&
- (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
@@ -1271,7 +1277,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
l_wait_event(lock->l_waitq,
lock->l_flags & wait_flags,
&lwi);
- if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+ if (!ldlm_is_lvb_ready(lock)) {
if (flags & LDLM_FL_TEST_LOCK)
LDLM_LOCK_RELEASE(lock);
else
@@ -1325,10 +1331,10 @@ enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
lock = ldlm_handle2lock(lockh);
if (lock) {
lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_GONE_MASK)
+ if (LDLM_HAVE_MASK(lock, GONE))
goto out;
- if (lock->l_flags & LDLM_FL_CBPENDING &&
+ if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
goto out;
@@ -1542,7 +1548,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
/* Some flags from the enqueue want to make it into the AST, via the
* lock's l_flags.
*/
- lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+ if (*flags & LDLM_FL_AST_DISCARD_DATA)
+ ldlm_set_ast_discard_data(lock);
/*
* This distinction between local lock trees is very important; a client
@@ -1581,7 +1588,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
lock_res_and_lock(lock);
list_del_init(&lock->l_bl_ast);
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
LASSERT(lock->l_blocking_lock);
lock->l_bl_ast_run++;
@@ -1628,12 +1635,12 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
list_del_init(&lock->l_cp_ast);
- LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+ LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225
*/
completion_callback = lock->l_completion_ast;
- lock->l_flags &= ~LDLM_FL_CP_REQD;
+ ldlm_clear_cp_reqd(lock);
unlock_res_and_lock(lock);
if (completion_callback)
@@ -1778,8 +1785,8 @@ out:
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
check_res_locked(lock->l_resource);
- if (!(lock->l_flags & LDLM_FL_CANCEL)) {
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (!ldlm_is_cancel(lock)) {
+ ldlm_set_cancel(lock);
if (lock->l_blocking_ast) {
unlock_res_and_lock(lock);
lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -1789,7 +1796,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "no blocking ast");
}
}
- lock->l_flags |= LDLM_FL_BL_DONE;
+ ldlm_set_bl_done(lock);
}
/**
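The new ldlm_lock_remove_from_lru_check() above only unlinks the lock if the l_last_used value sampled before the cancel decision still matches, so a lock that was re-used in the meantime stays on the LRU. The compare-under-lock idea in a user-space sketch:

#include <pthread.h>
#include <time.h>

struct zlock {
	time_t last_used;
	int on_lru;
};

static pthread_mutex_t zns_lock = PTHREAD_MUTEX_INITIALIZER;

/* last_use == 0 keeps the old unconditional behaviour; otherwise the
 * removal happens only if nobody touched the lock since the sample.
 * Returns 1 if the entry was removed from the LRU, 0 otherwise. */
static int zlock_remove_from_lru_check(struct zlock *lock, time_t last_use)
{
	int rc = 0;

	pthread_mutex_lock(&zns_lock);
	if (lock->on_lru && (last_use == 0 || last_use == lock->last_used)) {
		lock->on_lru = 0;
		rc = 1;
	}
	pthread_mutex_unlock(&zns_lock);
	return rc;
}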
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index ebe9042ad..ab739f079 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -124,10 +124,10 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
LDLM_DEBUG(lock, "client blocking AST callback handler");
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (ldlm_is_cancel_on_block(lock))
+ ldlm_set_cancel(lock);
do_ast = !lock->l_readers && !lock->l_writers;
unlock_res_and_lock(lock);
@@ -172,7 +172,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_flags & LDLM_FL_DESTROYED)
+ ldlm_is_destroyed(lock))
break;
}
}
@@ -215,7 +215,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
}
lock_res_and_lock(lock);
- if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+ if (ldlm_is_destroyed(lock) ||
lock->l_granted_mode == lock->l_req_mode) {
/* bug 11300: the lock has already been granted */
unlock_res_and_lock(lock);
@@ -291,7 +291,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
out:
if (rc < 0) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_failed(lock);
unlock_res_and_lock(lock);
wake_up(&lock->l_waitq);
}
@@ -360,8 +360,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
spin_lock(&blp->blp_lock);
- if (blwi->blwi_lock &&
- blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
/* add LDLM_FL_DISCARD_DATA requests to the priority list */
list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
} else {
@@ -626,23 +625,22 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
return 0;
}
- if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+ if (ldlm_is_fail_loc(lock) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
lock_res_and_lock(lock);
lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_AST_FLAGS);
+ LDLM_FL_AST_MASK);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
/* If somebody cancels lock and cache is already dropped,
* or lock is failed before cp_ast received on client,
* we can tell the server we have no lock. Otherwise, we
* should send cancel after dropping the cache.
*/
- if (((lock->l_flags & LDLM_FL_CANCELING) &&
- (lock->l_flags & LDLM_FL_BL_DONE)) ||
- (lock->l_flags & LDLM_FL_FAILED)) {
+ if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+ ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
@@ -656,7 +654,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
* Let ldlm_cancel_lru() be fast.
*/
ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_BL_AST;
+ ldlm_set_bl_ast(lock);
}
unlock_res_and_lock(lock);
@@ -674,7 +672,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
case LDLM_BL_CALLBACK:
CDEBUG(D_INODE, "blocking ast\n");
req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
- if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+ if (!ldlm_is_cancel_on_block(lock)) {
rc = ldlm_callback_reply(req, 0);
if (req->rq_no_reply || rc)
ldlm_callback_errmsg(req, "Normal process", rc,
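The callback handler above narrows which wire flags may reach the local lock: only bits in LDLM_FL_AST_MASK survive ldlm_flags_from_wire(), so a buggy or hostile peer cannot flip internal state bits. Masked flag import as a tiny sketch, with invented bit values:

#include <stdint.h>

/* Invented values, for illustration only. */
#define XFL_DISCARD_DATA	(1u << 0)	/* allowed off the wire */
#define XFL_AST_SENT		(1u << 1)	/* allowed off the wire */
#define XFL_DESTROYED		(1u << 2)	/* internal only */

#define XFL_AST_MASK		(XFL_DISCARD_DATA | XFL_AST_SENT)

/* Stand-in for the masked ldlm_flags_from_wire() call above. */
static uint32_t xflags_from_wire(uint32_t wire_flags)
{
	return wire_flags & XFL_AST_MASK;
}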
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 74e193e52..107314e28 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -153,7 +153,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
long delay;
int result;
- if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
@@ -252,7 +252,7 @@ noreproc:
lwd.lwd_lock = lock;
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
@@ -269,7 +269,7 @@ noreproc:
if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
+ ldlm_set_fail_loc(lock);
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
@@ -296,7 +296,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
+ !ldlm_is_failed(lock)) {
/* Make sure that this lock will not be found by raced
* bl_ast and -EINVAL reply is sent to server anyways.
* bug 17645
@@ -347,7 +347,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
struct ldlm_lock *lock;
struct ldlm_reply *reply;
int cleanup_phase = 1;
- int size = 0;
lock = ldlm_handle2lock(lockh);
/* ldlm_cli_enqueue is holding a reference on this lock. */
@@ -375,8 +374,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
goto cleanup;
}
- if (lvb_len != 0) {
- LASSERT(lvb);
+ if (lvb_len > 0) {
+ int size = 0;
size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
RCL_SERVER);
@@ -390,12 +389,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = -EINVAL;
goto cleanup;
}
+ lvb_len = size;
}
if (rc == ELDLM_LOCK_ABORTED) {
- if (lvb_len != 0)
+ if (lvb_len > 0 && lvb)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lvb, size);
+ lvb, lvb_len);
if (rc == 0)
rc = ELDLM_LOCK_ABORTED;
goto cleanup;
@@ -421,7 +421,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
+ LDLM_FL_INHERIT_MASK);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
* to wait with no timeout as well
*/
@@ -489,7 +489,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
/* If the lock has already been granted by a completion AST, don't
* clobber the LVB with an older one.
*/
- if (lvb_len != 0) {
+ if (lvb_len > 0) {
/* We must lock or a racing completion might update lvb without
* letting us know and we'll clobber the correct value.
* Cannot unlock after the check either, as that still leaves
@@ -498,7 +498,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
lock_res_and_lock(lock);
if (lock->l_req_mode != lock->l_granted_mode)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lock->l_lvb_data, size);
+ lock->l_lvb_data, lvb_len);
unlock_res_and_lock(lock);
if (rc < 0) {
cleanup_phase = 1;
@@ -518,7 +518,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
}
}
- if (lvb_len && lvb) {
+ if (lvb_len > 0 && lvb) {
/* Copy the LVB here, and not earlier, because the completion
* AST (if any) can override what we got in the reply
*/
@@ -601,7 +601,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
@@ -821,12 +821,11 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references*/
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
(LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
unlock_res_and_lock(lock);
if (local_only) {
@@ -1131,31 +1130,30 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
* dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
* readahead requests, ...)
*/
-static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
- ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
-
- lock_res_and_lock(lock);
+ enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
/* don't check added & count since we want to process all locks
- * from unused list
+ * from unused list.
+ * It is safe to access lock->l_resource without taking the lock
+ * since the lock has already been granted, so it cannot change.
*/
switch (lock->l_resource->lr_type) {
case LDLM_EXTENT:
case LDLM_IBITS:
- if (cb && cb(lock))
+ if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
break;
default:
result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
+ lock_res_and_lock(lock);
+ ldlm_set_skipped(lock);
+ unlock_res_and_lock(lock);
break;
}
- unlock_res_and_lock(lock);
return result;
}
@@ -1168,10 +1166,10 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
unsigned long cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
@@ -1196,8 +1194,13 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
/* Stop when SLV is not yet come from server or lv is smaller than
* it is.
*/
- return (slv == 0 || lv < slv) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if (slv == 0 || lv < slv)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
}
/**
@@ -1209,10 +1212,10 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU.
@@ -1230,16 +1233,35 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
- /* Stop LRU processing if young lock is found and we reach past count */
- return ((added >= count) &&
- time_before(cfs_time_current(),
- cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if ((added >= count) &&
+ time_before(cfs_time_current(),
+ cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
+}
+
+static enum ldlm_policy_res
+ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ enum ldlm_policy_res result;
+
+ result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK)
+ return result;
+
+ return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
}
/**
@@ -1251,10 +1273,9 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
/* Stop LRU processing when we reach past count or have checked all
* locks in LRU.
@@ -1263,7 +1284,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
+ struct ldlm_namespace *,
struct ldlm_lock *, int,
int, int);
@@ -1281,6 +1303,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
return ldlm_cancel_passed_policy;
+ else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+ return ldlm_cancel_lrur_no_wait_policy;
} else {
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
@@ -1329,6 +1353,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
+ int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -1341,7 +1366,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
LASSERT(pf);
while (!list_empty(&ns->ns_unused_list)) {
- ldlm_policy_res_t result;
+ enum ldlm_policy_res result;
+ time_t last_use = 0;
/* all unused locks */
if (remained-- <= 0)
@@ -1354,17 +1380,20 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ LASSERT(!ldlm_is_bl_ast(lock));
- if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
+ if (no_wait && ldlm_is_skipped(lock))
/* already processed */
continue;
+ last_use = lock->l_last_used;
+ if (last_use == cfs_time_current())
+ continue;
+
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again.
*/
- if (!(lock->l_flags & LDLM_FL_CANCELING))
+ if (!ldlm_is_canceling(lock))
break;
ldlm_lock_remove_from_lru_nolock(lock);
@@ -1407,12 +1436,14 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ if (ldlm_is_canceling(lock) ||
+ (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
- * by itself, or the lock is no longer unused.
+ * by itself, or the lock is no longer unused, or
+ * the lock has been used since the pf() call and
+ * pages could be put under it.
*/
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference,
@@ -1429,7 +1460,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* where while we are doing cancel here, server is also
* silently cancelling this lock.
*/
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+ ldlm_clear_cancel_on_block(lock);
/* Setting the CBPENDING flag is a little misleading,
* but prevents an important race; namely, once
@@ -1526,8 +1557,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
/* If somebody is already doing CANCEL, or blocking AST came,
* skip this lock.
*/
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
+ if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
continue;
if (lockmode_compat(lock->l_granted_mode, mode))
@@ -1771,7 +1801,6 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_iter_helper, &helper);
-
}
/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1887,7 +1916,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
int flags;
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
+ if (ldlm_is_canceling(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
return 0;
}
@@ -1896,7 +1925,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
* server might have long dropped it, but notification of that event was
* lost by network. (and server granted conflicting lock already)
*/
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
return 0;
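The LRU cancel path in this file is policy-driven: ldlm_cancel_lru_policy() selects a callback by flag, and the new LDLM_CANCEL_LRUR_NO_WAIT case is just the LRU-resize policy composed with the no-wait policy, consulting the latter only for locks the former would cancel. A sketch of that dispatch-and-compose structure with simplified signatures:

enum xpolicy_res { XPOLICY_CANCEL, XPOLICY_KEEP, XPOLICY_SKIP };

typedef enum xpolicy_res (*xcancel_policy_t)(int unused, int added,
					     int count);

static enum xpolicy_res xlrur_policy(int unused, int added, int count)
{
	/* placeholder: the real policy weighs lock value against the
	 * server lock volume (SLV) */
	return added < count ? XPOLICY_CANCEL : XPOLICY_KEEP;
}

static enum xpolicy_res xno_wait_policy(int unused, int added, int count)
{
	/* placeholder: the real policy skips locks whose cancellation
	 * would have to send or wait for RPCs */
	return XPOLICY_SKIP;
}

/* Mirrors ldlm_cancel_lrur_no_wait_policy(): KEEP wins outright,
 * anything else must also pass the no-wait check. */
static enum xpolicy_res xlrur_no_wait_policy(int unused, int added,
					     int count)
{
	enum xpolicy_res res = xlrur_policy(unused, added, count);

	return res == XPOLICY_KEEP ?
	       res : xno_wait_policy(unused, added, count);
}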
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 9dede87ad..e99c89c34 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -124,9 +124,15 @@ int ldlm_debugfs_setup(void)
}
rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
+ if (rc) {
+ CERROR("LProcFS failed in ldlm-init\n");
+ goto err_svc;
+ }
return 0;
+err_svc:
+ ldebugfs_remove(&ldlm_svc_debugfs_dir);
err_ns:
ldebugfs_remove(&ldlm_ns_debugfs_dir);
err_type:
@@ -758,12 +764,12 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
list_for_each(tmp, q) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
- if (lock->l_flags & LDLM_FL_CLEANED) {
+ if (ldlm_is_cleaned(lock)) {
lock = NULL;
continue;
}
LDLM_LOCK_GET(lock);
- lock->l_flags |= LDLM_FL_CLEANED;
+ ldlm_set_cleaned(lock);
break;
}
@@ -775,13 +781,13 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
/* Set CBPENDING so nothing in the cancellation path
* can match this lock.
*/
- lock->l_flags |= LDLM_FL_CBPENDING;
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_cbpending(lock);
+ ldlm_set_failed(lock);
lock->l_flags |= flags;
/* ... without sending a CANCEL message for local_only. */
if (local_only)
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ ldlm_set_local_only(lock);
if (local_only && (lock->l_readers || lock->l_writers)) {
/* This is a little bit gross, but much better than the
@@ -1275,7 +1281,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
LDLM_DEBUG(lock, "About to add this lock:\n");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1400,3 +1406,4 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
}
}
+EXPORT_SYMBOL(ldlm_resource_dump);
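ldlm_debugfs_setup() above gains a missing unwind step: a failing ldebugfs_add_vars() now jumps to a new err_svc label that removes the service directory before falling through the pre-existing labels. The cascading-label idiom in isolation, with stand-in setup/teardown functions:

static int xcreate_type(void) { return 0; }
static int xcreate_ns(void) { return 0; }
static int xcreate_svc(void) { return 0; }
static int xadd_vars(void) { return 0; }
static void xremove_svc(void) { }
static void xremove_ns(void) { }
static void xremove_type(void) { }

int xsetup(void)
{
	int rc;

	rc = xcreate_type();
	if (rc)
		return rc;
	rc = xcreate_ns();
	if (rc)
		goto err_type;
	rc = xcreate_svc();
	if (rc)
		goto err_ns;
	rc = xadd_vars();
	if (rc)
		goto err_svc;	/* the step this hunk adds */
	return 0;

err_svc:
	xremove_svc();	/* labels fall through, undoing in reverse order */
err_ns:
	xremove_ns();
err_type:
	xremove_type();
	return rc;
}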
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 9ac29e718..2ce10ff01 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -4,7 +4,8 @@ lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
rw.o namei.o symlink.o llite_mmap.o \
xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
rw26.o super25.o statahead.o \
- ../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o lproc_llite.o
+ glimpse.o lcommon_cl.o lcommon_misc.o \
+ vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
+ lproc_llite.o
llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index dd1c82701..1b6f82a1a 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -108,11 +108,8 @@ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
{
- if ((lock->l_flags &
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
- return LDLM_ITER_CONTINUE;
- return LDLM_ITER_STOP;
+ return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
+ LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
}
/* find any ldlm lock of the inode in mdc and lov
@@ -253,8 +250,8 @@ void ll_invalidate_aliases(struct inode *inode)
{
struct dentry *dentry;
- CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_lock_dcache(inode);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
@@ -289,8 +286,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
if (it->d.lustre.it_lock_mode && inode) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index e4c82883e..4b00d1ac8 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -158,11 +158,16 @@ static int ll_dir_filler(void *_hash, struct page *page0)
int i;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
- inode->i_ino, inode->i_generation, inode, hash);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) hash %llu\n",
+ PFID(ll_inode2fid(inode)), inode, hash);
LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
if (page_pool) {
page_pool[0] = page0;
@@ -177,8 +182,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
page_pool[npages] = page;
}
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
op_data->op_npages = npages;
op_data->op_offset = hash;
rc = md_readpage(exp, op_data, page_pool, &request);
@@ -190,7 +193,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
/* Checked by mdc_readpage() */
if (body->valid & OBD_MD_FLSIZE)
- cl_isize_write(inode, body->size);
+ i_size_write(inode, body->size);
nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
>> PAGE_SHIFT;
@@ -372,8 +375,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
return ERR_PTR(rc);
}
- CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
- dir, dir->i_ino, dir->i_generation);
+ CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(dir)), dir);
md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
&it.d.lustre.it_lock_handle, dir, NULL);
} else {
@@ -468,6 +471,28 @@ fail:
goto out_unlock;
}
+/**
+ * Return the IF_* type for the given lu_dirent entry.
+ * The IF_* flag should be converted to the particular OS file type in
+ * the platform llite module.
+ */
+static __u16 ll_dirent_type_get(struct lu_dirent *ent)
+{
+ __u16 type = 0;
+ struct luda_type *lt;
+ int len = 0;
+
+ if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
+ const unsigned int align = sizeof(struct luda_type) - 1;
+
+ len = le16_to_cpu(ent->lde_namelen);
+ len = (len + align) & ~align;
+ lt = (void *)ent->lde_name + len;
+ type = IFTODT(le16_to_cpu(lt->lt_type));
+ }
+ return type;
+}
+
int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
struct ll_inode_info *info = ll_i2info(inode);
@@ -589,15 +614,16 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
struct inode *inode = file_inode(filp);
struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ __u64 pos = lfd ? lfd->lfd_pos : 0;
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
int api32 = ll_need_32bit_api(sbi);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
- inode->i_ino, inode->i_generation,
- inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos %lu/%llu 32bit_api %d\n",
+ PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
+ i_size_read(inode), api32);
- if (lfd->lfd_pos == MDS_DIR_END_OFF) {
+ if (pos == MDS_DIR_END_OFF) {
/*
* end-of-file.
*/
@@ -605,9 +631,10 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
goto out;
}
- ctx->pos = lfd->lfd_pos;
+ ctx->pos = pos;
rc = ll_dir_read(inode, ctx);
- lfd->lfd_pos = ctx->pos;
+ if (lfd)
+ lfd->lfd_pos = ctx->pos;
if (ctx->pos == MDS_DIR_END_OFF) {
if (api32)
ctx->pos = LL_DIR_END_OFF_32BIT;
@@ -804,9 +831,8 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
- inode->i_ino,
- inode->i_generation, rc);
+ CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
+ PFID(ll_inode2fid(inode)), rc);
goto out;
}
@@ -916,7 +942,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
}
/* Read current file data version */
- rc = ll_data_version(inode, &data_version, 1);
+ rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc != 0) {
CDEBUG(D_HSM, "Could not read file data version of "
@@ -936,6 +962,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
}
progress:
+ /* On error, the request should be considered completed */
+ if (hpk.hpk_errval > 0)
+ hpk.hpk_flags |= HP_FLAG_COMPLETED;
rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
&hpk, NULL);
@@ -997,8 +1026,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
goto progress;
}
- rc = ll_data_version(inode, &data_version,
- copy->hc_hai.hai_action == HSMA_ARCHIVE);
+ rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc) {
CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
@@ -1033,7 +1061,6 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
/* hpk_errval must be >= 0 */
hpk.hpk_errval = EBUSY;
}
-
}
progress:
@@ -1242,8 +1269,8 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct obd_ioctl_data *data;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
- inode->i_ino, inode->i_generation, inode, cmd);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%#x\n",
+ PFID(ll_inode2fid(inode)), inode, cmd);
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
@@ -1362,7 +1389,6 @@ out_free:
lmv_out_free:
obd_ioctl_freedata(buf, len);
return rc;
-
}
case LL_IOC_LOV_SETSTRIPE: {
struct lov_user_md_v3 lumv3;
@@ -1474,8 +1500,9 @@ free_lmv:
cmd == LL_IOC_MDC_GETINFO)) {
rc = 0;
goto skip_lmm;
- } else
+ } else {
goto out_req;
+ }
}
if (cmd == IOC_MDC_GETFILESTRIPE ||
@@ -1688,15 +1715,16 @@ out_quotactl:
return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
case LL_IOC_RMTACL: {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- rc = rct_add(&sbi->ll_rct, current_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- return rc;
- } else
- return 0;
+ rc = rct_add(&sbi->ll_rct, current_pid(), arg);
+ if (!rc)
+ fd->fd_flags |= LL_FILE_RMTACL;
+ return rc;
+ } else {
+ return 0;
+ }
}
#endif
case LL_IOC_GETOBDCOUNT: {
@@ -1817,6 +1845,9 @@ out_quotactl:
return rc;
}
case LL_IOC_HSM_CT_START:
+ if (!capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
sizeof(struct lustre_kernelcomm));
return rc;
@@ -1865,7 +1896,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
int api32 = ll_need_32bit_api(sbi);
loff_t ret = -EINVAL;
- inode_lock(inode);
switch (origin) {
case SEEK_SET:
break;
@@ -1903,7 +1933,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
goto out;
out:
- inode_unlock(inode);
return ret;
}
@@ -1922,7 +1951,7 @@ const struct file_operations ll_dir_operations = {
.open = ll_dir_open,
.release = ll_dir_release,
.read = generic_read_dir,
- .iterate = ll_readdir,
+ .iterate_shared = ll_readdir,
.unlocked_ioctl = ll_dir_ioctl,
.fsync = ll_fsync,
};
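Two related hardening changes in dir.c: ll_readdir() is now wired up as ->iterate_shared (the VFS holds the inode lock shared for readdir, which is also why ll_dir_seek() drops its own inode_lock()/inode_unlock() pair), and the per-open file data may be absent, so the directory position falls back to 0 and is written back only when the structure exists. The defensive position handling in miniature:

#include <stdint.h>

struct xfile_data {
	uint64_t lfd_pos;	/* per-open directory position */
};

/* Stand-in for the ll_readdir() hunk above: tolerate lfd == NULL
 * instead of dereferencing it unconditionally. */
static int xreaddir(struct xfile_data *lfd, uint64_t *ctx_pos)
{
	*ctx_pos = lfd ? lfd->lfd_pos : 0;
	/* ... emit entries, advancing *ctx_pos ... */
	if (lfd)
		lfd->lfd_pos = *ctx_pos;
	return 0;
}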
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index cf619af3c..f47f2acaf 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -45,6 +45,7 @@
#include "../include/lustre_lite.h"
#include <linux/pagemap.h>
#include <linux/file.h>
+#include <linux/mount.h>
#include "llite_internal.h"
#include "../include/lustre/ll_fiemap.h"
@@ -87,8 +88,7 @@ void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
op_data->op_attr_blocks = inode->i_blocks;
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- ll_inode_to_ext_flags(inode->i_flags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
if (fh)
op_data->op_handle = *fh;
@@ -170,13 +170,15 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
*/
rc = ll_som_update(inode, op_data);
if (rc) {
- CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
+ ll_i2mdexp(inode)->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
rc = 0;
}
} else if (rc) {
- CERROR("inode %lu mdc close failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
+ ll_i2mdexp(inode)->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
}
/* DATA_MODIFIED flag was successfully sent on close, cancel data
@@ -278,7 +280,7 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
- ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+ ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
if (fd->fd_lease_och) {
bool lease_broken;
@@ -343,8 +345,8 @@ int ll_file_release(struct inode *inode, struct file *file)
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
@@ -543,8 +545,8 @@ int ll_file_open(struct inode *inode, struct file *file)
struct ll_file_data *fd;
int rc = 0, opendir_set = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
- inode->i_generation, inode, file->f_flags);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
+ PFID(ll_inode2fid(inode)), inode, file->f_flags);
it = file->private_data; /* XXX: compat macro */
file->private_data = NULL; /* prevent ll_local_open assertion */
@@ -677,7 +679,9 @@ restart:
if (rc)
goto out_och_free;
- LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF));
+ LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
+ "inode %p: disposition %x, status %d\n", inode,
+ it_disposition(it, ~0), it->d.lustre.it_status);
rc = ll_local_open(file, it, fd, *och_p);
if (rc)
@@ -875,16 +879,19 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
return och;
out_close:
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
- if (rc2)
- CERROR("Close openhandle returned %d\n", rc2);
-
- /* cancel open lock */
+ /* Cancel open lock */
if (it.d.lustre.it_lock_mode != 0) {
ldlm_lock_decref_and_cancel(&och->och_lease_handle,
it.d.lustre.it_lock_mode);
it.d.lustre.it_lock_mode = 0;
+ och->och_lease_handle.cookie = 0ULL;
}
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ if (rc2 < 0)
+ CERROR("%s: error closing file "DFID": %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&ll_i2info(inode)->lli_fid), rc2);
+ och = NULL; /* och has been freed in ll_close_inode_openhandle() */
out_release_it:
ll_intent_release(&it);
out:
@@ -908,7 +915,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
lock_res_and_lock(lock);
cancelled = ldlm_is_cancel(lock);
unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
@@ -926,7 +933,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
/* Fills the obdo with the attributes for the lsm */
static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obdo *obdo, __u64 ioepoch, int sync)
+ struct obdo *obdo, __u64 ioepoch, int dv_flags)
{
struct ptlrpc_request_set *set;
struct obd_info oinfo = { };
@@ -945,9 +952,11 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
OBD_MD_FLDATAVERSION;
- if (sync) {
+ if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
+ if (dv_flags & LL_DV_WR_FLUSH)
+ oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
}
set = ptlrpc_prep_set();
@@ -960,11 +969,16 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
}
- if (rc == 0)
+ if (rc == 0) {
oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
OBD_MD_FLATIME | OBD_MD_FLMTIME |
OBD_MD_FLCTIME | OBD_MD_FLSIZE |
- OBD_MD_FLDATAVERSION);
+ OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
+ if (dv_flags & LL_DV_WR_FLUSH &&
+ !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
+ oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
+ return -ENOTSUPP;
+ }
return rc;
}
@@ -980,7 +994,7 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
lsm = ccc_inode_lsm_get(inode);
rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
- obdo, ioepoch, sync);
+ obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
if (rc == 0) {
struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
@@ -994,50 +1008,57 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
return rc;
}
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
+int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct ost_lvb lvb;
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ s64 atime;
+ s64 mtime;
+ s64 ctime;
int rc = 0;
ll_inode_size_lock(inode);
+
	/* merge the timestamps most recently obtained from the MDS with
	 * the timestamps obtained from the OSTs
*/
- LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
+ LTIME_S(inode->i_atime) = lli->lli_atime;
+ LTIME_S(inode->i_mtime) = lli->lli_mtime;
+ LTIME_S(inode->i_ctime) = lli->lli_ctime;
- lvb.lvb_size = i_size_read(inode);
- lvb.lvb_blocks = inode->i_blocks;
- lvb.lvb_mtime = LTIME_S(inode->i_mtime);
- lvb.lvb_atime = LTIME_S(inode->i_atime);
- lvb.lvb_ctime = LTIME_S(inode->i_ctime);
+ mtime = LTIME_S(inode->i_mtime);
+ atime = LTIME_S(inode->i_atime);
+ ctime = LTIME_S(inode->i_ctime);
cl_object_attr_lock(obj);
rc = cl_object_attr_get(env, obj, attr);
cl_object_attr_unlock(obj);
- if (rc == 0) {
- if (lvb.lvb_atime < attr->cat_atime)
- lvb.lvb_atime = attr->cat_atime;
- if (lvb.lvb_ctime < attr->cat_ctime)
- lvb.lvb_ctime = attr->cat_ctime;
- if (lvb.lvb_mtime < attr->cat_mtime)
- lvb.lvb_mtime = attr->cat_mtime;
+ if (rc != 0)
+ goto out_size_unlock;
- CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
- PFID(&lli->lli_fid), attr->cat_size);
- cl_isize_write_nolock(inode, attr->cat_size);
+ if (atime < attr->cat_atime)
+ atime = attr->cat_atime;
- inode->i_blocks = attr->cat_blocks;
+ if (ctime < attr->cat_ctime)
+ ctime = attr->cat_ctime;
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- }
+ if (mtime < attr->cat_mtime)
+ mtime = attr->cat_mtime;
+
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(&lli->lli_fid), attr->cat_size);
+
+ i_size_write(inode, attr->cat_size);
+
+ inode->i_blocks = attr->cat_blocks;
+
+ LTIME_S(inode->i_mtime) = mtime;
+ LTIME_S(inode->i_atime) = atime;
+ LTIME_S(inode->i_ctime) = ctime;
+
+out_size_unlock:
ll_inode_size_unlock(inode);
return rc;
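
The rewritten ll_merge_attr() above drops the intermediate ost_lvb and merges timestamps directly: the client keeps whichever value is newer, the one cached from the MDS (lli_*time) or the one the OSTs report (cat_*time). A minimal sketch of that merge rule in isolation (hypothetical helper, not part of this patch):

/* Hypothetical helper: the merge rule used above for atime/mtime/ctime.
 * MDS-cached and OST-reported timestamps race, so keep the newest.
 */
static inline s64 ll_newest_time(s64 mds_time, s64 ost_time)
{
	return ost_time > mds_time ? ost_time : mds_time;
}
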
@@ -1120,47 +1141,48 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct cl_io *io;
ssize_t result;
+ CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zd\n",
+ file->f_path.dentry->d_name.name, iot, *ppos, count);
+
restart:
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
ll_io_init(io, file, iot == CIT_WRITE);
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
int write_mutex_locked = 0;
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- vio->cui_io_subtype = args->via_io_subtype;
+ vio->vui_fd = LUSTRE_FPRIVATE(file);
+ vio->vui_io_subtype = args->via_io_subtype;
- switch (vio->cui_io_subtype) {
+ switch (vio->vui_io_subtype) {
case IO_NORMAL:
- cio->cui_iter = args->u.normal.via_iter;
- cio->cui_iocb = args->u.normal.via_iocb;
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
if ((iot == CIT_WRITE) &&
- !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
if (mutex_lock_interruptible(&lli->
lli_write_mutex)) {
result = -ERESTARTSYS;
goto out;
}
write_mutex_locked = 1;
- } else if (iot == CIT_READ) {
- down_read(&lli->lli_trunc_sem);
}
+ down_read(&lli->lli_trunc_sem);
break;
case IO_SPLICE:
- vio->u.splice.cui_pipe = args->u.splice.via_pipe;
- vio->u.splice.cui_flags = args->u.splice.via_flags;
+ vio->u.splice.vui_pipe = args->u.splice.via_pipe;
+ vio->u.splice.vui_flags = args->u.splice.via_flags;
break;
default:
- CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
+ CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
LBUG();
}
result = cl_io_loop(env, io);
+ if (args->via_io_subtype == IO_NORMAL)
+ up_read(&lli->lli_trunc_sem);
if (write_mutex_locked)
mutex_unlock(&lli->lli_write_mutex);
- else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
- up_read(&lli->lli_trunc_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
@@ -1197,6 +1219,7 @@ out:
fd->fd_write_failed = true;
}
}
+ CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
return result;
}
@@ -1212,7 +1235,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_NORMAL);
+ args = ll_env_args(env, IO_NORMAL);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
@@ -1236,7 +1259,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_NORMAL);
+ args = ll_env_args(env, IO_NORMAL);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
@@ -1262,7 +1285,7 @@ static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_SPLICE);
+ args = ll_env_args(env, IO_SPLICE);
args->u.splice.via_pipe = pipe;
args->u.splice.via_flags = flags;
@@ -1354,7 +1377,8 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum, int lum_size)
+ __u64 flags, struct lov_user_md *lum,
+ int lum_size)
{
struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
@@ -1363,8 +1387,8 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
lsm = ccc_inode_lsm_get(inode);
if (lsm) {
ccc_inode_lsm_put(inode, lsm);
- CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
- inode->i_ino);
+ CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
rc = -EEXIST;
goto out;
}
@@ -1478,7 +1502,7 @@ out:
static int ll_lov_setea(struct inode *inode, struct file *file,
unsigned long arg)
{
- int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+ __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
struct lov_user_md *lump;
int lum_size = sizeof(struct lov_user_md) +
sizeof(struct lov_user_ost_data);
@@ -1512,7 +1536,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
int lum_size, rc;
- int flags = FMODE_WRITE;
+ __u64 flags = FMODE_WRITE;
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
@@ -1561,7 +1585,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
+ struct ll_grouplock grouplock;
int rc;
if (arg == 0) {
@@ -1575,14 +1599,14 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.cg_gid);
+ fd->fd_grouplock.lg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
- LASSERT(!fd->fd_grouplock.cg_lock);
+ LASSERT(!fd->fd_grouplock.lg_lock);
spin_unlock(&lli->lli_lock);
- rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
return rc;
@@ -1608,7 +1632,7 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
+ struct ll_grouplock grouplock;
spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
@@ -1616,11 +1640,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
CWARN("no group lock held\n");
return -EINVAL;
}
- LASSERT(fd->fd_grouplock.cg_lock);
+ LASSERT(fd->fd_grouplock.lg_lock);
- if (fd->fd_grouplock.cg_gid != arg) {
+ if (fd->fd_grouplock.lg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.cg_gid);
+ arg, fd->fd_grouplock.lg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
@@ -1861,11 +1885,12 @@ error:
* This value is computed using stripe object version on OST.
* Version is computed using server side locking.
*
- * @param extent_lock Take extent lock. Not needed if a process is already
- * holding the OST object group locks.
+ * @param flags	how to sync on the OST side;
+ *		  0: no sync
+ *		  LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
+ *		  LL_DV_WR_FLUSH: drop all cached pages, LCK_PW on OSTs
*/
-int ll_data_version(struct inode *inode, __u64 *data_version,
- int extent_lock)
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
{
struct lov_stripe_md *lsm = NULL;
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -1887,7 +1912,7 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
goto out;
}
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, extent_lock);
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
if (rc == 0) {
if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
rc = -EOPNOTSUPP;
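
With the old boolean extent_lock replaced by flags, callers of ll_data_version() pick the flush strength explicitly; this patch uses LL_DV_WR_FLUSH in ll_hsm_release() and LL_DV_RD_FLUSH for the sync case of ll_inode_getattr(). A hedged sketch of a wrapper making that contract visible (hypothetical helper, not part of the patch):

/* Hypothetical wrapper over ll_data_version(): LL_DV_RD_FLUSH flushes
 * dirty pages (LCK_PR on OSTs), LL_DV_WR_FLUSH also drops cached pages
 * (LCK_PW on OSTs); 0 means no flush at all.
 */
static int ll_data_version_flushed(struct inode *inode, __u64 *dv,
				   bool for_write)
{
	return ll_data_version(inode, dv,
			       for_write ? LL_DV_WR_FLUSH : LL_DV_RD_FLUSH);
}
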
@@ -1923,7 +1948,7 @@ int ll_hsm_release(struct inode *inode)
}
/* Grab latest data_version and [am]time values */
- rc = ll_data_version(inode, &data_version, 1);
+ rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
if (rc != 0)
goto out;
@@ -1933,7 +1958,7 @@ int ll_hsm_release(struct inode *inode)
goto out;
}
- ll_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
cl_env_nested_put(&nest, env);
/* Release the file.
@@ -2227,8 +2252,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
- inode->i_generation, inode, cmd);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),cmd=%x\n",
+ PFID(ll_inode2fid(inode)), inode, cmd);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
@@ -2331,9 +2356,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
return -EFAULT;
- rc = ll_data_version(inode, &idv.idv_version,
- !(idv.idv_flags & LL_DV_NOFLUSH));
-
+ idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
+ rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
if (rc == 0 && copy_to_user((char __user *)arg, &idv,
sizeof(idv)))
return -EFAULT;
@@ -2499,7 +2523,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = och->och_flags &
(FMODE_READ | FMODE_WRITE);
unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
}
mutex_unlock(&lli->lli_och_mutex);
@@ -2537,9 +2561,8 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
(origin == SEEK_CUR) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
- inode->i_ino, inode->i_generation, inode, retval, retval,
- origin);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
+ PFID(ll_inode2fid(inode)), inode, retval, retval, origin);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
@@ -2603,8 +2626,8 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
if (IS_ERR(env))
return PTR_ERR(env);
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
io->ci_ignore_layout = ignore_layout;
/* initialize parameters for sync */
@@ -2634,8 +2657,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct ptlrpc_request *req;
int rc, err;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
@@ -2693,8 +2716,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
int rc;
int rc2 = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
- inode->i_ino, file_lock);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
+ PFID(ll_inode2fid(inode)), file_lock);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
@@ -2777,9 +2800,9 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
- inode->i_ino, flock.l_flock.pid, flags, einfo.ei_mode,
- flock.l_flock.start, flock.l_flock.end);
+ CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
+ PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
+ einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
op_data, &lockh, &flock, 0, NULL /* req */, flags);
@@ -2901,8 +2924,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
struct obd_export *exp;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
- inode->i_ino, inode->i_generation, inode, dentry);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%pd\n",
+ PFID(ll_inode2fid(inode)), inode, dentry);
exp = ll_i2mdexp(inode);
@@ -2998,9 +3021,9 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
/* if object isn't regular file, don't validate size */
if (!S_ISREG(inode->i_mode)) {
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
} else {
/* In case of restore, the MDT has the right size and has
	 * already sent it back without granting the layout lock,
@@ -3124,8 +3147,8 @@ int ll_inode_permission(struct inode *inode, int mask)
return rc;
}
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
- inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
+ PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
return lustre_check_remote_perm(inode, mask);
@@ -3335,10 +3358,10 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
int rc;
CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
- PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+ PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
lock->l_lvb_data, lock->l_lvb_len);
- if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY))
+ if (lock->l_lvb_data && ldlm_is_lvb_ready(lock))
return 0;
/* if layout lock was granted right away, the layout is returned
@@ -3415,14 +3438,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d",
- inode, PFID(&lli->lli_fid), reconf);
+ LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
+ PFID(&lli->lli_fid), inode, reconf);
/* in case this is a caching lock and reinstate with new inode */
md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
lock_res_and_lock(lock);
- lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+ lvb_ready = ldlm_is_lvb_ready(lock);
unlock_res_and_lock(lock);
/* checking lvb_ready is racy but this is okay. The worst case is
	 * that multiple processes may configure the file at the same time.
@@ -3487,9 +3510,9 @@ out:
/* wait for IO to complete if it's still being used. */
if (wait_layout) {
- CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n",
+ CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
ll_get_fsname(inode->i_sb, NULL, 0),
- inode, PFID(&lli->lli_fid));
+ PFID(&lli->lli_fid), inode);
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_WAIT;
@@ -3498,7 +3521,8 @@ out:
if (rc == 0)
rc = -EAGAIN;
- CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n",
+ CDEBUG(D_INODE, "%s: file="DFID" waiting layout return: %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), rc);
}
return rc;
@@ -3571,9 +3595,9 @@ again:
it.it_op = IT_LAYOUT;
lockh.cookie = 0ULL;
- LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid));
+ LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), inode);
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
NULL, 0, NULL, 0);
@@ -3601,7 +3625,7 @@ again:
/**
 * This function sends a restore request to the MDT
*/
-int ll_layout_restore(struct inode *inode)
+int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
{
struct hsm_user_request *hur;
int len, rc;
@@ -3617,9 +3641,10 @@ int ll_layout_restore(struct inode *inode)
hur->hur_request.hr_flags = 0;
memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
sizeof(hur->hur_user_item[0].hui_fid));
- hur->hur_user_item[0].hui_extent.length = -1;
+ hur->hur_user_item[0].hui_extent.offset = offset;
+ hur->hur_user_item[0].hui_extent.length = length;
hur->hur_request.hr_itemcount = 1;
- rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp,
+ rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
len, hur, NULL);
kfree(hur);
return rc;
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index c4e8a0878..d8ea75424 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -52,7 +52,6 @@
#include <linux/file.h>
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../llite/llite_internal.h"
static const struct cl_lock_descr whole_file = {
@@ -70,14 +69,14 @@ static const struct cl_lock_descr whole_file = {
blkcnt_t dirty_cnt(struct inode *inode)
{
blkcnt_t cnt = 0;
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
void *results[1];
if (inode->i_mapping)
cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
results, 0, 1,
PAGECACHE_TAG_DIRTY);
- if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0)
+ if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
cnt = 1;
return (cnt > 0) ? 1 : 0;
@@ -86,17 +85,17 @@ blkcnt_t dirty_cnt(struct inode *inode)
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
struct inode *inode, struct cl_object *clob, int agl)
{
- struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock *lock;
int result;
result = 0;
if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
- CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
+ CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
if (lli->lli_has_smd) {
+ struct cl_lock *lock = vvp_env_lock(env);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+
/* NOTE: this looks like DLM lock request, but it may
* not be one. Due to CEF_ASYNC flag (translated
* to LDLM_FL_HAS_INTENT by osc), this is
@@ -113,11 +112,10 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
*/
*descr = whole_file;
descr->cld_obj = clob;
- descr->cld_mode = CLM_PHANTOM;
+ descr->cld_mode = CLM_READ;
descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
if (agl)
descr->cld_enq_flags |= CEF_AGL;
- cio->cui_glimpse = 1;
/*
* CEF_ASYNC is used because glimpse sub-locks cannot
* deadlock (because they never conflict with other
@@ -126,21 +124,13 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
* CEF_MUST protects glimpse lock from conversion into
* a lockless mode.
*/
- lock = cl_lock_request(env, io, descr, "glimpse",
- current);
- cio->cui_glimpse = 0;
-
- if (!lock)
- return 0;
-
- if (IS_ERR(lock))
- return PTR_ERR(lock);
+ result = cl_lock_request(env, io, lock);
+ if (result < 0)
+ return result;
- LASSERT(agl == 0);
- result = cl_wait(env, lock);
- if (result == 0) {
- cl_merge_lvb(env, inode);
- if (cl_isize_read(inode) > 0 &&
+ if (!agl) {
+ ll_merge_attr(env, inode);
+ if (i_size_read(inode) > 0 &&
inode->i_blocks == 0) {
/*
* LU-417: Add dirty pages block count
@@ -150,12 +140,11 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
*/
inode->i_blocks = dirty_cnt(inode);
}
- cl_unuse(env, lock);
}
- cl_lock_release(env, lock, "glimpse", current);
+ cl_lock_release(env, lock);
} else {
CDEBUG(D_DLMTRACE, "No objects for inode\n");
- cl_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
}
}
@@ -167,22 +156,24 @@ static int cl_io_get(struct inode *inode, struct lu_env **envout,
{
struct lu_env *env;
struct cl_io *io;
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *clob = lli->lli_clob;
int result;
- if (S_ISREG(cl_inode_mode(inode))) {
+ if (S_ISREG(inode->i_mode)) {
env = cl_env_get(refcheck);
if (!IS_ERR(env)) {
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = clob;
*envout = env;
*ioout = io;
result = 1;
- } else
+ } else {
result = PTR_ERR(env);
- } else
+ }
+ } else {
result = 0;
+ }
return result;
}
@@ -231,14 +222,11 @@ int cl_local_size(struct inode *inode)
{
struct lu_env *env = NULL;
struct cl_io *io = NULL;
- struct ccc_thread_info *cti;
struct cl_object *clob;
- struct cl_lock_descr *descr;
- struct cl_lock *lock;
int result;
int refcheck;
- if (!cl_i2info(inode)->lli_has_smd)
+ if (!ll_i2info(inode)->lli_has_smd)
return 0;
result = cl_io_get(inode, &env, &io, &refcheck);
@@ -247,22 +235,19 @@ int cl_local_size(struct inode *inode)
clob = io->ci_obj;
result = cl_io_init(env, io, CIT_MISC, clob);
- if (result > 0)
+ if (result > 0) {
result = io->ci_result;
- else if (result == 0) {
- cti = ccc_env_info(env);
- descr = &cti->cti_descr;
-
- *descr = whole_file;
- descr->cld_obj = clob;
- lock = cl_lock_peek(env, io, descr, "localsize", current);
- if (lock) {
- cl_merge_lvb(env, inode);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, "localsize", current);
- result = 0;
- } else
- result = -ENODATA;
+ } else if (result == 0) {
+ struct cl_lock *lock = vvp_env_lock(env);
+
+ lock->cll_descr = whole_file;
+ lock->cll_descr.cld_enq_flags = CEF_PEEK;
+ lock->cll_descr.cld_obj = clob;
+ result = cl_lock_request(env, io, lock);
+ if (result == 0) {
+ ll_merge_attr(env, inode);
+ cl_lock_release(env, lock);
+ }
}
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
new file mode 100644
index 000000000..6c00715b4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -0,0 +1,327 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl code shared between vvp and liblustre (and other Lustre clients in the
+ * future).
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include "../../include/linux/libcfs/libcfs.h"
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/quotaops.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+
+#include "../include/obd.h"
+#include "../include/obd_support.h"
+#include "../include/lustre_fid.h"
+#include "../include/lustre_lite.h"
+#include "../include/lustre_dlm.h"
+#include "../include/lustre_ver.h"
+#include "../include/lustre_mdc.h"
+#include "../include/cl_object.h"
+
+#include "../llite/llite_internal.h"
+
+/*
+ * ccc_ prefix stands for "Common Client Code".
+ */
+
+/*****************************************************************************
+ *
+ * Vvp device and device type functions.
+ *
+ */
+
+/**
+ * An `emergency' environment used by cl_inode_fini() when cl_env_get()
+ * fails. Access to this environment is serialized by the
+ * cl_inode_fini_guard mutex.
+ */
+struct lu_env *cl_inode_fini_env;
+int cl_inode_fini_refcheck;
+
+/**
+ * A mutex serializing calls to cl_inode_fini() under extreme memory
+ * pressure, when environments cannot be allocated.
+ */
+static DEFINE_MUTEX(cl_inode_fini_guard);
+
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ int result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+
+ io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
+ io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
+ io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
+ io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_valid = attr->ia_valid;
+
+again:
+ if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ if (attr->ia_valid & ATTR_FILE)
+ /* populate the file descriptor for ftruncate to honor
+ * group lock - see LU-787
+ */
+ vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);
+
+ result = cl_io_loop(env, io);
+ } else {
+ result = io->ci_result;
+ }
+ cl_io_fini(env, io);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+	/* HSM import case: the file is released and cannot be restored;
+	 * no need to fail unless the restore registration itself failed
+	 * with -ENODATA
+ */
+ if (result == -ENODATA && io->ci_restore_needed &&
+ io->ci_result != -ENODATA)
+ result = 0;
+ cl_env_put(env, &refcheck);
+ return result;
+}
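
cl_setattr_ost() above follows the standard CLIO request lifecycle: cl_io_init(), cl_io_loop(), cl_io_fini(), retried while ci_need_restart is set, since a layout change can invalidate the IO mid-flight. The same shape recurs in cl_sync_file_range() earlier in this patch; a generic sketch, with the per-iotype setup elided:

/* Generic shape of the CLIO lifecycle used by cl_setattr_ost() and
 * cl_sync_file_range(); io-specific setup and error paths are elided.
 */
static int clio_run(const struct lu_env *env, struct cl_io *io,
		    enum cl_io_type iot, struct cl_object *obj)
{
	int result;

again:
	if (cl_io_init(env, io, iot, obj) == 0)
		result = cl_io_loop(env, io);
	else
		result = io->ci_result;	/* cl_io_init() handled the IO */
	cl_io_fini(env, io);
	if (unlikely(io->ci_need_restart))
		goto again;
	return result;
}
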
+
+/**
+ * Initialize or update CLIO structures for regular files when new
+ * meta-data arrives from the server.
+ *
+ * \param inode regular file inode
+ * \param md new file metadata from MDS
+ * - allocates cl_object if necessary,
+ * - updates the layout if the object was already there.
+ */
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
+{
+ struct lu_env *env;
+ struct ll_inode_info *lli;
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct lu_fid *fid;
+ struct cl_object_conf conf = {
+ .coc_inode = inode,
+ .u = {
+ .coc_md = md
+ }
+ };
+ int result = 0;
+ int refcheck;
+
+ LASSERT(md->body->valid & OBD_MD_FLID);
+ LASSERT(S_ISREG(inode->i_mode));
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ site = ll_i2sbi(inode)->ll_site;
+ lli = ll_i2info(inode);
+ fid = &lli->lli_fid;
+ LASSERT(fid_is_sane(fid));
+
+ if (!lli->lli_clob) {
+	/* The clob is a slave of the inode: an empty lli_clob means this is
+	 * a new inode and there is no clob in the cache with the given fid,
+	 * so it is unnecessary to perform lookup-alloc-lookup-insert; just
+	 * alloc and insert directly.
+ */
+ LASSERT(inode->i_state & I_NEW);
+ conf.coc_lu.loc_flags = LOC_F_NEW;
+ clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
+ fid, &conf);
+ if (!IS_ERR(clob)) {
+ /*
+			 * No locking is necessary, as the new inode is
+			 * locked by the I_NEW bit.
+ */
+ lli->lli_clob = clob;
+ lli->lli_has_smd = lsm_has_objects(md->lsm);
+ lu_object_ref_add(&clob->co_lu, "inode", inode);
+ } else {
+ result = PTR_ERR(clob);
+ }
+ } else {
+ result = cl_conf_set(env, lli->lli_clob, &conf);
+ }
+
+ cl_env_put(env, &refcheck);
+
+ if (result != 0)
+ CERROR("Failure to initialize cl object " DFID ": %d\n",
+ PFID(fid), result);
+ return result;
+}
+
+/**
+ * Wait for others to drop their references to the object first, then drop
+ * the last one ourselves, which destroys the object immediately.
+ * Must be called after cl_object_kill() against this object.
+ *
+ * The reason we do this: destroying the top object waits for its sub-objects
+ * to be destroyed first, so we can't let the bottom layer (e.g. from ASTs)
+ * initiate destruction of the top object, which may deadlock. See bz22520.
+ */
+static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *header = obj->co_lu.lo_header;
+ wait_queue_t waiter;
+
+ if (unlikely(atomic_read(&header->loh_ref) != 1)) {
+ struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+ struct lu_site_bkt_data *bkt;
+
+ bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+
+ init_waitqueue_entry(&waiter, current);
+ add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&header->loh_ref) == 1)
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+ }
+
+ cl_object_put(env, obj);
+}
+
+void cl_inode_fini(struct inode *inode)
+{
+ struct lu_env *env;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *clob = lli->lli_clob;
+ int refcheck;
+ int emergency;
+
+ if (clob) {
+ void *cookie;
+
+ cookie = cl_env_reenter();
+ env = cl_env_get(&refcheck);
+ emergency = IS_ERR(env);
+ if (emergency) {
+ mutex_lock(&cl_inode_fini_guard);
+ LASSERT(cl_inode_fini_env);
+ cl_env_implant(cl_inode_fini_env, &refcheck);
+ env = cl_inode_fini_env;
+ }
+ /*
+ * cl_object cache is a slave to inode cache (which, in turn
+		 * is a slave to the dentry cache), so don't keep the
+		 * cl_object in memory when its master is evicted.
+ */
+ cl_object_kill(env, clob);
+ lu_object_ref_del(&clob->co_lu, "inode", inode);
+ cl_object_put_last(env, clob);
+ lli->lli_clob = NULL;
+ if (emergency) {
+ cl_env_unplant(cl_inode_fini_env, &refcheck);
+ mutex_unlock(&cl_inode_fini_guard);
+ } else {
+ cl_env_put(env, &refcheck);
+ }
+ cl_env_reexit(cookie);
+ }
+}
+
+/**
+ * Build the inode number from the passed @fid.
+ */
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
+{
+ if (BITS_PER_LONG == 32 || api32)
+ return fid_flatten32(fid);
+ else
+ return fid_flatten(fid);
+}
+
+/**
+ * Build the inode generation from the passed @fid. If our FID overflows the
+ * 32-bit inode number, return a non-zero generation to distinguish them.
+ */
+__u32 cl_fid_build_gen(const struct lu_fid *fid)
+{
+ __u32 gen;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ return gen;
+ }
+
+ gen = fid_flatten(fid) >> 32;
+ return gen;
+}
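
cl_fid_build_ino() and cl_fid_build_gen() pair up wherever the client materializes a VFS-visible identity from a FID; a hedged sketch of that use (hypothetical call site — the real one, inode initialization, is outside this patch):

/* Hypothetical call site: derive the VFS inode number and generation
 * from the Lustre FID; ll_need_32bit_api() selects 32-bit flattening.
 */
static void ll_set_inode_identity(struct inode *inode,
				  const struct lu_fid *fid)
{
	inode->i_ino = cl_fid_build_ino(fid,
					ll_need_32bit_api(ll_i2sbi(inode)));
	inode->i_generation = cl_fid_build_gen(fid);
}
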
+
+/* The lsm is unreliable after the HSM implementation, as the layout can be
+ * changed at any time. This exists only to support old, non-CLIO-ized
+ * interfaces. Calling CLIO operations while holding this extra layout
+ * refcount can deadlock: if the layout changes during the IO,
+ * ll_layout_refresh() must wait for the refcount to drop to zero before it
+ * can destroy the older layout.
+ *
+ * Notice that the lsm returned by this function may not be valid unless called
+ * inside layout lock - MDS_INODELOCK_LAYOUT.
+ */
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
+{
+ return lov_lsm_get(ll_i2info(inode)->lli_clob);
+}
+
+inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
+{
+ lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
+}
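
As the comment above warns, this pair exists only for legacy interfaces, and the layout reference must never be held across a CLIO operation. A condensed sketch of the intended get/check/put bracket, mirroring ll_lov_setstripe_ea_info() earlier in the patch (hypothetical helper):

/* Hypothetical helper mirroring ll_lov_setstripe_ea_info(): take the
 * layout reference only long enough to inspect it, then drop it.
 */
static bool ll_has_stripe(struct inode *inode)
{
	struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);

	if (!lsm)
		return false;
	ccc_inode_lsm_put(inode, lsm);
	return true;
}
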
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index d80bcedd7..12f3e71f4 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -41,9 +41,9 @@
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold the
@@ -126,7 +126,7 @@ int cl_ocd_update(struct obd_device *host,
#define GROUPLOCK_SCOPE "grouplock"
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg)
+ struct ll_grouplock *cg)
{
struct lu_env *env;
struct cl_io *io;
@@ -140,20 +140,22 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
if (IS_ERR(env))
return PTR_ERR(env);
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = obj;
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc) {
+ if (rc != 0) {
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
/* Does not make sense to take GL for released layout */
if (rc > 0)
rc = -ENOTSUPP;
- cl_env_put(env, &refcheck);
return rc;
}
- descr = &ccc_env_info(env)->cti_descr;
+ lock = vvp_env_lock(env);
+ descr = &lock->cll_descr;
descr->cld_obj = obj;
descr->cld_start = 0;
descr->cld_end = CL_PAGE_EOF;
@@ -163,38 +165,37 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
descr->cld_enq_flags = enqflags;
- lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
- if (IS_ERR(lock)) {
+ rc = cl_lock_request(env, io, lock);
+ if (rc < 0) {
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
- return PTR_ERR(lock);
+ return rc;
}
- cg->cg_env = cl_env_get(&refcheck);
- cg->cg_io = io;
- cg->cg_lock = lock;
- cg->cg_gid = gid;
- LASSERT(cg->cg_env == env);
+ cg->lg_env = cl_env_get(&refcheck);
+ cg->lg_io = io;
+ cg->lg_lock = lock;
+ cg->lg_gid = gid;
+ LASSERT(cg->lg_env == env);
cl_env_unplant(env, &refcheck);
return 0;
}
-void cl_put_grouplock(struct ccc_grouplock *cg)
+void cl_put_grouplock(struct ll_grouplock *cg)
{
- struct lu_env *env = cg->cg_env;
- struct cl_io *io = cg->cg_io;
- struct cl_lock *lock = cg->cg_lock;
+ struct lu_env *env = cg->lg_env;
+ struct cl_io *io = cg->lg_io;
+ struct cl_lock *lock = cg->lg_lock;
int refcheck;
- LASSERT(cg->cg_env);
- LASSERT(cg->cg_gid);
+ LASSERT(cg->lg_env);
+ LASSERT(cg->lg_gid);
cl_env_implant(env, &refcheck);
cl_env_put(env, &refcheck);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
+ cl_lock_release(env, lock);
cl_io_fini(env, io);
cl_env_put(env, NULL);
}
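
The renamed cl_get_grouplock()/cl_put_grouplock() keep a strict acquire/release pairing around group-locked IO, as seen in ll_get_grouplock()/ll_put_grouplock() in file.c above. A condensed sketch (hypothetical caller, error paths elided):

/* Hypothetical caller: the ll_grouplock handle filled in by
 * cl_get_grouplock() must be released with cl_put_grouplock().
 */
static int grouplock_demo(struct inode *inode, unsigned long gid)
{
	struct ll_grouplock lg;
	int rc;

	rc = cl_get_grouplock(ll_i2info(inode)->lli_clob, gid,
			      0 /* blocking */, &lg);
	if (rc)
		return rc;
	/* ... group-locked IO runs here ... */
	cl_put_grouplock(&lg);
	return 0;
}
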
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index a55ac4dcc..2df551d3a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -46,31 +46,31 @@
#include "llite_internal.h"
/** records that a write is in flight */
-void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ struct ll_inode_info *lli = ll_i2info(club->vob_inode);
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
+ if (page && list_empty(&page->vpg_pending_linkage))
+ list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
spin_unlock(&lli->lli_lock);
}
/** records that a write has completed */
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ struct ll_inode_info *lli = ll_i2info(club->vob_inode);
int rc = 0;
spin_lock(&lli->lli_lock);
- if (page && !list_empty(&page->cpg_pending_linkage)) {
- list_del_init(&page->cpg_pending_linkage);
+ if (page && !list_empty(&page->vpg_pending_linkage)) {
+ list_del_init(&page->vpg_pending_linkage);
rc = 1;
}
spin_unlock(&lli->lli_lock);
if (rc)
- ll_queue_done_writing(club->cob_inode, 0);
+ ll_queue_done_writing(club->vob_inode, 0);
}
/** Queues DONE_WRITING if
@@ -80,25 +80,25 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->cob_pending_list)) {
+ list_empty(&club->vob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
spin_lock(&lcq->lcq_lock);
LASSERT(list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
+ PFID(ll_inode2fid(inode)));
list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
/* Avoid a concurrent insertion into the close thread queue:
@@ -124,9 +124,9 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
if (!cl_local_size(inode)) {
/* Send Size-on-MDS Attributes if valid. */
@@ -140,10 +140,10 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle **och, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->cob_pending_list))) {
+ if (!(list_empty(&club->vob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och);
LASSERT(!lli->lli_pending_och);
@@ -198,7 +198,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
}
}
- LASSERT(list_empty(&club->cob_pending_list));
+ LASSERT(list_empty(&club->vob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
@@ -221,9 +221,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
LASSERT(op_data);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
@@ -241,9 +241,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
if (rc) {
oa->o_valid = 0;
if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
- rc, inode->i_ino,
- inode->i_generation);
+ CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
} else {
CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
PFID(&lli->lli_fid));
@@ -302,9 +302,11 @@ static void ll_done_writing(struct inode *inode)
 * OSTs and send setattr back to the MDS.
*/
rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc done_writing failed: rc = %d\n",
- inode->i_ino, rc);
+ else if (rc) {
+ CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
+ }
out:
ll_finish_md_op_data(op_data);
if (och) {
@@ -323,8 +325,9 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
lli_close_list);
list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop))
+ } else if (atomic_read(&lcq->lcq_stop)) {
lli = ERR_PTR(-EALREADY);
+ }
spin_unlock(&lcq->lcq_lock);
return lli;
@@ -348,8 +351,8 @@ static int ll_close_thread(void *arg)
break;
inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
ll_done_writing(inode);
iput(inode);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index e3c0f1dd4..3f2f30b65 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -43,11 +43,11 @@
/* for struct cl_lock_descr and struct cl_io */
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_intent.h"
#include <linux/compat.h>
#include <linux/posix_acl_xattr.h>
+#include "vvp_internal.h"
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -99,6 +99,13 @@ struct ll_remote_perm {
*/
};
+struct ll_grouplock {
+ struct lu_env *lg_env;
+ struct cl_io *lg_io;
+ struct cl_lock *lg_lock;
+ unsigned long lg_gid;
+};
+
enum lli_flags {
/* MDS has an authority for the Size-on-MDS attributes. */
LLIF_MDS_SIZE_LOCK = (1 << 0),
@@ -161,7 +168,9 @@ struct ll_inode_info {
struct inode lli_vfs_inode;
/* the most recent timestamps obtained from mds */
- struct ost_lvb lli_lvb;
+ s64 lli_atime;
+ s64 lli_mtime;
+ s64 lli_ctime;
spinlock_t lli_agl_lock;
	/* Try to make the d::member and f::member aligned. Before using
@@ -328,6 +337,7 @@ enum ra_stat {
RA_STAT_EOF,
RA_STAT_MAX_IN_FLIGHT,
RA_STAT_WRONG_GRAB_PAGE,
+ RA_STAT_FAILED_REACH_END,
_NR_RA_STAT,
};
@@ -481,6 +491,12 @@ struct ll_sb_info {
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
+ /*
+ * Used to track "unstable" pages on a client, and maintain a
+ * LRU list of clean pages. An "unstable" page is defined as
+ * any page which is sent to a server as part of a bulk request,
+ * but is uncommitted to stable storage.
+ */
struct cl_client_cache ll_cache;
struct lprocfs_stats *ll_ra_stats;
@@ -525,13 +541,6 @@ struct ll_sb_info {
struct completion ll_kobj_unregister;
};
-struct ll_ra_read {
- pgoff_t lrr_start;
- pgoff_t lrr_count;
- struct task_struct *lrr_reader;
- struct list_head lrr_linkage;
-};
-
/*
* per file-descriptor read-ahead data.
*/
@@ -590,12 +599,6 @@ struct ll_readahead_state {
*/
unsigned long ras_request_index;
/*
- * list of struct ll_ra_read's one per read(2) call current in
- * progress against this file descriptor. Used by read-ahead code,
- * protected by ->ras_lock.
- */
- struct list_head ras_read_beads;
- /*
* The following 3 items are used for detecting the stride I/O
* mode.
* In stride I/O mode,
@@ -622,7 +625,7 @@ extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
- struct ccc_grouplock fd_grouplock;
+ struct ll_grouplock fd_grouplock;
__u64 lfd_pos;
__u32 fd_flags;
fmode_t fd_omode;
@@ -663,8 +666,16 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#endif
}
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
+void ll_ras_enter(struct file *f);
+
+/* llite/lcommon_misc.c */
+int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
+int cl_ocd_update(struct obd_device *host,
+ struct obd_device *watched,
+ enum obd_notify_event ev, void *owner, void *data);
+int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
+ struct ll_grouplock *cg);
+void cl_put_grouplock(struct ll_grouplock *cg);
/* llite/lproc_llite.c */
int ldebugfs_register_mountpoint(struct dentry *parent,
@@ -697,15 +708,15 @@ int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
/* llite/rw.c */
-int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
-int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags);
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit);
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
+void ll_cl_fini(struct ll_cl_context *lcc);
extern const struct address_space_operations ll_aops;
@@ -740,7 +751,7 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type);
int ll_inode_permission(struct inode *inode, int mask);
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum,
+ __u64 flags, struct lov_user_md *lum,
int lum_size);
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
struct lov_mds_md **lmm, int *lmm_size,
@@ -750,9 +761,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
int *lmm_size, struct ptlrpc_request **request);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
+int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
-int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
int ll_hsm_release(struct inode *inode);
/* llite/dcache.c */
@@ -824,65 +835,8 @@ struct ll_close_queue {
atomic_t lcq_stop;
};
-struct ccc_object *cl_inode2ccc(struct inode *inode);
-
-void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
-
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
- /** normal IO */
- IO_NORMAL,
- /** io started from splice_{read|write} */
- IO_SPLICE
-};
-
-/* IO subtypes */
-struct vvp_io {
- /** io subtype */
- enum vvp_io_subtype cui_io_subtype;
-
- union {
- struct {
- struct pipe_inode_info *cui_pipe;
- unsigned int cui_flags;
- } splice;
- struct vvp_fault_io {
- /**
- * Inode modification time that is checked across DLM
- * lock request.
- */
- time64_t ft_mtime;
- struct vm_area_struct *ft_vma;
- /**
- * locked page returned from vvp_io
- */
- struct page *ft_vmpage;
- struct vm_fault_api {
- /**
- * kernel fault info
- */
- struct vm_fault *ft_vmf;
- /**
- * fault API used bitflags for return code.
- */
- unsigned int ft_flags;
- /**
- * check that flags are from filemap_fault
- */
- bool ft_flags_valid;
- } fault;
- } fault;
- } u;
- /**
- * Read-ahead state used by read and page-fault IO contexts.
- */
- struct ll_ra_read cui_bead;
- /**
- * Set when cui_bead has been initialized.
- */
- int cui_ra_window_set;
-};
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
/**
* IO arguments for various VFS I/O interfaces.
@@ -911,54 +865,32 @@ struct ll_cl_context {
int lcc_refcheck;
};
-struct vvp_thread_info {
- struct vvp_io_args vti_args;
- struct ra_io_arg vti_ria;
- struct ll_cl_context vti_io_ctx;
+struct ll_thread_info {
+ struct vvp_io_args lti_args;
+ struct ra_io_arg lti_ria;
+ struct ll_cl_context lti_io_ctx;
};
-static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
-{
- extern struct lu_context_key vvp_key;
- struct vvp_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &vvp_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
- enum vvp_io_subtype type)
+extern struct lu_context_key ll_thread_key;
+static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
{
- struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;
-
- ret->via_io_subtype = type;
+ struct ll_thread_info *lti;
- return ret;
+ lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
+ LASSERT(lti);
+ return lti;
}
-struct vvp_session {
- struct vvp_io vs_ios;
-};
-
-static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
+ enum vvp_io_subtype type)
{
- extern struct lu_context_key vvp_session_key;
- struct vvp_session *ses;
+ struct vvp_io_args *via = &ll_env_info(env)->lti_args;
- ses = lu_context_key_get(env->le_ses, &vvp_session_key);
- LASSERT(ses);
- return ses;
-}
+ via->via_io_subtype = type;
-static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
-{
- return &vvp_env_session(env)->vs_ios;
+ return via;
}
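
The relocated helpers keep the old calling convention: fetch the per-thread IO arguments from the env (keyed by ll_thread_key) and stamp the subtype. A sketch of the read-path use, matching ll_file_read_iter() in file.c above (hypothetical fragment):

/* Hypothetical fragment matching ll_file_read_iter(): the args live in
 * the env's ll_thread_info, so no per-call allocation is needed.
 */
static void demo_read_args(const struct lu_env *env, struct kiocb *iocb,
			   struct iov_iter *to)
{
	struct vvp_io_args *args = ll_env_args(env, IO_NORMAL);

	args->u.normal.via_iter = to;
	args->u.normal.via_iocb = iocb;
}
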
-int vvp_global_init(void);
-void vvp_global_fini(void);
-
void ll_queue_done_writing(struct inode *inode, unsigned long flags);
void ll_close_thread_shutdown(struct ll_close_queue *lcq);
int ll_close_thread_start(struct ll_close_queue **lcq_ret);
@@ -981,6 +913,10 @@ static inline void ll_invalidate_page(struct page *vmpage)
if (!mapping)
return;
+ /*
+ * truncate_complete_page() calls
+ * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
+ */
ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
@@ -1040,10 +976,10 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
}
/* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size);
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags);
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size);
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_removexattr(struct dentry *dentry, const char *name);
@@ -1055,9 +991,6 @@ void free_rmtperm_hash(struct hlist_head *hash);
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
-/* llite/llite_cl.c */
-extern struct lu_device_type vvp_device_type;
-
/**
* Common IO arguments for various VFS I/O interfaces.
*/
@@ -1069,7 +1002,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
struct ll_readahead_state *ras, unsigned long index,
unsigned hit);
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
/* llite/llite_rmtacl.c */
#ifdef CONFIG_FS_POSIX_ACL
@@ -1163,6 +1096,22 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentry,
int only_unplug);
void ll_stop_statahead(struct inode *dir, void *key);
+blkcnt_t dirty_cnt(struct inode *inode);
+
+int cl_glimpse_size0(struct inode *inode, int agl);
+int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode, struct cl_object *clob, int agl);
+
+static inline int cl_glimpse_size(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 0);
+}
+
+static inline int cl_agl(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 1);
+}
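
The two wrappers make the agl argument of cl_glimpse_size0() self-documenting: a plain glimpse is synchronous, while AGL issues the glimpse lock asynchronously, as statahead does. A hedged sketch of a caller choosing between them (hypothetical):

/* Hypothetical caller: refresh i_size either synchronously or via an
 * asynchronous glimpse lock (AGL).
 */
static int demo_refresh_size(struct inode *inode, bool async)
{
	return async ? cl_agl(inode) : cl_glimpse_size(inode);
}
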
+
static inline int ll_glimpse_size(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -1285,43 +1234,6 @@ typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
-/* lclient compat stuff */
-#define cl_inode_info ll_inode_info
-#define cl_i2info(info) ll_i2info(info)
-#define cl_inode_mode(inode) ((inode)->i_mode)
-#define cl_i2sbi ll_i2sbi
-
-static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
- const struct iattr *attr)
-{
- LASSERT(attr->ia_valid & ATTR_FILE);
- return LUSTRE_FPRIVATE(attr->ia_file);
-}
-
-static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
-{
- LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
- i_size_write(inode, kms);
-}
-
-static inline void cl_isize_write(struct inode *inode, loff_t kms)
-{
- ll_inode_size_lock(inode);
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode);
-}
-
-#define cl_isize_read(inode) i_size_read(inode)
-
-static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
-{
- return ll_merge_lvb(env, inode);
-}
-
-#define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
-#define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
-#define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
-
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
enum cl_fsync_mode mode, int ignore_layout);
@@ -1350,7 +1262,7 @@ static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
LPROC_LL_OSC_WRITE;
- ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
+ ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
}
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -1382,18 +1294,16 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
*/
if (it->d.lustre.it_remote_lock_mode) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
- inode,
- inode->i_ino, inode->i_generation,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
+ PFID(ll_inode2fid(inode)), inode,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
}
handle.cookie = it->d.lustre.it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n",
- inode, inode->i_ino,
- inode->i_generation, handle.cookie);
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
+ PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
&it->d.lustre.it_lock_bits);
@@ -1471,9 +1381,25 @@ enum {
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
-int ll_layout_restore(struct inode *inode);
+int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
int ll_xattr_init(void);
void ll_xattr_fini(void);
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt);
+
+/* lcommon_cl.c */
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
+
+extern struct lu_env *cl_inode_fini_env;
+extern int cl_inode_fini_refcheck;
+
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
+void cl_inode_fini(struct inode *inode);
+int cl_local_size(struct inode *inode);
+
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
+__u32 cl_fid_build_gen(const struct lu_fid *fid);
+
#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b57a99268..96c7e9fc6 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,18 +85,18 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
- if (pages >> (20 - PAGE_SHIFT) < 512)
- lru_page_max = pages / 2;
- else
- lru_page_max = (pages / 4) * 3;
+ lru_page_max = pages / 2;
- /* initialize lru data */
+ /* initialize ll_cache data */
atomic_set(&sbi->ll_cache.ccc_users, 0);
sbi->ll_cache.ccc_lru_max = lru_page_max;
atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
+ atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
+ init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
SBI_DEFAULT_READAHEAD_MAX);
sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
@@ -169,12 +169,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
return -ENOMEM;
}
- if (llite_root) {
- err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
- if (err < 0)
- CERROR("could not register mount in <debugfs>/lustre/llite\n");
- }
-
/* indicate the features supported by this client */
data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
OBD_CONNECT_ATTRFID |
@@ -337,10 +331,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
else
sbi->ll_md_brw_size = PAGE_SIZE;
- if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
- LCONSOLE_INFO("Layout lock feature supported.\n");
+ if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
- }
if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
@@ -453,7 +445,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* make root inode
* XXX: move this to after cbd setup?
*/
- valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
+ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
valid |= OBD_MD_FLRMTPERM;
else if (sbi->ll_flags & LL_SBI_ACL)
@@ -555,6 +547,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
kfree(data);
kfree(osfs);
+ if (llite_root) {
+ err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
+ if (err < 0) {
+ CERROR("%s: could not register mount in debugfs: "
+ "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
+ err = 0;
+ }
+ }
+
return err;
out_root:
iput(root);
@@ -573,7 +574,6 @@ out_md:
out:
kfree(data);
kfree(osfs);
- ldebugfs_unregister_mountpoint(sbi);
return err;
}
@@ -897,10 +897,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
cfg->cfg_callback = class_config_llog_handler;
/* set up client obds */
err = lustre_process_log(sb, profilenm, cfg);
- if (err < 0) {
- CERROR("Unable to process log: %d\n", err);
+ if (err < 0)
goto out_free;
- }
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
@@ -947,7 +945,7 @@ void ll_put_super(struct super_block *sb)
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
- int next, force = 1;
+ int ccc_count, next, force = 1, rc = 0;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
@@ -963,6 +961,19 @@ void ll_put_super(struct super_block *sb)
force = obd->obd_force;
}
+ /* Wait for unstable pages to be committed to stable storage */
+ if (!force) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
+ !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+ &lwi);
+ }
+
+ ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ if (!force && rc != -EINTR)
+ LASSERTF(!ccc_count, "count: %i\n", ccc_count);
+
/* We need to set force before the lov_disconnect in
* lustre_common_put_super, since l_d cleans up osc's as well.
*/
@@ -999,6 +1010,8 @@ void ll_put_super(struct super_block *sb)
lustre_common_put_super(sb);
+ cl_env_cache_purge(~0);
+
module_put(THIS_MODULE);
} /* client_put_super */
@@ -1032,8 +1045,8 @@ void ll_clear_inode(struct inode *inode)
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
if (S_ISDIR(inode->i_mode)) {
/* these should have been cleared in ll_file_release */
@@ -1180,9 +1193,11 @@ static int ll_setattr_done_writing(struct inode *inode,
* from OSTs and send setattr to back to MDS.
*/
rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc truncate failed: rc = %d\n",
- inode->i_ino, rc);
+ else if (rc) {
+ CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
+ ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
+ }
return rc;
}
@@ -1210,12 +1225,9 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
bool file_is_released = false;
int rc = 0, rc1 = 0;
- CDEBUG(D_VFSTRACE,
- "%s: setattr inode %p/fid:" DFID
- " from %llu to %llu, valid %x, hsm_import %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid, hsm_import);
+ CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
+ i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
@@ -1265,14 +1277,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
(s64)ktime_get_real_seconds());
- /* If we are changing file size, file content is modified, flag it. */
- if (attr->ia_valid & ATTR_SIZE) {
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
/* We always do an MDS RPC, even if we're only changing the size;
* only the MDS knows whether truncate() should fail with -ETXTBUSY
*/
@@ -1284,13 +1288,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (!S_ISDIR(inode->i_mode))
inode_unlock(inode);
- memcpy(&op_data->op_attr, attr, sizeof(*attr));
-
- /* Open epoch for truncate. */
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
- op_data->op_flags = MF_EPOCH_OPEN;
-
/* a truncate on a released file must fail with -ENODATA,
* so the size must not be set on the MDS for a released file,
* but the other attributes must be set
@@ -1304,29 +1301,40 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
file_is_released = true;
ccc_inode_lsm_put(inode, lsm);
+
+ if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
+ if (file_is_released) {
+ rc = ll_layout_restore(inode, 0, attr->ia_size);
+ if (rc < 0)
+ goto out;
+
+ file_is_released = false;
+ ll_layout_refresh(inode, &gen);
+ }
+
+ /*
+ * If we are changing file size, file content is
+ * modified, flag it.
+ */
+ attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ op_data->op_bias |= MDS_DATA_MODIFIED;
+ }
}
- /* if not in HSM import mode, clear size attr for released file
- * we clear the attribute send to MDT in op_data, not the original
- * received from caller in attr which is used later to
- * decide return code
- */
- if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
- op_data->op_attr.ia_valid &= ~ATTR_SIZE;
+ memcpy(&op_data->op_attr, attr, sizeof(*attr));
+
+ /* Open epoch for truncate. */
+ if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
+ (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
+ op_data->op_flags = MF_EPOCH_OPEN;
rc = ll_md_setattr(dentry, op_data, &mod);
if (rc)
goto out;
- /* truncate failed (only when non HSM import), others succeed */
- if (file_is_released) {
- if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
- rc = -ENODATA;
- else
- rc = 0;
- goto out;
- }
-
/* RPC to MDT is sent, cancel data modification flag */
if (op_data->op_bias & MDS_DATA_MODIFIED) {
spin_lock(&lli->lli_lock);
@@ -1335,7 +1343,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
}
ll_ioepoch_open(lli, op_data->op_ioepoch);
- if (!S_ISREG(inode->i_mode)) {
+ if (!S_ISREG(inode->i_mode) || file_is_released) {
rc = 0;
goto out;
}
@@ -1552,7 +1560,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->valid & OBD_MD_FLATIME) {
if (body->atime > LTIME_S(inode->i_atime))
LTIME_S(inode->i_atime) = body->atime;
- lli->lli_lvb.lvb_atime = body->atime;
+ lli->lli_atime = body->atime;
}
if (body->valid & OBD_MD_FLMTIME) {
if (body->mtime > LTIME_S(inode->i_mtime)) {
@@ -1561,12 +1569,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
body->mtime);
LTIME_S(inode->i_mtime) = body->mtime;
}
- lli->lli_lvb.lvb_mtime = body->mtime;
+ lli->lli_mtime = body->mtime;
}
if (body->valid & OBD_MD_FLCTIME) {
if (body->ctime > LTIME_S(inode->i_ctime))
LTIME_S(inode->i_ctime) = body->ctime;
- lli->lli_lvb.lvb_ctime = body->ctime;
+ lli->lli_ctime = body->ctime;
}
if (body->valid & OBD_MD_FLMODE)
inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
@@ -1593,12 +1601,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
/* FID shouldn't be changed! */
if (fid_is_sane(&lli->lli_fid)) {
LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
- "Trying to change FID "DFID
- " to the "DFID", inode %lu/%u(%p)\n",
+ "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
PFID(&lli->lli_fid), PFID(&body->fid1),
- inode->i_ino, inode->i_generation, inode);
- } else
+ PFID(ll_inode2fid(inode)), inode);
+ } else {
lli->lli_fid = body->fid1;
+ }
}
LASSERT(fid_seq(&lli->lli_fid) != 0);
@@ -1622,8 +1630,10 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
LLIF_SOM_DIRTY)) {
- CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
- inode->i_ino, lli->lli_flags);
+ CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)),
+ lli->lli_flags);
} else {
/* Use old size assignment to avoid
* deadlock bz14138 & bz14326
@@ -1699,7 +1709,7 @@ void ll_read_inode2(struct inode *inode, void *opaque)
void ll_delete_inode(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
if (S_ISREG(inode->i_mode) && lli->lli_clob)
/* discard all dirty pages before truncating them, required by
@@ -1715,8 +1725,8 @@ void ll_delete_inode(struct inode *inode)
spin_lock_irq(&inode->i_data.tree_lock);
spin_unlock_irq(&inode->i_data.tree_lock);
LASSERTF(inode->i_data.nrpages == 0,
- "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
- inode->i_ino, inode->i_generation, inode,
+ "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
+ PFID(ll_inode2fid(inode)), inode,
inode->i_data.nrpages);
}
/* Workaround end */
@@ -1747,7 +1757,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
- CERROR("failure %d inode %lu\n", rc, inode->i_ino);
+ CERROR("%s: failure inode "DFID": rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
return -abs(rc);
}
@@ -1772,7 +1784,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
+ op_data->op_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
rc = md_setattr(sbi->ll_md_exp, op_data,
NULL, 0, NULL, 0, &req, NULL);
@@ -2066,11 +2078,11 @@ int ll_obd_statfs(struct inode *inode, void __user *arg)
}
memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
- if (type & LL_STATFS_LMV)
+ if (type & LL_STATFS_LMV) {
exp = sbi->ll_md_exp;
- else if (type & LL_STATFS_LOV)
+ } else if (type & LL_STATFS_LOV) {
exp = sbi->ll_dt_exp;
- else {
+ } else {
rc = -ENODEV;
goto out_statfs;
}
@@ -2271,7 +2283,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
char *buf, *path = NULL;
struct dentry *dentry = NULL;
- struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
+ struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
/* this can be called inside spin lock so use GFP_ATOMIC. */
buf = (char *)__get_free_page(GFP_ATOMIC);
@@ -2285,7 +2297,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
"%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
- PFID(&obj->cob_header.coh_lu.loh_fid),
+ PFID(&obj->vob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
if (dentry)
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 5b484e62f..88ef1cac9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -57,10 +57,10 @@ void policy_from_vma(ldlm_policy_data_t *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
}
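
A worked example of the extent arithmetic above, using hypothetical values and 4 KB pages (PAGE_SHIFT == 12, PAGE_MASK == ~0xfffUL):

	/* vm_start = 0x7f0000001000, vm_pgoff = 2, addr = 0x7f0000003234,
	 * count = 100 (all hypothetical):
	 * start = ((addr - vm_start) & PAGE_MASK) + (vm_pgoff << PAGE_SHIFT)
	 *       = 0x2000 + 0x2000 = 0x4000
	 * end   = (start + count - 1) | ~PAGE_MASK = 0x4fff
	 * so the DLM lock extent is rounded outward to whole pages.
	 */
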
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
@@ -123,7 +123,8 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
*env_ret = env;
- io = ccc_env_thread_io(env);
+restart:
+ io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
LASSERT(io->ci_obj);
@@ -146,17 +147,20 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
if (rc == 0) {
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(cio->cui_cl.cis_io == io);
+ LASSERT(vio->vui_cl.cis_io == io);
/* mmap lock must be MANDATORY because it has to cache pages. */
io->ci_lockreq = CILR_MANDATORY;
- cio->cui_fd = fd;
+ vio->vui_fd = fd;
} else {
LASSERT(rc < 0);
cl_io_fini(env, io);
+ if (io->ci_need_restart)
+ goto restart;
+
cl_env_nested_put(nest, env);
io = ERR_PTR(rc);
}
@@ -200,7 +204,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
* Otherwise, we could add dirty pages into osc cache
* while truncate is on-going.
*/
- inode = ccc_object_inode(io->ci_obj);
+ inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
down_read(&lli->lli_trunc_sem);
@@ -307,17 +311,17 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.fault.ft_vmf = vmf;
- vio->u.fault.fault.ft_flags = 0;
- vio->u.fault.fault.ft_flags_valid = false;
+ vio->u.fault.ft_vmf = vmf;
+ vio->u.fault.ft_flags = 0;
+ vio->u.fault.ft_flags_valid = false;
result = cl_io_loop(env, io);
/* ft_flags are only valid if we reached
* the call to filemap_fault
*/
- if (vio->u.fault.fault.ft_flags_valid)
- fault_ret = vio->u.fault.fault.ft_flags;
+ if (vio->u.fault.ft_flags_valid)
+ fault_ret = vio->u.fault.ft_flags;
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage) {
@@ -390,9 +394,11 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
+ const struct dentry *de = vma->vm_file->f_path.dentry;
+
+ CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
current->comm, vmf->pgoff,
- file_inode(vma->vm_file)->i_ino);
+ PFID(ll_inode2fid(de->d_inode)));
printed = true;
}
} while (retry);
@@ -422,16 +428,16 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
/**
* To avoid cancelling the locks covering the mmapped region under lock cache pressure,
- * we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ * we track the mapped vma count in vvp_object::vob_mmap_cnt.
*/
static void ll_vm_open(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
LASSERT(vma->vm_file);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- atomic_inc(&vob->cob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ atomic_inc(&vob->vob_mmap_cnt);
}
/**
@@ -440,11 +446,11 @@ static void ll_vm_open(struct vm_area_struct *vma)
static void ll_vm_close(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
LASSERT(vma->vm_file);
- atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ atomic_dec(&vob->vob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 193aab879..c1eef6198 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -119,7 +119,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
kfree(op_data);
if (rc) {
- CERROR("can't get object attrs, fid "DFID", rc %d\n",
+ CDEBUG(D_INFO, "can't get object attrs, fid "DFID", rc %d\n",
PFID(fid), rc);
return ERR_PTR(rc);
}
@@ -191,8 +191,9 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
struct lustre_nfs_fid *nfs_fid = (void *)fh;
- CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n",
- inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len);
+ CDEBUG(D_INFO, "%s: encoding for ("DFID") maxlen=%d minlen=%d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), *plen, fileid_len);
if (*plen < fileid_len) {
*plen = fileid_len;
@@ -298,8 +299,9 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
sbi = ll_s2sbi(dir->i_sb);
- CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n",
- dir->i_ino, PFID(ll_inode2fid(dir)));
+ CDEBUG(D_INFO, "%s: getting parent for ("DFID")\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
+ PFID(ll_inode2fid(dir)));
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc != 0)
@@ -314,15 +316,20 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
- CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
+ CERROR("%s: failure inode "DFID" get parent: rc = %d\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
+ PFID(ll_inode2fid(dir)), rc);
return ERR_PTR(rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body->valid & OBD_MD_FLID);
-
- CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
- PFID(ll_inode2fid(dir)), PFID(&body->fid1));
-
+ /*
+ * LU-3952: the MDT may have lost the FID of its parent; we should not
+ * crash the NFS server, as ll_iget_for_nfs() will handle the error.
+ */
+ if (body->valid & OBD_MD_FLID) {
+ CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
+ PFID(ll_inode2fid(dir)), PFID(&body->fid1));
+ }
result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index f169c0db6..813a9a354 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -274,8 +274,9 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
if (lo->lo_biotail) {
lo->lo_biotail->bi_next = bio;
lo->lo_biotail = bio;
- } else
+ } else {
lo->lo_bio = lo->lo_biotail = bio;
+ }
spin_unlock_irqrestore(&lo->lo_lock, flags);
atomic_inc(&lo->lo_pending);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 27ab12614..55d62eb11 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -254,7 +254,6 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number > totalram_pages / 2) {
-
CERROR("can't set file readahead more than %lu MB\n",
totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
@@ -393,6 +392,8 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
+ struct lu_env *env;
+ int refcheck;
int mult, rc, pages_number;
int diff = 0;
int nrpages = 0;
@@ -430,6 +431,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
goto out;
}
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return 0;
+
diff = -diff;
while (diff > 0) {
int tmp;
@@ -455,19 +460,20 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
break;
if (!sbi->ll_dt_exp) { /* being initialized */
- rc = -ENODEV;
- break;
+ rc = 0;
+ cl_env_put(env, &refcheck);
+ goto out;
}
/* difficult - have to ask OSCs to drop LRU slots. */
tmp = diff << 1;
- rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
+ rc = obd_set_info_async(env, sbi->ll_dt_exp,
sizeof(KEY_CACHE_LRU_SHRINK),
KEY_CACHE_LRU_SHRINK,
sizeof(tmp), &tmp, NULL);
if (rc < 0)
break;
}
+ cl_env_put(env, &refcheck);
out:
if (rc >= 0) {
@@ -818,6 +824,23 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
+static ssize_t unstable_stats_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ struct cl_client_cache *cache = &sbi->ll_cache;
+ int pages, mb;
+
+ pages = atomic_read(&cache->ccc_unstable_nr);
+ mb = (pages * PAGE_SIZE) >> 20;
+
+ return sprintf(buf, "unstable_pages: %8d\n"
+ "unstable_mb: %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
+
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "mntpt_path", ll_rd_path, 0, 0 }, */
{ "site", &ll_site_stats_fops, NULL, 0 },
@@ -853,6 +876,7 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
+ &lustre_attr_unstable_stats.attr,
NULL,
};
@@ -953,6 +977,7 @@ static const char *ra_stat_string[] = {
[RA_STAT_EOF] = "read-ahead to EOF",
[RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
[RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
+ [RA_STAT_FAILED_REACH_END] = "failed to reach end"
};
int ldebugfs_register_mountpoint(struct dentry *parent,
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index f8f98e4e8..5eba0ebae 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -128,12 +128,14 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
if (rc != 0) {
iget_failed(inode);
inode = NULL;
- } else
+ } else {
unlock_new_inode(inode);
- } else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
+ }
+ } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
ll_update_inode(inode, md);
- CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
- inode, PFID(&md->body->fid1));
+ CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p)\n",
+ PFID(&md->body->fid1), inode);
+ }
}
return inode;
}
@@ -188,7 +190,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
break;
/* Invalidate all dentries associated with this inode */
- LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+ LASSERT(ldlm_is_canceling(lock));
if (!fid_res_name_eq(ll_inode2fid(inode),
&lock->l_resource->lr_name)) {
@@ -255,8 +257,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- CDEBUG(D_INODE, "invalidating inode %lu\n",
- inode->i_ino);
+ CDEBUG(D_INODE, "invalidating inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
truncate_inode_pages(inode->i_mapping, 0);
ll_invalidate_negative_children(inode);
}
@@ -476,9 +478,8 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
return ERR_PTR(-ENAMETOOLONG);
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, LL_IT2STR(it));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),intent=%s\n",
+ dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
if (d_mountpoint(dentry))
CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
@@ -553,9 +554,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
struct dentry *de;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, flags);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),flags=%u\n",
+ dentry, PFID(ll_inode2fid(parent)), parent, flags);
/* Optimize away (CREATE && !OPEN). Let .create handle the race. */
if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
@@ -586,10 +586,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- CDEBUG(D_VFSTRACE,
- "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, file, open_flags, mode, *opened);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),file %p,open_flags %x,mode %x opened %d\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
+ *opened);
it = kzalloc(sizeof(*it), GFP_NOFS);
if (!it)
@@ -680,8 +679,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
* lock on the inode. Since we finally have an inode pointer,
* stuff it in the lock.
*/
- CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
+ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(dir)), inode);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
out:
ptlrpc_req_finished(request);
@@ -708,9 +707,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
struct inode *inode;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, LL_IT2STR(it));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), intent=%s\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
rc = it_open_error(DISP_OPEN_CREATE, it);
if (rc)
@@ -733,8 +731,9 @@ static void ll_update_times(struct ptlrpc_request *request,
LASSERT(body);
if (body->valid & OBD_MD_FLMTIME &&
body->mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
- inode->i_ino, LTIME_S(inode->i_mtime), body->mtime);
+ CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu\n",
+ PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
+ body->mtime);
LTIME_S(inode->i_mtime) = body->mtime;
}
if (body->valid & OBD_MD_FLCTIME &&
@@ -791,9 +790,9 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p) mode %o dev %x\n",
- dchild, dir->i_ino, dir->i_generation, dir,
- mode, old_encode_dev(rdev));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p) mode %o dev %x\n",
+ dchild, PFID(ll_inode2fid(dir)), dir, mode,
+ old_encode_dev(rdev));
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
@@ -831,9 +830,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, mode, want_excl);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), flags=%u, excl=%d\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
rc = ll_mknod(dir, dentry, mode, 0);
@@ -845,12 +843,6 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
return rc;
}
-static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
-{
- if (d_really_is_positive(child))
- *fid = *ll_inode2fid(d_inode(child));
-}
-
int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
{
struct mdt_body *body;
@@ -927,23 +919,25 @@ out:
* is any lock existing. They will recycle dentries and inodes based upon locks
* too. b=20433
*/
-static int ll_unlink(struct inode *dir, struct dentry *dentry)
+static int ll_unlink(struct inode *dir, struct dentry *dchild)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ dchild, dir->i_ino, dir->i_generation, dir);
op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
+ dchild->d_name.name,
+ dchild->d_name.len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(dentry, &op_data->op_fid3);
+ if (dchild && dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -963,8 +957,8 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir"DFID"(%p)\n",
+ dentry, PFID(ll_inode2fid(dir)), dir);
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
@@ -977,23 +971,25 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return err;
}
-static int ll_rmdir(struct inode *dir, struct dentry *dentry)
+static int ll_rmdir(struct inode *dir, struct dentry *dchild)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p)\n",
+ dchild, PFID(ll_inode2fid(dir)), dir);
op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
+ dchild->d_name.name,
+ dchild->d_name.len,
S_IFDIR, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(dentry, &op_data->op_fid3);
+ if (dchild && dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -1011,9 +1007,8 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),target=%.*s\n",
- dentry, dir->i_ino, dir->i_generation,
- dir, 3000, oldname);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),target=%.*s\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname);
err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
0, LUSTRE_OPC_SYMLINK);
@@ -1033,10 +1028,9 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct md_op_data *op_data;
int err;
- CDEBUG(D_VFSTRACE,
- "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%pd\n",
- src->i_ino, src->i_generation, src, dir->i_ino,
- dir->i_generation, dir, new_dentry);
+ CDEBUG(D_VFSTRACE, "VFS Op: inode="DFID"(%p), dir="DFID"(%p), target=%pd\n",
+ PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
+ new_dentry);
op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
new_dentry->d_name.len,
@@ -1056,42 +1050,45 @@ out:
return err;
}
-static int ll_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+static int ll_rename(struct inode *src, struct dentry *src_dchild,
+ struct inode *tgt, struct dentry *tgt_dchild)
{
struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(old_dir);
+ struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
int err;
CDEBUG(D_VFSTRACE,
- "VFS Op:oldname=%pd,src_dir=%lu/%u(%p),newname=%pd,tgt_dir=%lu/%u(%p)\n",
- old_dentry, old_dir->i_ino, old_dir->i_generation, old_dir,
- new_dentry, new_dir->i_ino, new_dir->i_generation, new_dir);
+ "VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
+ src_dchild, PFID(ll_inode2fid(src)), src,
+ tgt_dchild, PFID(ll_inode2fid(tgt)), tgt);
- op_data = ll_prep_md_op_data(NULL, old_dir, new_dir, NULL, 0, 0,
+ op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(old_dentry, &op_data->op_fid3);
- ll_get_child_fid(new_dentry, &op_data->op_fid4);
+ if (src_dchild && src_dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
+ if (tgt_dchild && tgt_dchild->d_inode)
+ op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
+
err = md_rename(sbi->ll_md_exp, op_data,
- old_dentry->d_name.name,
- old_dentry->d_name.len,
- new_dentry->d_name.name,
- new_dentry->d_name.len, &request);
+ src_dchild->d_name.name,
+ src_dchild->d_name.len,
+ tgt_dchild->d_name.name,
+ tgt_dchild->d_name.len, &request);
ll_finish_md_op_data(op_data);
if (!err) {
- ll_update_times(request, old_dir);
- ll_update_times(request, new_dir);
+ ll_update_times(request, src);
+ ll_update_times(request, tgt);
ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- err = ll_objects_destroy(request, old_dir);
+ err = ll_objects_destroy(request, src);
}
ptlrpc_req_finished(request);
if (!err)
- d_move(old_dentry, new_dentry);
+ d_move(src_dchild, tgt_dchild);
return err;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index edab6c5b7..336397773 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -63,7 +63,7 @@
* Finalizes cl-data before exiting typical address_space operation. Dual to
* ll_cl_init().
*/
-static void ll_cl_fini(struct ll_cl_context *lcc)
+void ll_cl_fini(struct ll_cl_context *lcc)
{
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
@@ -84,200 +84,59 @@ static void ll_cl_fini(struct ll_cl_context *lcc)
* Initializes common cl-data at the typical address_space operation entry
* point.
*/
-static struct ll_cl_context *ll_cl_init(struct file *file,
- struct page *vmpage, int create)
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
{
struct ll_cl_context *lcc;
struct lu_env *env;
struct cl_io *io;
struct cl_object *clob;
- struct ccc_io *cio;
+ struct vvp_io *vio;
int refcheck;
int result = 0;
- clob = ll_i2info(vmpage->mapping->host)->lli_clob;
+ clob = ll_i2info(file_inode(file))->lli_clob;
LASSERT(clob);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return ERR_CAST(env);
- lcc = &vvp_env_info(env)->vti_io_ctx;
+ lcc = &ll_env_info(env)->lti_io_ctx;
memset(lcc, 0, sizeof(*lcc));
lcc->lcc_env = env;
lcc->lcc_refcheck = refcheck;
lcc->lcc_cookie = current;
- cio = ccc_env_io(env);
- io = cio->cui_cl.cis_io;
- if (!io && create) {
- struct inode *inode = vmpage->mapping->host;
- loff_t pos;
-
- if (inode_trylock(inode)) {
- inode_unlock((inode));
-
- /* this is too bad. Someone is trying to write the
- * page w/o holding inode mutex. This means we can
- * add dirty pages into cache during truncate
- */
- CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
- current->comm);
- dump_stack();
- LBUG();
- return ERR_PTR(-EIO);
- }
-
- /*
- * Loop-back driver calls ->prepare_write().
- * methods directly, bypassing file system ->write() operation,
- * so cl_io has to be created here.
- */
- io = ccc_env_thread_io(env);
- ll_io_init(io, file, 1);
-
- /* No lock at all for this kind of IO - we can't do it because
- * we have held page lock, it would cause deadlock.
- * XXX: This causes poor performance to loop device - One page
- * per RPC.
- * In order to get better performance, users should use
- * lloop driver instead.
- */
- io->ci_lockreq = CILR_NEVER;
-
- pos = vmpage->index << PAGE_SHIFT;
-
- /* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
- if (result == 0) {
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- cio->cui_iter = NULL;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- result = cl_io_lock(env, io);
- if (result == 0)
- result = cl_io_start(env, io);
- }
- } else
- result = io->ci_result;
- }
-
+ vio = vvp_env_io(env);
+ io = vio->vui_cl.cis_io;
lcc->lcc_io = io;
if (!io)
result = -EIO;
- if (result == 0) {
+
+ if (result == 0 && vmpage) {
struct cl_page *page;
LASSERT(io->ci_state == CIS_IO_GOING);
- LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+ LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
page = cl_page_find(env, clob, vmpage->index, vmpage,
CPT_CACHEABLE);
if (!IS_ERR(page)) {
lcc->lcc_page = page;
lu_ref_add(&page->cp_reference, "cl_io", io);
result = 0;
- } else
+ } else {
result = PTR_ERR(page);
+ }
}
if (result) {
ll_cl_fini(lcc);
lcc = ERR_PTR(result);
}
- CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
- vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
- env, io);
- return lcc;
-}
-
-static struct ll_cl_context *ll_cl_get(void)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
- lcc = &vvp_env_info(env)->vti_io_ctx;
- LASSERT(env == lcc->lcc_env);
- LASSERT(current == lcc->lcc_cookie);
- cl_env_put(env, &refcheck);
-
- /* env has got in ll_cl_init, so it is still usable. */
return lcc;
}
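
With the loop-device special case gone, ll_cl_init() only looks up the already-running cl_io; every caller follows the same pattern. A sketch mirroring ll_readpage() further down:

	lcc = ll_cl_init(file, vmpage);
	if (IS_ERR(lcc))
		return PTR_ERR(lcc);
	/* use lcc->lcc_env, lcc->lcc_io and, when vmpage was passed,
	 * the owned lcc->lcc_page, then release everything:
	 */
	ll_cl_fini(lcc);
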
-/**
- * ->prepare_write() address space operation called by generic_file_write()
- * for every page during write.
- */
-int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- int result;
-
- lcc = ll_cl_init(file, vmpage, 1);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- cl_page_assume(env, io, page);
-
- result = cl_io_prepare_write(env, io, page, from, to);
- if (result == 0) {
- /*
- * Add a reference, so that page is not evicted from
- * the cache until ->commit_write() is called.
- */
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "prepare_write",
- current);
- } else {
- cl_page_unassume(env, io, page);
- ll_cl_fini(lcc);
- }
- /* returning 0 in prepare assumes commit must be called
- * afterwards
- */
- } else {
- result = PTR_ERR(lcc);
- }
- return result;
-}
-
-int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result = 0;
-
- lcc = ll_cl_get();
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
-
- LASSERT(cl_page_is_owned(page, io));
- LASSERT(from <= to);
- if (from != to) /* handle short write case. */
- result = cl_io_commit_write(env, io, page, from, to);
- if (cl_page_is_owned(page, io))
- cl_page_unassume(env, io, page);
-
- /*
- * Release reference acquired by ll_prepare_write().
- */
- lu_ref_del(&page->cp_reference, "prepare_write", current);
- cl_page_put(env, page);
- ll_cl_fini(lcc);
- return result;
-}
-
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
/**
@@ -301,7 +160,7 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
*/
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
struct ra_io_arg *ria,
- unsigned long pages)
+ unsigned long pages, unsigned long min)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
long ret;
@@ -341,6 +200,11 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
}
out:
+ if (ret < min) {
+ /* override ra limit for maximum performance */
+ atomic_add(min - ret, &ra->ra_cur_pages);
+ ret = min;
+ }
return ret;
}
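
The new min argument puts a floor under the reservation even when the global read-ahead budget is exhausted; a hypothetical walk-through of the out: path above:

	/* Hypothetical: the window accounting left ret = 16, but the
	 * caller passed min = 64. atomic_add(48, &ra->ra_cur_pages)
	 * charges the overdraft and 64 is returned, so a read is never
	 * throttled below its own window.
	 */
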
@@ -357,9 +221,9 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
{
- struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
ll_ra_stats_inc_sbi(sbi, which);
}
@@ -388,61 +252,42 @@ static int index_in_window(unsigned long index, unsigned long point,
return start <= index && index <= end;
}
-static struct ll_readahead_state *ll_ras_get(struct file *f)
+void ll_ras_enter(struct file *f)
{
- struct ll_file_data *fd;
-
- fd = LUSTRE_FPRIVATE(f);
- return &fd->fd_ras;
-}
-
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+ struct ll_readahead_state *ras = &fd->fd_ras;
spin_lock(&ras->ras_lock);
ras->ras_requests++;
ras->ras_request_index = 0;
ras->ras_consecutive_requests++;
- rar->lrr_reader = current;
-
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
-}
-
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
-
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
spin_unlock(&ras->ras_lock);
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, struct cl_page *page,
- struct page *vmpage)
+ struct cl_object *clob, pgoff_t *max_index)
{
- struct ccc_page *cp;
+ struct page *vmpage = page->cp_vmpage;
+ struct vvp_page *vpg;
int rc;
rc = 0;
cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
- rc = cl_page_is_under_lock(env, io, page);
- if (rc == -EBUSY) {
- cp->cpg_defer_uptodate = 1;
- cp->cpg_ra_used = 0;
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
+ CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
+ vvp_index(vpg), *max_index);
+ if (*max_index == 0 || vvp_index(vpg) > *max_index)
+ rc = cl_page_is_under_lock(env, io, page, max_index);
+ if (rc == 0) {
+ vpg->vpg_defer_uptodate = 1;
+ vpg->vpg_ra_used = 0;
cl_page_list_add(queue, page);
rc = 1;
} else {
- cl_page_delete(env, page);
+ cl_page_discard(env, io, page);
rc = -ENOLCK;
}
} else {
@@ -466,24 +311,25 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
*/
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue,
- pgoff_t index, struct address_space *mapping)
+ pgoff_t index, pgoff_t *max_index)
{
+ struct cl_object *clob = io->ci_obj;
+ struct inode *inode = vvp_object_inode(clob);
struct page *vmpage;
- struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
struct cl_page *page;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
int rc = 0;
const char *msg = NULL;
- vmpage = grab_cache_page_nowait(mapping, index);
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
if (vmpage) {
/* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == mapping) {
+ if (vmpage->mapping == inode->i_mapping) {
page = cl_page_find(env, clob, vmpage->index,
vmpage, CPT_CACHEABLE);
if (!IS_ERR(page)) {
rc = cl_read_ahead_page(env, io, queue,
- page, vmpage);
+ page, clob, max_index);
if (rc == -ENOLCK) {
which = RA_STAT_FAILED_MATCH;
msg = "lock match failed";
@@ -504,7 +350,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
msg = "g_c_p_n failed";
}
if (msg) {
- ll_ra_stats_inc(mapping, which);
+ ll_ra_stats_inc(inode, which);
CDEBUG(D_READA, "%s\n", msg);
}
return rc;
@@ -616,11 +462,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *queue,
struct ra_io_arg *ria,
unsigned long *reserved_pages,
- struct address_space *mapping,
unsigned long *ra_end)
{
- int rc, count = 0, stride_ria;
- unsigned long page_idx;
+ int rc, count = 0;
+ bool stride_ria;
+ pgoff_t page_idx;
+ pgoff_t max_index = 0;
LASSERT(ria);
RIA_DEBUG(ria);
@@ -631,12 +478,13 @@ static int ll_read_ahead_pages(const struct lu_env *env,
if (ras_inside_ra_window(page_idx, ria)) {
/* If the page is inside the read-ahead window*/
rc = ll_read_ahead_page(env, io, queue,
- page_idx, mapping);
+ page_idx, &max_index);
if (rc == 1) {
(*reserved_pages)--;
count++;
- } else if (rc == -ENOLCK)
+ } else if (rc == -ENOLCK) {
break;
+ }
} else if (stride_ria) {
/* If it is not in the read-ahead window, and it is
* read-ahead mode, then check whether it should skip
@@ -666,25 +514,22 @@ static int ll_read_ahead_pages(const struct lu_env *env,
}
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags)
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit)
{
struct vvp_io *vio = vvp_env_io(env);
- struct vvp_thread_info *vti = vvp_env_info(env);
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct ll_thread_info *lti = ll_env_info(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len;
+ unsigned long ra_end, len, mlen = 0;
struct inode *inode;
- struct ll_ra_read *bead;
- struct ra_io_arg *ria = &vti->vti_ria;
- struct ll_inode_info *lli;
+ struct ra_io_arg *ria = &lti->lti_ria;
struct cl_object *clob;
int ret = 0;
__u64 kms;
- inode = mapping->host;
- lli = ll_i2info(inode);
- clob = lli->lli_clob;
+ clob = io->ci_obj;
+ inode = vvp_object_inode(clob);
memset(ria, 0, sizeof(*ria));
@@ -696,22 +541,20 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
return ret;
kms = attr->cat_kms;
if (kms == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
return 0;
}
spin_lock(&ras->ras_lock);
- if (vio->cui_ra_window_set)
- bead = &vio->cui_bead;
- else
- bead = NULL;
/* Enlarge the RA window to encompass the full read */
- if (bead && ras->ras_window_start + ras->ras_window_len <
- bead->lrr_start + bead->lrr_count) {
- ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ if (vio->vui_ra_valid &&
+ ras->ras_window_start + ras->ras_window_len <
+ vio->vui_ra_start + vio->vui_ra_count) {
+ ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
ras->ras_window_start;
}
+
/* Reserve a part of the read-ahead window that we'll be issuing */
if (ras->ras_window_len) {
start = ras->ras_next_readahead;
@@ -755,29 +598,48 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
spin_unlock(&ras->ras_lock);
if (end == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
return 0;
}
len = ria_page_count(ria);
- if (len == 0)
+ if (len == 0) {
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
return 0;
+ }
+
+ CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
+ PFID(lu_object_fid(&clob->co_lu)),
+ ria->ria_start, ria->ria_end,
+ vio->vui_ra_valid ? vio->vui_ra_start : 0,
+ vio->vui_ra_valid ? vio->vui_ra_count : 0,
+ hit);
+
+ /* at least extend the readahead window to cover the current read */
+ if (!hit && vio->vui_ra_valid &&
+ vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
+ /* to the end of current read window. */
+ mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
+ /* trim to RPC boundary */
+ start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
+ mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
+ }
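
A numeric sketch of the RPC-boundary trim above; the concrete value of PTLRPC_MAX_BRW_PAGES is an assumption here (256, i.e. 1 MB RPCs with 4 KB pages):

	/* Assuming PTLRPC_MAX_BRW_PAGES == 256: ria_start = 300 gives
	 * start = 300 & 255 = 44 and mlen = min(mlen, 256 - 44) = at most
	 * 212 pages, so the forced window ends exactly at the next RPC
	 * boundary (page 512).
	 */
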
- reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
if (reserved < len)
- ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+ ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
- CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
+ CDEBUG(D_READA, "reserved pages %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
+ reserved, len, mlen,
atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
- ret = ll_read_ahead_pages(env, io, queue,
- ria, &reserved, mapping, &ra_end);
+ ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end);
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
- ll_ra_stats_inc(mapping, RA_STAT_EOF);
+ ll_ra_stats_inc(inode, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
* the ras we need to go back and update the ras so that the
@@ -789,6 +651,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
+ ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
@@ -836,7 +699,6 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
spin_lock_init(&ras->ras_lock);
ras_reset(inode, ras, 0);
ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
@@ -1059,15 +921,18 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
ras->ras_last_readpage = index;
ras_set_start(inode, ras, index);
- if (stride_io_mode(ras))
+ if (stride_io_mode(ras)) {
/* Stride readahead is sensitive to the read-ahead offset,
* so we use the original offset here instead of
* ras_window_start, which is RPC aligned.
*/
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- else
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
+ } else {
+ if (ras->ras_next_readahead < ras->ras_window_start)
+ ras->ras_next_readahead = ras->ras_window_start;
+ if (!hit)
+ ras->ras_next_readahead = index + 1;
+ }
RAS_CDEBUG(ras);
/* Trigger RA in the mmap case where ras_consecutive_requests
@@ -1129,7 +994,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
clob = ll_i2info(inode)->lli_clob;
LASSERT(clob);
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = clob;
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, clob);
@@ -1240,8 +1105,9 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
- end = i_size_read(inode);
- mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
+ mapping->writeback_index = 0;
+ else
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
return result;
}
@@ -1251,7 +1117,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
struct ll_cl_context *lcc;
int result;
- lcc = ll_cl_init(file, vmpage, 0);
+ lcc = ll_cl_init(file, vmpage);
if (!IS_ERR(lcc)) {
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
@@ -1273,3 +1139,28 @@ int ll_readpage(struct file *file, struct page *vmpage)
}
return result;
}
+
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt)
+{
+ struct cl_2queue *queue;
+ int result;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ queue = &io->ci_queue;
+ cl_2queue_init_page(queue, page);
+
+ result = cl_io_submit_sync(env, io, crt, queue, 0);
+ LASSERT(cl_page_is_owned(page, io));
+
+ if (crt == CRT_READ)
+ /*
+ * in the CRT_WRITE case the page is left locked even on
+ * error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return result;
+}
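
ll_page_sync_io() is the synchronous single-page I/O helper declared in llite_internal.h above; its caller in this patch is ll_prepare_partial_page() in rw26.c below:

	/* read in the old contents of a partially overwritten page */
	result = ll_page_sync_io(env, io, pg, CRT_READ);
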
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 69aa15e8e..c12a048fc 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -95,15 +95,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
if (obj) {
page = cl_vmpage_page(vmpage, obj);
if (page) {
- lu_ref_add(&page->cp_reference,
- "delete", vmpage);
cl_page_delete(env, page);
- lu_ref_del(&page->cp_reference,
- "delete", vmpage);
cl_page_put(env, page);
}
- } else
+ } else {
LASSERT(vmpage->private == 0);
+ }
cl_env_put(env, &refcheck);
}
}
@@ -111,12 +108,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
- struct cl_env_nest nest;
struct lu_env *env;
+ void *cookie;
struct cl_object *obj;
struct cl_page *page;
struct address_space *mapping;
- int result;
+ int result = 0;
LASSERT(PageLocked(vmpage));
if (PageWriteback(vmpage) || PageDirty(vmpage))
@@ -130,53 +127,42 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
if (!obj)
return 1;
- /* 1 for page allocator, 1 for cl_page and 1 for page cache */
+ /* 1 for caller, 1 for cl_page and 1 for page cache */
if (page_count(vmpage) > 3)
return 0;
- /* TODO: determine what gfp should be used by @gfp_mask. */
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- /* If we can't allocate an env we won't call cl_page_put()
- * later on which further means it's impossible to drop
- * page refcount by cl_page, so ask kernel to not free
- * this page.
- */
- return 0;
-
page = cl_vmpage_page(vmpage, obj);
- result = !page;
- if (page) {
- if (!cl_page_in_use(page)) {
- result = 1;
- cl_page_delete(env, page);
- }
- cl_page_put(env, page);
- }
- cl_env_nested_put(&nest, env);
- return result;
-}
+ if (!page)
+ return 1;
-static int ll_set_page_dirty(struct page *vmpage)
-{
-#if 0
- struct cl_page *page = vvp_vmpage_page_transient(vmpage);
- struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
- struct vvp_page *cpg;
+ cookie = cl_env_reenter();
+ env = cl_env_percpu_get();
+ LASSERT(!IS_ERR(env));
- /*
- * XXX should page method be called here?
- */
- LASSERT(&obj->co_cl == page->cp_obj);
- cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- /*
- * XXX cannot do much here, because page is possibly not locked:
- * sys_munmap()->...
- * ->unmap_page_range()->zap_pte_range()->set_page_dirty().
+ if (!cl_page_in_use(page)) {
+ result = 1;
+ cl_page_delete(env, page);
+ }
+
+ /* To use the percpu env array, the call path cannot be rescheduled;
+ * otherwise the percpu array will be corrupted if ll_releasepage() is
+ * called again on the same CPU.
+ *
+ * If this page holds the last refcount of the cl_object, the following
+ * call path may cause a reschedule:
+ * cl_page_put -> cl_page_free -> cl_object_put ->
+ * lu_object_put -> lu_object_free -> lov_delete_raid0.
+ *
+ * However, the kernel cannot get rid of this inode until all pages have
+ * been cleaned up. Since we hold the page lock here, it is safe to
+ * assume we won't enter the object delete path.
*/
- vvp_write_pending(obj, cpg);
-#endif
- return __set_page_dirty_nobuffers(vmpage);
+ LASSERT(cl_object_refc(obj) > 1);
+ cl_page_put(env, page);
+
+ cl_env_percpu_put(env);
+ cl_env_reexit(cookie);
+ return result;
}
#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
@@ -266,7 +252,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
* write directly
*/
if (clp->cp_type == CPT_CACHEABLE) {
- struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *vmpage = cl_page_vmpage(clp);
struct page *src_page;
struct page *dst_page;
void *src;
@@ -358,14 +344,14 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
*/
#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
- loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
{
struct lu_env *env;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct ccc_object *obj = cl_inode2ccc(inode);
+ struct vvp_object *obj = cl_inode2vvp(inode);
+ loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
struct ll_inode_info *lli = ll_i2info(inode);
@@ -376,22 +362,21 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
return -EBADF;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
- if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
+ if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
return -EINVAL;
- CDEBUG(D_VFSTRACE,
- "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
- inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
+ PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
file_offset, file_offset, count >> PAGE_SHIFT,
MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
- if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
+ if (iov_iter_alignment(iter) & ~PAGE_MASK)
return -EINVAL;
env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
- io = ccc_env_io(env)->cui_cl.cis_io;
+ io = vvp_env_io(env)->vui_cl.cis_io;
LASSERT(io);
/* 0. Need locking between buffered and direct access. and race with
@@ -401,7 +386,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == READ)
inode_lock(inode);
- LASSERT(obj->cob_transient_pages == 0);
+ LASSERT(obj->vob_transient_pages == 0);
while (iov_iter_count(iter)) {
struct page **pages;
size_t offs;
@@ -435,8 +420,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
size > (PAGE_SIZE / sizeof(*pages)) *
PAGE_SIZE) {
size = ((((size / 2) - 1) |
- ~CFS_PAGE_MASK) + 1) &
- CFS_PAGE_MASK;
+ ~PAGE_MASK) + 1) &
+ PAGE_MASK;
CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
size);
continue;
@@ -449,62 +434,213 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
file_offset += result;
}
out:
- LASSERT(obj->cob_transient_pages == 0);
+ LASSERT(obj->vob_transient_pages == 0);
if (iov_iter_rw(iter) == READ)
inode_unlock(inode);
if (tot_bytes > 0) {
- if (iov_iter_rw(iter) == WRITE) {
- struct lov_stripe_md *lsm;
-
- lsm = ccc_inode_lsm_get(inode);
- LASSERT(lsm);
- lov_stripe_lock(lsm);
- obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
- lov_stripe_unlock(lsm);
- ccc_inode_lsm_put(inode, lsm);
- }
+ struct vvp_io *vio = vvp_env_io(env);
+
+ /* no commit async for direct IO */
+ vio->u.write.vui_written += tot_bytes;
}
cl_env_put(env, &refcheck);
- return tot_bytes ? : result;
+ return tot_bytes ? tot_bytes : result;
+}
+
+/**
+ * Prepare partially written-to page for a write.
+ */
+static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct cl_object *obj = io->ci_obj;
+ struct vvp_page *vpg = cl_object_page_slice(obj, pg);
+ loff_t offset = cl_offset(obj, vvp_index(vpg));
+ int result;
+
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (result == 0) {
+ /*
+ * If we are writing to a new page, there is no need to read old data.
+ * The extent locking will have updated the KMS, and for our
+ * purposes here we can treat it like i_size.
+ */
+ if (attr->cat_kms <= offset) {
+ char *kaddr = kmap_atomic(vpg->vpg_page);
+
+ memset(kaddr, 0, cl_page_size(obj));
+ kunmap_atomic(kaddr);
+ } else if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ } else {
+ result = ll_page_sync_io(env, io, pg, CRT_READ);
+ }
+ }
+ return result;
}
static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
- int rc;
- unsigned from = pos & (PAGE_SIZE - 1);
+ struct page *vmpage = NULL;
+ unsigned int from = pos & (PAGE_SIZE - 1);
+ unsigned int to = from + len;
+ int result = 0;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
+ CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
- *pagep = page;
+ lcc = ll_cl_init(file, NULL);
+ if (IS_ERR(lcc)) {
+ result = PTR_ERR(lcc);
+ goto out;
+ }
- rc = ll_prepare_write(file, page, from, from + len);
- if (rc) {
- unlock_page(page);
- put_page(page);
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+
+ /* To avoid deadlock, try to lock page first. */
+ vmpage = grab_cache_page_nowait(mapping, index);
+ if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *plist = &vio->u.write.vui_queue;
+
+ /* if the page is already in the dirty cache, we have to commit
+ * the pages right now; otherwise, it may cause a deadlock
+ * because it holds the page lock of a dirty page and requests
+ * more grants. It's okay for the dirty page to be the first
+ * one in the commit page list, though.
+ */
+ if (vmpage && plist->pl_nr > 0) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmpage = NULL;
+ }
+
+ /* commit pages and then wait for page lock */
+ result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ goto out;
+
+ if (!vmpage) {
+ vmpage = grab_cache_page_write_begin(mapping, index,
+ flags);
+ if (!vmpage) {
+ result = -ENOMEM;
+ goto out;
+ }
+ }
}
- return rc;
+
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ result = PTR_ERR(page);
+ goto out;
+ }
+
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+
+ cl_page_assume(env, io, page);
+ if (!PageUptodate(vmpage)) {
+ /*
+ * We're completely overwriting an existing page,
+ * so _don't_ set it up to date until commit_write
+ */
+ if (from == 0 && to == PAGE_SIZE) {
+ CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
+ POISON_PAGE(vmpage, 0x11);
+ } else {
+ /* TODO: can be optimized at OSC layer to check if it
+ * is a lockless IO. In that case, it's not necessary
+ * to read the data.
+ */
+ result = ll_prepare_partial_page(env, io, page);
+ if (result == 0)
+ SetPageUptodate(vmpage);
+ }
+ }
+ if (result < 0)
+ cl_page_unassume(env, io, page);
+out:
+ if (result < 0) {
+ if (vmpage) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
+ if (!IS_ERR(lcc))
+ ll_cl_fini(lcc);
+ } else {
+ *pagep = vmpage;
+ *fsdata = lcc;
+ }
+ return result;
}
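
Editor's note: the reworked ll_write_begin() tries a non-blocking page grab first and only blocks after flushing its queue, so it never sleeps on a page lock while holding dirty pages that need grants to commit. A trivial user-space model of that two-phase grab (write_begin_model() and its helpers are hypothetical):

#include <stdio.h>
#include <stdbool.h>

static bool trylock_page_model(bool busy) { return !busy; }

/* Model of the two-phase grab: trylock, flush queued pages on failure,
 * then take the blocking lock.
 */
static void write_begin_model(bool busy, int queued)
{
	if (!trylock_page_model(busy)) {
		if (queued > 0)
			printf("commit %d queued pages first\n", queued);
		printf("now take the blocking page lock\n");
	} else {
		printf("fast path: got the page lock immediately\n");
	}
}

int main(void)
{
	write_begin_model(false, 0);
	write_begin_model(true, 5);
	return 0;
}
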
static int ll_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *vmpage, void *fsdata)
{
+ struct ll_cl_context *lcc = fsdata;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ struct cl_page *page;
unsigned from = pos & (PAGE_SIZE - 1);
- int rc;
+ bool unplug = false;
+ int result = 0;
+
+ put_page(vmpage);
+
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+ vio = vvp_env_io(env);
+
+ LASSERT(cl_page_is_owned(page, io));
+ if (copied > 0) {
+ struct cl_page_list *plist = &vio->u.write.vui_queue;
+
+ lcc->lcc_page = NULL; /* page will be queued */
+
+ /* Add it into write queue */
+ cl_page_list_add(plist, page);
+ if (plist->pl_nr == 1) /* first page */
+ vio->u.write.vui_from = from;
+ else
+ LASSERT(from == 0);
+ vio->u.write.vui_to = from + copied;
+
+ /* We may have one full RPC, commit it soon */
+ if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
+ unplug = true;
+
+ CL_PAGE_DEBUG(D_VFSTRACE, env, page,
+ "queued page: %d.\n", plist->pl_nr);
+ } else {
+ cl_page_disown(env, io, page);
+
+ /* page list is not contiguous now, commit it now */
+ unplug = true;
+ }
- rc = ll_commit_write(file, page, from, from + copied);
- unlock_page(page);
- put_page(page);
+ if (unplug ||
+ file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
+ result = vvp_io_write_commit(env, io);
- return rc ?: copied;
+ ll_cl_fini(lcc);
+ return result >= 0 ? copied : result;
}
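
Editor's note: ll_write_end() now queues copied pages and only "unplugs" once a full RPC's worth accumulates or the file demands synchronous I/O. A sketch of the unplug decision; MAX_BRW_PAGES is assumed here to be 256 (a 1 MiB RPC of 4 KiB pages), which may not match PTLRPC_MAX_BRW_PAGES on every configuration:

#include <stdio.h>
#include <stdbool.h>

#define MAX_BRW_PAGES 256 /* assumed: one 1 MiB RPC of 4 KiB pages */

/* Model of the unplug test in ll_write_end(). */
static bool should_commit(int queued, bool sync_io)
{
	return queued >= MAX_BRW_PAGES || sync_io;
}

int main(void)
{
	printf("queued=10, async: %d\n", should_commit(10, false));   /* 0 */
	printf("queued=256, async: %d\n", should_commit(256, false)); /* 1 */
	printf("queued=1, O_SYNC: %d\n", should_commit(1, true));     /* 1 */
	return 0;
}
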
#ifdef CONFIG_MIGRATION
@@ -523,7 +659,7 @@ const struct address_space_operations ll_aops = {
.direct_IO = ll_direct_IO_26,
.writepage = ll_writepage,
.writepages = ll_writepages,
- .set_page_dirty = ll_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = ll_write_begin,
.write_end = ll_write_end,
.invalidatepage = ll_invalidatepage,
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 99ffd1589..6322f8866 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -661,8 +661,9 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
if (rc)
goto out;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- child, child->i_ino, child->i_generation);
+ CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
+ ll_get_fsname(child->i_sb, NULL, 0),
+ PFID(ll_inode2fid(child)), child);
ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
entry->se_inode = child;
@@ -1591,13 +1592,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
*dentryp = alias;
} else if (d_inode(*dentryp) != inode) {
/* revalidate, but inode is recreated */
- CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
- *dentryp,
- d_inode(*dentryp)->i_ino,
- d_inode(*dentryp)->i_generation,
- inode->i_ino,
- inode->i_generation);
+ CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
+ ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
+ *dentryp,
+ PFID(ll_inode2fid(d_inode(*dentryp))),
+ PFID(ll_inode2fid(inode)));
ll_sai_unplug(sai, entry);
return -ESTALE;
} else {
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 61856d37a..415750b0b 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -164,9 +164,18 @@ static int __init lustre_init(void)
if (rc != 0)
goto out_sysfs;
+ cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck,
+ LCT_REMEMBER | LCT_NOREF);
+ if (IS_ERR(cl_inode_fini_env)) {
+ rc = PTR_ERR(cl_inode_fini_env);
+ goto out_vvp;
+ }
+
+ cl_inode_fini_env->le_ctx.lc_cookie = 0x4;
+
rc = ll_xattr_init();
if (rc != 0)
- goto out_vvp;
+ goto out_inode_fini_env;
lustre_register_client_fill_super(ll_fill_super);
lustre_register_kill_super_cb(ll_kill_super);
@@ -174,6 +183,8 @@ static int __init lustre_init(void)
return 0;
+out_inode_fini_env:
+ cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
out_vvp:
vvp_global_fini();
out_sysfs:
@@ -198,6 +209,7 @@ static void __exit lustre_exit(void)
kset_unregister(llite_kset);
ll_xattr_fini();
+ cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
vvp_global_fini();
kmem_cache_destroy(ll_inode_cachep);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 46d03ea48..3fc736ccf 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -77,7 +77,9 @@ static int ll_readlink_internal(struct inode *inode,
ll_finish_md_op_data(op_data);
if (rc) {
if (rc != -ENOENT)
- CERROR("inode %lu: rc = %d\n", inode->i_ino, rc);
+ CERROR("%s: inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
goto failed;
}
@@ -90,8 +92,10 @@ static int ll_readlink_internal(struct inode *inode,
LASSERT(symlen != 0);
if (body->eadatasize != symlen) {
- CERROR("inode %lu: symlink length %d not expected %d\n",
- inode->i_ino, body->eadatasize - 1, symlen - 1);
+ CERROR("%s: inode "DFID": symlink length %d not expected %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), body->eadatasize - 1,
+ symlen - 1);
rc = -EPROTO;
goto failed;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 282b70b77..47101de1c 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -36,6 +36,7 @@
* cl_device and cl_device_type implementation for VVP layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_LLITE
@@ -56,13 +57,33 @@
* "llite_" (var. "ll_") prefix.
*/
-static struct kmem_cache *vvp_thread_kmem;
+static struct kmem_cache *ll_thread_kmem;
+struct kmem_cache *vvp_lock_kmem;
+struct kmem_cache *vvp_object_kmem;
+struct kmem_cache *vvp_req_kmem;
static struct kmem_cache *vvp_session_kmem;
+static struct kmem_cache *vvp_thread_kmem;
+
static struct lu_kmem_descr vvp_caches[] = {
{
- .ckd_cache = &vvp_thread_kmem,
- .ckd_name = "vvp_thread_kmem",
- .ckd_size = sizeof(struct vvp_thread_info),
+ .ckd_cache = &ll_thread_kmem,
+ .ckd_name = "ll_thread_kmem",
+ .ckd_size = sizeof(struct ll_thread_info),
+ },
+ {
+ .ckd_cache = &vvp_lock_kmem,
+ .ckd_name = "vvp_lock_kmem",
+ .ckd_size = sizeof(struct vvp_lock),
+ },
+ {
+ .ckd_cache = &vvp_object_kmem,
+ .ckd_name = "vvp_object_kmem",
+ .ckd_size = sizeof(struct vvp_object),
+ },
+ {
+ .ckd_cache = &vvp_req_kmem,
+ .ckd_name = "vvp_req_kmem",
+ .ckd_size = sizeof(struct vvp_req),
},
{
.ckd_cache = &vvp_session_kmem,
@@ -70,29 +91,40 @@ static struct lu_kmem_descr vvp_caches[] = {
.ckd_size = sizeof(struct vvp_session)
},
{
+ .ckd_cache = &vvp_thread_kmem,
+ .ckd_name = "vvp_thread_kmem",
+ .ckd_size = sizeof(struct vvp_thread_info),
+ },
+ {
.ckd_cache = NULL
}
};
-static void *vvp_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *ll_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct vvp_thread_info *info;
- info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
-static void vvp_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void ll_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct vvp_thread_info *info = data;
- kmem_cache_free(vvp_thread_kmem, info);
+ kmem_cache_free(ll_thread_kmem, info);
}
+struct lu_context_key ll_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = ll_thread_key_init,
+ .lct_fini = ll_thread_key_fini
+};
+
static void *vvp_session_key_init(const struct lu_context *ctx,
struct lu_context_key *key)
{
@@ -112,34 +144,127 @@ static void vvp_session_key_fini(const struct lu_context *ctx,
kmem_cache_free(vvp_session_kmem, session);
}
-struct lu_context_key vvp_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = vvp_key_init,
- .lct_fini = vvp_key_fini
-};
-
struct lu_context_key vvp_session_key = {
.lct_tags = LCT_SESSION,
.lct_init = vvp_session_key_init,
.lct_fini = vvp_session_key_fini
};
+void *vvp_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct vvp_thread_info *vti;
+
+ vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ if (!vti)
+ vti = ERR_PTR(-ENOMEM);
+ return vti;
+}
+
+void vvp_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct vvp_thread_info *vti = data;
+
+ kmem_cache_free(vvp_thread_kmem, vti);
+}
+
+struct lu_context_key vvp_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = vvp_thread_key_init,
+ .lct_fini = vvp_thread_key_fini
+};
+
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
+LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
static const struct lu_device_operations vvp_lu_ops = {
.ldo_object_alloc = vvp_object_alloc
};
static const struct cl_device_operations vvp_cl_ops = {
- .cdo_req_init = ccc_req_init
+ .cdo_req_init = vvp_req_init
};
+static struct lu_device *vvp_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct vvp_device *vdv = lu2vvp_dev(d);
+ struct cl_site *site = lu2cl_site(d->ld_site);
+ struct lu_device *next = cl2lu_dev(vdv->vdv_next);
+
+ if (d->ld_site) {
+ cl_site_fini(site);
+ kfree(site);
+ }
+ cl_device_fini(lu2cl_dev(d));
+ kfree(vdv);
+ return next;
+}
+
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
- return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
+ struct vvp_device *vdv;
+ struct lu_device *lud;
+ struct cl_site *site;
+ int rc;
+
+ vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
+ if (!vdv)
+ return ERR_PTR(-ENOMEM);
+
+ lud = &vdv->vdv_cl.cd_lu_dev;
+ cl_device_init(&vdv->vdv_cl, t);
+ vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
+ vdv->vdv_cl.cd_ops = &vvp_cl_ops;
+
+ site = kzalloc(sizeof(*site), GFP_NOFS);
+ if (site) {
+ rc = cl_site_init(site, &vdv->vdv_cl);
+ if (rc == 0) {
+ rc = lu_site_init_finish(&site->cs_lu);
+ } else {
+ LASSERT(!lud->ld_site);
+ CERROR("Cannot init lu_site, rc %d.\n", rc);
+ kfree(site);
+ }
+ } else {
+ rc = -ENOMEM;
+ }
+ if (rc != 0) {
+ vvp_device_free(env, lud);
+ lud = ERR_PTR(rc);
+ }
+ return lud;
+}
+
+static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ struct vvp_device *vdv;
+ int rc;
+
+ vdv = lu2vvp_dev(d);
+ vdv->vdv_next = lu2cl_dev(next);
+
+ LASSERT(d->ld_site && next->ld_type);
+ next->ld_site = d->ld_site;
+ rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
+ next->ld_type->ldt_name,
+ NULL);
+ if (rc == 0) {
+ lu_device_get(next);
+ lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
+ }
+ return rc;
+}
+
+static struct lu_device *vvp_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}
static const struct lu_device_type_operations vvp_device_type_ops = {
@@ -150,9 +275,9 @@ static const struct lu_device_type_operations vvp_device_type_ops = {
.ldto_stop = vvp_type_stop,
.ldto_device_alloc = vvp_device_alloc,
- .ldto_device_free = ccc_device_free,
- .ldto_device_init = ccc_device_init,
- .ldto_device_fini = ccc_device_fini
+ .ldto_device_free = vvp_device_free,
+ .ldto_device_init = vvp_device_init,
+ .ldto_device_fini = vvp_device_fini,
};
struct lu_device_type vvp_device_type = {
@@ -168,20 +293,27 @@ struct lu_device_type vvp_device_type = {
*/
int vvp_global_init(void)
{
- int result;
+ int rc;
- result = lu_kmem_init(vvp_caches);
- if (result == 0) {
- result = ccc_global_init(&vvp_device_type);
- if (result != 0)
- lu_kmem_fini(vvp_caches);
- }
- return result;
+ rc = lu_kmem_init(vvp_caches);
+ if (rc != 0)
+ return rc;
+
+ rc = lu_device_type_init(&vvp_device_type);
+ if (rc != 0)
+ goto out_kmem;
+
+ return 0;
+
+out_kmem:
+ lu_kmem_fini(vvp_caches);
+
+ return rc;
}
void vvp_global_fini(void)
{
- ccc_global_fini(&vvp_device_type);
+ lu_device_type_fini(&vvp_device_type);
lu_kmem_fini(vvp_caches);
}
@@ -205,13 +337,14 @@ int cl_sb_init(struct super_block *sb)
cl = cl_type_setup(env, NULL, &vvp_device_type,
sbi->ll_dt_exp->exp_obd->obd_lu_dev);
if (!IS_ERR(cl)) {
- cl2ccc_dev(cl)->cdv_sb = sb;
+ cl2vvp_dev(cl)->vdv_sb = sb;
sbi->ll_cl = cl;
sbi->ll_site = cl2lu_dev(cl)->ld_site;
}
cl_env_put(env, &refcheck);
- } else
+ } else {
rc = PTR_ERR(env);
+ }
return rc;
}
@@ -356,23 +489,18 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
return ~0ULL;
clob = vvp_pgcache_obj(env, dev, &id);
if (clob) {
- struct cl_object_header *hdr;
- int nr;
- struct cl_page *pg;
-
- /* got an object. Find next page. */
- hdr = cl_object_header(clob);
+ struct inode *inode = vvp_object_inode(clob);
+ struct page *vmpage;
+ int nr;
- spin_lock(&hdr->coh_page_guard);
- nr = radix_tree_gang_lookup(&hdr->coh_tree,
- (void **)&pg,
- id.vpi_index, 1);
+ nr = find_get_pages_contig(inode->i_mapping,
+ id.vpi_index, 1, &vmpage);
if (nr > 0) {
- id.vpi_index = pg->cp_index;
+ id.vpi_index = vmpage->index;
/* Can't support files over 16TB */
- nr = !(pg->cp_index > 0xffffffff);
+ nr = !(vmpage->index > 0xffffffff);
+ put_page(vmpage);
}
- spin_unlock(&hdr->coh_page_guard);
lu_object_ref_del(&clob->co_lu, "dump", current);
cl_object_put(env, clob);
@@ -398,21 +526,20 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
static void vvp_pgcache_page_show(const struct lu_env *env,
struct seq_file *seq, struct cl_page *page)
{
- struct ccc_page *cpg;
+ struct vvp_page *vpg;
struct page *vmpage;
int has_flags;
- cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vmpage = cpg->cpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
+ vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
+ vmpage = vpg->vpg_page;
+ seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
0 /* gen */,
- cpg, page,
+ vpg, page,
"none",
- cpg->cpg_write_queued ? "wq" : "- ",
- cpg->cpg_defer_uptodate ? "du" : "- ",
+ vpg->vpg_write_queued ? "wq" : "- ",
+ vpg->vpg_defer_uptodate ? "du" : "- ",
PageWriteback(vmpage) ? "wb" : "-",
- vmpage, vmpage->mapping->host->i_ino,
- vmpage->mapping->host->i_generation,
+ vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
vmpage->mapping->host, vmpage->index,
page_count(vmpage));
has_flags = 0;
@@ -431,8 +558,6 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
struct ll_sb_info *sbi;
struct cl_object *clob;
struct lu_env *env;
- struct cl_page *page;
- struct cl_object_header *hdr;
struct vvp_pgcache_id id;
int refcheck;
int result;
@@ -444,27 +569,38 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
sbi = f->private;
clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
if (clob) {
- hdr = cl_object_header(clob);
-
- spin_lock(&hdr->coh_page_guard);
- page = cl_page_lookup(hdr, id.vpi_index);
- spin_unlock(&hdr->coh_page_guard);
+ struct inode *inode = vvp_object_inode(clob);
+ struct cl_page *page = NULL;
+ struct page *vmpage;
+
+ result = find_get_pages_contig(inode->i_mapping,
+ id.vpi_index, 1,
+ &vmpage);
+ if (result > 0) {
+ lock_page(vmpage);
+ page = cl_vmpage_page(vmpage, clob);
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
- seq_printf(f, "%8x@"DFID": ",
- id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
+ seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
+ PFID(lu_object_fid(&clob->co_lu)));
if (page) {
vvp_pgcache_page_show(env, f, page);
cl_page_put(env, page);
- } else
+ } else {
seq_puts(f, "missing\n");
+ }
lu_object_ref_del(&clob->co_lu, "dump", current);
cl_object_put(env, clob);
- } else
+ } else {
seq_printf(f, "%llx missing\n", pos);
+ }
cl_env_put(env, &refcheck);
result = 0;
- } else
+ } else {
result = PTR_ERR(env);
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index bb393378c..27b9b0a01 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -41,21 +41,337 @@
#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H
+#include "../include/lustre/lustre_idl.h"
#include "../include/cl_object.h"
-#include "llite_internal.h"
-int vvp_io_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int vvp_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
+enum obd_notify_event;
+struct inode;
+struct lov_stripe_md;
+struct lustre_md;
+struct obd_capa;
+struct obd_device;
+struct obd_export;
+struct page;
+
+/* a specific architecture may implement only part of this list */
+enum vvp_io_subtype {
+ /** normal IO */
+ IO_NORMAL,
+ /** io started from splice_{read|write} */
+ IO_SPLICE
+};
+
+/**
+ * IO state private to the VVP layer.
+ */
+struct vvp_io {
+ /** super class */
+ struct cl_io_slice vui_cl;
+ struct cl_io_lock_link vui_link;
+ /**
+ * I/O vector information to or from which read/write is going.
+ */
+ struct iov_iter *vui_iter;
+ /**
+ * Total size for the left IO.
+ */
+ size_t vui_tot_count;
+
+ union {
+ struct vvp_fault_io {
+ /**
+ * Inode modification time that is checked across DLM
+ * lock request.
+ */
+ time64_t ft_mtime;
+ struct vm_area_struct *ft_vma;
+ /**
+ * locked page returned from vvp_io
+ */
+ struct page *ft_vmpage;
+ /**
+ * kernel fault info
+ */
+ struct vm_fault *ft_vmf;
+ /**
+ * fault API used bitflags for return code.
+ */
+ unsigned int ft_flags;
+ /**
+ * check that flags are from filemap_fault
+ */
+ bool ft_flags_valid;
+ } fault;
+ struct {
+ struct pipe_inode_info *vui_pipe;
+ unsigned int vui_flags;
+ } splice;
+ struct {
+ struct cl_page_list vui_queue;
+ unsigned long vui_written;
+ int vui_from;
+ int vui_to;
+ } write;
+ } u;
+
+ enum vvp_io_subtype vui_io_subtype;
+
+ /**
+ * Layout version when this IO is initialized
+ */
+ __u32 vui_layout_gen;
+ /**
+ * File descriptor against which IO is done.
+ */
+ struct ll_file_data *vui_fd;
+ struct kiocb *vui_iocb;
+
+ /* Readahead state. */
+ pgoff_t vui_ra_start;
+ pgoff_t vui_ra_count;
+ /* Set when vui_ra_{start,count} have been initialized. */
+ bool vui_ra_valid;
+};
+
+extern struct lu_device_type vvp_device_type;
+
+extern struct lu_context_key vvp_session_key;
+extern struct lu_context_key vvp_thread_key;
+
+extern struct kmem_cache *vvp_lock_kmem;
+extern struct kmem_cache *vvp_object_kmem;
+extern struct kmem_cache *vvp_req_kmem;
+
+struct vvp_thread_info {
+ struct cl_lock vti_lock;
+ struct cl_lock_descr vti_descr;
+ struct cl_io vti_io;
+ struct cl_attr vti_attr;
+};
+
+static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
+{
+ struct vvp_thread_info *vti;
+
+ vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
+ LASSERT(vti);
+
+ return vti;
+}
+
+static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
+{
+ struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
+
+ memset(lock, 0, sizeof(*lock));
+ return lock;
+}
+
+static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
+{
+ struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
+
+ memset(attr, 0, sizeof(*attr));
+
+ return attr;
+}
+
+static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
+{
+ struct cl_io *io = &vvp_env_info(env)->vti_io;
+
+ memset(io, 0, sizeof(*io));
+
+ return io;
+}
+
+struct vvp_session {
+ struct vvp_io cs_ios;
+};
+
+static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+{
+ struct vvp_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &vvp_session_key);
+ LASSERT(ses);
+
+ return ses;
+}
+
+static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
+{
+ return &vvp_env_session(env)->cs_ios;
+}
+
+/**
+ * ccc-private object state.
+ */
+struct vvp_object {
+ struct cl_object_header vob_header;
+ struct cl_object vob_cl;
+ struct inode *vob_inode;
+
+ /**
+ * A list of dirty pages pending IO in the cache. Used by
+ * SOM. Protected by ll_inode_info::lli_lock.
+ *
+ * \see vvp_page::vpg_pending_linkage
+ */
+ struct list_head vob_pending_list;
+
+ /**
+ * Access to this counter is protected by inode->i_sem. Since the
+ * lifetime of transient pages must be covered by the inode semaphore,
+ * we don't need to hold any other lock.
+ */
+ int vob_transient_pages;
+ /**
+ * Number of outstanding mmaps on this file.
+ *
+ * \see ll_vm_open(), ll_vm_close().
+ */
+ atomic_t vob_mmap_cnt;
+
+ /**
+ * Various flags.
+ * vob_discard_page_warned:
+ * If pages belonging to this object are discarded when a client
+ * is evicted, some debug info is printed. This flag is set while
+ * processing the first discarded page, to avoid flooding the debug
+ * log with messages for the remaining discarded pages.
+ *
+ * \see ll_dirty_page_discard_warn.
+ */
+ unsigned int vob_discard_page_warned:1;
+};
+
+/**
+ * VVP-private page state.
+ */
+struct vvp_page {
+ struct cl_page_slice vpg_cl;
+ int vpg_defer_uptodate;
+ int vpg_ra_used;
+ int vpg_write_queued;
+ /**
+ * Non-empty iff this page is already counted in
+ * vvp_object::vob_pending_list. This list is only used as a flag,
+ * that is, never iterated through, only checked for list_empty(), but
+ * having a list is useful for debugging.
+ */
+ struct list_head vpg_pending_linkage;
+ /** VM page */
+ struct page *vpg_page;
+};
+
+static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
+{
+ return container_of(slice, struct vvp_page, vpg_cl);
+}
+
+static inline pgoff_t vvp_index(struct vvp_page *vvp)
+{
+ return vvp->vpg_cl.cpl_index;
+}
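
Editor's note: cl2vvp_page() and the other accessors in this header recover the enclosing VVP structure from a pointer to an embedded slice via container_of(). A self-contained user-space rendition of that pattern (struct names here are illustrative, not the Lustre ones):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page_slice { unsigned long cpl_index; };

struct my_page {
	int defer_uptodate;
	struct page_slice slice;
};

int main(void)
{
	struct my_page pg = { .defer_uptodate = 1,
			      .slice = { .cpl_index = 42 } };
	struct page_slice *sl = &pg.slice;
	/* recover the enclosing struct from the embedded member */
	struct my_page *back = container_of(sl, struct my_page, slice);

	printf("index=%lu defer=%d\n", back->slice.cpl_index,
	       back->defer_uptodate);
	return 0;
}
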
+
+struct vvp_device {
+ struct cl_device vdv_cl;
+ struct super_block *vdv_sb;
+ struct cl_device *vdv_next;
+};
+
+struct vvp_lock {
+ struct cl_lock_slice vlk_cl;
+};
+
+struct vvp_req {
+ struct cl_req_slice vrq_cl;
+};
+
+void *ccc_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key);
+void ccc_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+
+void ccc_umount(const struct lu_env *env, struct cl_device *dev);
+
+static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
+{
+ return &vdv->vdv_cl.cd_lu_dev;
+}
+
+static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
+{
+ return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
+}
+
+static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
+{
+ return container_of0(d, struct vvp_device, vdv_cl);
+}
+
+static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
+{
+ return container_of0(obj, struct vvp_object, vob_cl);
+}
+
+static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
+{
+ return container_of0(obj, struct vvp_object, vob_cl.co_lu);
+}
+
+static inline struct inode *vvp_object_inode(const struct cl_object *obj)
+{
+ return cl2vvp(obj)->vob_inode;
+}
+
+int vvp_object_invariant(const struct cl_object *obj);
+struct vvp_object *cl_inode2vvp(struct inode *inode);
+
+static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
+{
+ return cl2vvp_page(slice)->vpg_page;
+}
+
+static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
+{
+ return container_of(slice, struct vvp_lock, vlk_cl);
+}
+
+# define CLOBINVRNT(env, clob, expr) \
+ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
+
+/**
+ * New interfaces to get and put lov_stripe_md from the lov layer. This
+ * violates layering because lov_stripe_md is supposed to be private data
+ * of the lov layer.
+ *
+ * NB: If you find you have to use these interfaces for your new code, please
+ * think about it again. These interfaces may be removed in the future for
+ * better layering.
+ */
+struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
+void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
+int lov_read_and_clear_async_rc(struct cl_object *clob);
+
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
+void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
+
+int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
+int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
+int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
-struct ccc_object *cl_inode2ccc(struct inode *inode);
+int vvp_global_init(void);
+void vvp_global_fini(void);
extern const struct file_operations vvp_dump_pgcache_file_ops;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 85a835976..5bf9592ae 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -44,21 +44,30 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice);
+struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct vvp_io *vio;
+
+ vio = container_of(slice, struct vvp_io, vui_cl);
+ LASSERT(vio == vvp_env_io(env));
+
+ return vio;
+}
/**
* True if \a io is a normal io; false for splice_{read,write}
*/
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- return vio->cui_io_subtype == IO_NORMAL;
+ return vio->vui_io_subtype == IO_NORMAL;
}
/**
@@ -71,7 +80,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
bool rc = true;
switch (io->ci_type) {
@@ -80,7 +89,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
/* don't need lock here to check lli_layout_gen as we have held
* extent lock and GROUP lock has to hold to swap layout
*/
- if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
io->ci_need_restart = 1;
/* this will return application a short read/write */
io->ci_continue = 0;
@@ -95,20 +104,187 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
return rc;
}
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ ll_inode_size_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ ll_inode_size_unlock(inode);
+}
+
+/**
+ * Helper function that, if necessary, adjusts the file size
+ * (inode->i_size) when the position at offset \a pos is accessed. The
+ * file size can be arbitrarily stale on a Lustre client, but the client
+ * at least knows the KMS. If the accessed area is inside [0, KMS], set
+ * the file size to KMS; otherwise glimpse the file size.
+ *
+ * Locking: cl_isize_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_size_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count,
+ int *exceed)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct inode *inode = vvp_object_inode(obj);
+ loff_t pos = start + count - 1;
+ loff_t kms;
+ int result;
+
+ /*
+ * Consistency guarantees: following possibilities exist for the
+ * relation between region being accessed and real file size at this
+ * moment:
+ *
+ * (A): the region is completely inside of the file;
+ *
+ * (B-x): x bytes of region are inside of the file, the rest is
+ * outside;
+ *
+ * (C): the region is completely outside of the file.
+ *
+ * This classification is stable under DLM lock already acquired by
+ * the caller, because to change the class, other client has to take
+ * DLM lock conflicting with our lock. Also, any updates to ->i_size
+ * by other threads on this client are serialized by
+ * ll_inode_size_lock(). This guarantees that short reads are handled
+ * correctly in the face of concurrent writes and truncates.
+ */
+ vvp_object_size_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ kms = attr->cat_kms;
+ if (pos > kms) {
+ /*
+ * A glimpse is necessary to determine whether we
+ * return a short read (B) or some zeroes at the end
+ * of the buffer (C)
+ */
+ vvp_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed) {
+ /* If the object's page index exceeds the
+ * end-of-file page index, return directly.
+ * Do not expect the kernel to check such a
+ * case correctly; linux-2.6.18-128.1.1
+ * misses doing that.
+ * --bug 17336
+ */
+ loff_t size = i_size_read(inode);
+ loff_t cur_index = start >> PAGE_SHIFT;
+ loff_t size_index = (size - 1) >> PAGE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ size_index < cur_index)
+ *exceed = 1;
+ }
+ return result;
+ }
+ /*
+ * region is within kms and, hence, within real file
+ * size (A). We need to increase i_size to cover the
+ * read region so that generic_file_read() will do its
+ * job, but that doesn't mean the kms size is
+ * _correct_, it is only the _minimum_ size. If
+ * someone does a stat they will get the correct size
+ * which will always be >= the kms value here.
+ * b=11081
+ */
+ if (i_size_read(inode) < kms) {
+ i_size_write(inode, kms);
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)i_size_read(inode));
+ }
+ }
+
+ vvp_object_size_unlock(obj);
+
+ return result;
+}
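
Editor's note: the three consistency classes in the comment above (A, B-x, C) reduce to comparing the access range's last byte against KMS. A worked user-space sketch of vvp_prep_size()'s decision (prep_size() is a hypothetical model, not the Lustre function):

#include <stdio.h>

/* An access ending at or below KMS is known to be inside the file
 * (case A) and the stale local i_size can simply be raised to KMS; an
 * access beyond KMS needs a glimpse to distinguish a short read (B-x)
 * from a fully-outside region (C).
 */
static const char *prep_size(long long start, long long count,
			     long long kms, long long *i_size)
{
	long long last = start + count - 1;

	if (last > kms)
		return "glimpse needed";
	if (*i_size < kms)
		*i_size = kms; /* KMS is only the minimum size */
	return "within KMS";
}

int main(void)
{
	long long i_size = 0;

	printf("%s\n", prep_size(0, 4096, 65536, &i_size));    /* within KMS */
	printf("i_size=%lld\n", i_size);                        /* 65536 */
	printf("%s\n", prep_size(65536, 4096, 65536, &i_size)); /* glimpse */
	return 0;
}
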
+
/*****************************************************************************
*
* io operations.
*
*/
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
+ struct cl_object *obj = io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+ memset(&vio->vui_link, 0, sizeof(vio->vui_link));
+
+ if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
+ descr->cld_obj = obj;
+ descr->cld_start = start;
+ descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
+
+ cl_io_lock_add(env, io, &vio->vui_link);
+ return 0;
+}
+
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end)
+{
+ struct cl_object *obj = io->ci_obj;
+
+ return vvp_io_one_lock_index(env, io, enqflags, mode,
+ cl_index(obj, start), cl_index(obj, end));
+}
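
Editor's note: vvp_io_one_lock() converts a byte range into page indices with cl_index() before enqueueing the DLM lock. A worked example of that conversion, assuming 4 KiB pages (cl_index_model() is a stand-in for cl_index()):

#include <stdio.h>

#define PAGE_SHIFT_MODEL 12 /* assumed 4 KiB pages */

/* Model of cl_index(): byte offset -> page index. */
static unsigned long cl_index_model(long long offset)
{
	return (unsigned long)(offset >> PAGE_SHIFT_MODEL);
}

int main(void)
{
	/* a lock on bytes [5000, 20000] covers pages 1..4 */
	printf("start page %lu, end page %lu\n",
	       cl_index_model(5000), cl_index_model(20000));
	return 0;
}
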
+
+static int vvp_io_write_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ cl_page_list_init(&vio->u.write.vui_queue);
+ vio->u.write.vui_written = 0;
+ vio->u.write.vui_from = 0;
+ vio->u.write.vui_to = PAGE_SIZE;
+
+ return 0;
+}
+
+static void vvp_io_write_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+}
+
static int vvp_io_fault_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
- LASSERT(inode ==
- file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
+ LASSERT(inode == file_inode(vio->vui_fd->fd_file));
vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
return 0;
}
@@ -117,15 +293,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(obj);
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID
" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
if (io->ci_restore_needed == 1) {
int rc;
@@ -133,7 +310,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
/* file was detected release, we need to restore it
* before finishing the io
*/
- rc = ll_layout_restore(ccc_object_inode(obj));
+ rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
/* if restore registration failed, no restart,
* we will return -ENODATA
*/
@@ -159,16 +336,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
__u32 gen = 0;
/* check layout version */
- ll_layout_refresh(ccc_object_inode(obj), &gen);
- io->ci_need_restart = cio->cui_layout_gen != gen;
+ ll_layout_refresh(inode, &gen);
+ io->ci_need_restart = vio->vui_layout_gen != gen;
if (io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
DFID" layout changed from %d to %d.\n",
PFID(lu_object_fid(&obj->co_lu)),
- cio->cui_layout_gen, gen);
+ vio->vui_layout_gen, gen);
/* today successful restore is the only possible case */
/* restore was done, clear restoring state */
- ll_i2info(ccc_object_inode(obj))->lli_flags &=
+ ll_i2info(vvp_object_inode(obj))->lli_flags &=
~LLIF_FILE_RESTORING;
}
}
@@ -180,7 +357,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_page *page = io->u.ci_fault.ft_page;
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+ CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
if (page) {
lu_ref_del(&page->cp_reference, "fault", io);
@@ -203,16 +380,16 @@ static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
}
static int vvp_mmap_locks(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
+ struct vvp_io *vio, struct cl_io *io)
{
- struct ccc_thread_info *cti = ccc_env_info(env);
+ struct vvp_thread_info *cti = vvp_env_info(env);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &cti->cti_descr;
+ struct cl_lock_descr *descr = &cti->vti_descr;
ldlm_policy_data_t policy;
unsigned long addr;
ssize_t count;
- int result;
+ int result = 0;
struct iov_iter i;
struct iovec iov;
@@ -221,21 +398,21 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (!cl_is_normalio(env, io))
return 0;
- if (!vio->cui_iter) /* nfs or loop back device write */
+ if (!vio->vui_iter) /* nfs or loop back device write */
return 0;
/* No MM (e.g. NFS)? No vmas too. */
if (!mm)
return 0;
- iov_for_each(iov, i, *(vio->cui_iter)) {
+ iov_for_each(iov, i, *vio->vui_iter) {
addr = (unsigned long)iov.iov_base;
count = iov.iov_len;
if (count == 0)
continue;
- count += addr & (~CFS_PAGE_MASK);
- addr &= CFS_PAGE_MASK;
+ count += addr & (~PAGE_MASK);
+ addr &= PAGE_MASK;
down_read(&mm->mmap_sem);
while ((vma = our_vma(mm, addr, count)) != NULL) {
@@ -244,10 +421,10 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (ll_file_nolock(vma->vm_file)) {
/*
- * For no lock case, a lockless lock will be
- * generated.
+ * The no-lock case is not allowed for mmap.
*/
- flags = CEF_NEVER;
+ result = -EINVAL;
+ break;
}
/*
@@ -269,10 +446,8 @@ static int vvp_mmap_locks(const struct lu_env *env,
descr->cld_mode, descr->cld_start,
descr->cld_end);
- if (result < 0) {
- up_read(&mm->mmap_sem);
- return result;
- }
+ if (result < 0)
+ break;
if (vma->vm_end - addr >= count)
break;
@@ -281,26 +456,55 @@ static int vvp_mmap_locks(const struct lu_env *env,
addr = vma->vm_end;
}
up_read(&mm->mmap_sem);
+ if (result < 0)
+ break;
}
- return 0;
+ return result;
+}
+
+static void vvp_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ if (!cl_is_normalio(env, io))
+ return;
+
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
+}
+
+static void vvp_io_update_iov(const struct lu_env *env,
+ struct vvp_io *vio, struct cl_io *io)
+{
+ size_t size = io->u.ci_rw.crw_count;
+
+ if (!cl_is_normalio(env, io) || !vio->vui_iter)
+ return;
+
+ iov_iter_truncate(vio->vui_iter, size);
}
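
Editor's note: vvp_io_update_iov() and vvp_io_advance() bracket each chunk of I/O: the iterator is clamped to the current chunk before submission and re-expanded to the remaining total afterwards, which is what the kernel's iov_iter_truncate() and iov_iter_reexpand() do against real iovecs. A toy bookkeeping model where only the byte count is tracked:

#include <stdio.h>
#include <stddef.h>

struct iter_model { size_t count; };

/* clamp to this chunk, like iov_iter_truncate() */
static void truncate_model(struct iter_model *it, size_t count)
{
	if (it->count > count)
		it->count = count;
}

/* restore the remaining total, like iov_iter_reexpand() */
static void reexpand_model(struct iter_model *it, size_t count)
{
	it->count = count;
}

int main(void)
{
	struct iter_model it = { .count = 10000 }; /* total left to write */
	size_t tot = 10000, chunk = 4096;

	truncate_model(&it, chunk); /* issue one chunk-sized piece */
	tot -= chunk;               /* vvp_io_advance(): nob consumed */
	reexpand_model(&it, tot);
	printf("remaining after one chunk: %zu\n", it.count); /* 5904 */
	return 0;
}
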
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
int result;
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ccc_io_update_iov(env, cio, io);
+ vvp_io_update_iov(env, vio, io);
if (io->u.ci_rw.crw_nonblock)
ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
+ result = vvp_mmap_locks(env, vio, io);
if (result == 0)
- result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
+ result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
return result;
}
@@ -325,9 +529,11 @@ static int vvp_io_fault_lock(const struct lu_env *env,
/*
* XXX LDLM_FL_CBPENDING
*/
- return ccc_io_one_lock_index
- (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
+ return vvp_io_one_lock_index(env,
+ io, 0,
+ vvp_mode_from_vma(vio->u.fault.ft_vma),
+ io->u.ci_fault.ft_index,
+ io->u.ci_fault.ft_index);
}
static int vvp_io_write_lock(const struct lu_env *env,
@@ -354,14 +560,13 @@ static int vvp_io_setattr_iter_init(const struct lu_env *env,
}
/**
- * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
+ * Implementation of cl_io_operations::vio_lock() method for CIT_SETATTR io.
*
* Handles "lockless io" mode when extent locking is done by server.
*/
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
struct cl_io *io = ios->cis_io;
__u64 new_size;
__u32 enqflags = 0;
@@ -378,8 +583,8 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
return 0;
new_size = 0;
}
- cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
- return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
+
+ return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
new_size, OBD_OBJECT_EOF);
}
@@ -413,7 +618,7 @@ static int vvp_io_setattr_time(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
int result;
unsigned valid = CAT_CTIME;
@@ -437,7 +642,7 @@ static int vvp_io_setattr_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
int result = 0;
inode_lock(inode);
@@ -453,7 +658,7 @@ static void vvp_io_setattr_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
if (cl_io_is_trunc(io))
/* Truncate in memory pages - they must be clean pages
@@ -474,27 +679,25 @@ static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_ra_read *bead = &vio->cui_bead;
- struct file *file = cio->cui_fd->fd_file;
+ struct inode *inode = vvp_object_inode(obj);
+ struct file *file = vio->vui_fd->fd_file;
int result;
loff_t pos = io->u.ci_rd.rd.crw_pos;
long cnt = io->u.ci_rd.rd.crw_count;
- long tot = cio->cui_tot_count;
+ long tot = vio->vui_tot_count;
int exceed = 0;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
if (!can_populate_pages(env, io, inode))
return 0;
- result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
+ result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
else if (exceed != 0)
@@ -505,30 +708,27 @@ static int vvp_io_read_start(const struct lu_env *env,
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
- cio->cui_fd->fd_file->f_ra.ra_pages = 0;
+ vio->vui_fd->fd_file->f_ra.ra_pages = 0;
/* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_window_set) {
- vio->cui_ra_window_set = 1;
- bead->lrr_start = cl_index(obj, pos);
- /*
- * XXX: explicit PAGE_SIZE
- */
- bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
- ll_ra_read_in(file, bead);
+ if (!vio->vui_ra_valid) {
+ vio->vui_ra_valid = true;
+ vio->vui_ra_start = cl_index(obj, pos);
+ vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
+ ll_ras_enter(file);
}
/* BUG: 5972 */
file_accessed(file);
- switch (vio->cui_io_subtype) {
+ switch (vio->vui_io_subtype) {
case IO_NORMAL:
- LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
+ vio->u.splice.vui_pipe, cnt,
+ vio->u.splice.vui_flags);
/* LU-1109: do splice read stripe by stripe; otherwise it
* may make nfsd stuck if this read occupies all internal pipe
* buffers.
@@ -536,7 +736,7 @@ static int vvp_io_read_start(const struct lu_env *env,
io->ci_continue = 0;
break;
default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
LBUG();
}
@@ -546,30 +746,201 @@ out:
io->ci_continue = 0;
io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, READ);
+ vio->vui_fd, pos, result, READ);
result = 0;
}
return result;
}
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *plist, int from, int to)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_2queue *queue = &io->ci_queue;
+ struct cl_page *page;
+ unsigned int bytes = 0;
+ int rc = 0;
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
+ if (plist->pl_nr == 0)
+ return 0;
- vvp_io_fini(env, ios);
+ if (from > 0 || to != PAGE_SIZE) {
+ page = cl_page_list_first(plist);
+ if (plist->pl_nr == 1) {
+ cl_page_clip(env, page, from, to);
+ } else {
+ if (from > 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE) {
+ page = cl_page_list_last(plist);
+ cl_page_clip(env, page, 0, to);
+ }
+ }
+ }
+
+ cl_2queue_init(queue);
+ cl_page_list_splice(plist, &queue->c2_qin);
+ rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
+
+ /* plist is not sorted any more */
+ cl_page_list_splice(&queue->c2_qin, plist);
+ cl_page_list_splice(&queue->c2_qout, plist);
+ cl_2queue_fini(env, queue);
+
+ if (rc == 0) {
+ /* calculate bytes */
+ bytes = plist->pl_nr << PAGE_SHIFT;
+ bytes -= from + PAGE_SIZE - to;
+
+ while (plist->pl_nr > 0) {
+ page = cl_page_list_first(plist);
+ cl_page_list_del(env, plist, page);
+
+ cl_page_clip(env, page, 0, PAGE_SIZE);
+
+ SetPageUptodate(cl_page_vmpage(page));
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ }
+
+ return bytes > 0 ? bytes : rc;
+}
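
Editor's note: the byte accounting in vvp_io_commit_sync() is full pages minus the clipped head and tail: bytes = nr * PAGE_SIZE - from - (PAGE_SIZE - to). A worked check of that arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE_MODEL 4096 /* assumed */

/* nr pages were submitted, but the first page starts at 'from' and the
 * last ends at 'to', so the head and tail clippings are subtracted.
 */
static long committed_bytes(int nr, int from, int to)
{
	return (long)nr * PAGE_SIZE_MODEL - from - (PAGE_SIZE_MODEL - to);
}

int main(void)
{
	/* 3 pages, first clipped at 512, last ending at 1024:
	 * 3*4096 - 512 - (4096 - 1024) = 8704 bytes
	 */
	printf("%ld\n", committed_bytes(3, 512, 1024));
	return 0;
}
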
+
+static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct vvp_page *vpg;
+ struct page *vmpage = page->cp_vmpage;
+ struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+ SetPageUptodate(vmpage);
+ set_page_dirty(vmpage);
+
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+}
+
+/* make sure the page list is contiguous */
+static bool page_list_sanity_check(struct cl_object *obj,
+ struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ pgoff_t index = CL_PAGE_EOF;
+
+ cl_page_list_for_each(page, plist) {
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+
+ if (index == CL_PAGE_EOF) {
+ index = vvp_index(vpg);
+ continue;
+ }
+
+ ++index;
+ if (index == vvp_index(vpg))
+ continue;
+
+ return false;
+ }
+ return true;
+}
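
Editor's note: the sanity check above enforces that queued page indices are strictly consecutive, since one async commit covers one contiguous extent. The same invariant on a plain index array:

#include <stdio.h>
#include <stdbool.h>

/* Model of page_list_sanity_check(): indices must be consecutive. */
static bool indices_contiguous(const unsigned long *idx, int n)
{
	int i;

	for (i = 1; i < n; i++)
		if (idx[i] != idx[i - 1] + 1)
			return false;
	return true;
}

int main(void)
{
	unsigned long ok[] = { 7, 8, 9 };
	unsigned long bad[] = { 7, 9, 10 };

	printf("%d %d\n", indices_contiguous(ok, 3),
	       indices_contiguous(bad, 3)); /* 1 0 */
	return 0;
}
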
+
+/* Return how many bytes were queued or written */
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
+{
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *queue = &vio->u.write.vui_queue;
+ struct cl_page *page;
+ int rc = 0;
+ int bytes = 0;
+ unsigned int npages = vio->u.write.vui_queue.pl_nr;
+
+ if (npages == 0)
+ return 0;
+
+ CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
+ npages, vio->u.write.vui_from, vio->u.write.vui_to);
+
+ LASSERT(page_list_sanity_check(obj, queue));
+
+ /* submit IO with async write */
+ rc = cl_io_commit_async(env, io, queue,
+ vio->u.write.vui_from, vio->u.write.vui_to,
+ write_commit_callback);
+ npages -= queue->pl_nr; /* already committed pages */
+ if (npages > 0) {
+ /* calculate how many bytes were written */
+ bytes = npages << PAGE_SHIFT;
+
+ /* first page */
+ bytes -= vio->u.write.vui_from;
+ if (queue->pl_nr == 0) /* last page */
+ bytes -= PAGE_SIZE - vio->u.write.vui_to;
+ LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
+
+ vio->u.write.vui_written += bytes;
+
+ CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
+ npages, bytes, vio->u.write.vui_written);
+
+ /* the first page must have been written. */
+ vio->u.write.vui_from = 0;
+ }
+ LASSERT(page_list_sanity_check(obj, queue));
+ LASSERT(ergo(rc == 0, queue->pl_nr == 0));
+
+ /* out of quota, try sync write */
+ if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
+ rc = vvp_io_commit_sync(env, io, queue,
+ vio->u.write.vui_from,
+ vio->u.write.vui_to);
+ if (rc > 0) {
+ vio->u.write.vui_written += rc;
+ rc = 0;
+ }
+ }
+
+ /* update inode size */
+ ll_merge_attr(env, inode);
+
+ /* The pages remaining in the queue failed to commit; discard them
+ * unless they were dirtied before.
+ */
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ cl_page_list_del(env, queue, page);
+
+ if (!PageDirty(cl_page_vmpage(page)))
+ cl_page_discard(env, io, page);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ cl_page_list_fini(env, queue);
+
+ return rc;
}
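
Editor's note: the written-byte computation in vvp_io_write_commit() mirrors the sync path, except the last page's tail is only subtracted when the whole queue drained. A sketch of that accounting, again assuming 4 KiB pages (written_bytes() is a hypothetical model):

#include <stdio.h>

#define PAGE_SIZE_MODEL 4096 /* assumed */

/* 'committed' pages went out; subtract the first page's 'from' offset,
 * and the last page's tail only if nothing is left queued.
 */
static long written_bytes(int committed, int left, int from, int to)
{
	long bytes = (long)committed * PAGE_SIZE_MODEL - from;

	if (left == 0) /* the last page went out too */
		bytes -= PAGE_SIZE_MODEL - to;
	return bytes;
}

int main(void)
{
	/* 4 pages committed, queue drained, from=100, to=2048:
	 * 4*4096 - 100 - (4096 - 2048) = 14236 bytes
	 */
	printf("%ld\n", written_bytes(4, 0, 100, 2048));
	return 0;
}
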
static int vvp_io_write_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -582,25 +953,41 @@ static int vvp_io_write_start(const struct lu_env *env,
* PARALLEL IO This has to be changed for parallel IO doing
* out-of-order writes.
*/
+ ll_merge_attr(env, inode);
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- cio->cui_iocb->ki_pos = pos;
+ vio->vui_iocb->ki_pos = pos;
} else {
- LASSERT(cio->cui_iocb->ki_pos == pos);
+ LASSERT(vio->vui_iocb->ki_pos == pos);
}
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */
+ if (!vio->vui_iter) /* from a temp io in ll_cl_init(). */
result = 0;
else
- result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
+ result = generic_file_write_iter(vio->vui_iocb, vio->vui_iter);
+
+ if (result > 0) {
+ result = vvp_io_write_commit(env, io);
+ if (vio->u.write.vui_written > 0) {
+ result = vio->u.write.vui_written;
+ io->ci_nob += result;
+ CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
+ io->ci_nob, result);
+ }
+ }
if (result > 0) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+
if (result < cnt)
io->ci_continue = 0;
- io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, WRITE);
+ vio->vui_fd, pos, result, WRITE);
result = 0;
}
return result;
@@ -608,10 +995,10 @@ static int vvp_io_write_start(const struct lu_env *env,
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- struct vm_fault *vmf = cfio->fault.ft_vmf;
+ struct vm_fault *vmf = cfio->ft_vmf;
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
- cfio->fault.ft_flags_valid = 1;
+ cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+ cfio->ft_flags_valid = 1;
if (vmf->page) {
CDEBUG(D_PAGE,
@@ -619,39 +1006,51 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
vmf->page, vmf->page->mapping, vmf->page->index,
(long)vmf->page->flags, page_count(vmf->page),
page_private(vmf->page), vmf->virtual_address);
- if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
+ if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
- cfio->fault.ft_flags |= VM_FAULT_LOCKED;
+ cfio->ft_flags |= VM_FAULT_LOCKED;
}
cfio->ft_vmpage = vmf->page;
return 0;
}
- if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
- if (cfio->fault.ft_flags & VM_FAULT_OOM) {
+ if (cfio->ft_flags & VM_FAULT_OOM) {
CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
return -ENOMEM;
}
- if (cfio->fault.ft_flags & VM_FAULT_RETRY)
+ if (cfio->ft_flags & VM_FAULT_RETRY)
return -EAGAIN;
- CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
+ CERROR("Unknown error in page fault %d!\n", cfio->ft_flags);
return -EINVAL;
}
+static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct vvp_page *vpg;
+ struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+ set_page_dirty(page->cp_vmpage);
+
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
+}
+
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
@@ -659,7 +1058,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
struct page *vmpage = NULL;
struct cl_page *page;
loff_t size;
- pgoff_t last; /* last page in a file data region */
+ pgoff_t last_index;
if (fio->ft_executable &&
inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
@@ -670,7 +1069,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+ result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
return result;
@@ -705,15 +1104,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
goto out;
}
+ last_index = cl_index(obj, size - 1);
+
if (fio->ft_mkwrite) {
- pgoff_t last_index;
/*
* Capture the size while holding the lli_trunc_sem from above;
* we want to make sure that we complete the mkwrite action
* while holding this lock. We need to make sure that we are
* not past the end of the file.
*/
- last_index = cl_index(obj, size - 1);
if (last_index < fio->ft_index) {
CDEBUG(D_PAGE,
"llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
@@ -745,25 +1144,32 @@ static int vvp_io_fault_start(const struct lu_env *env,
*/
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
- if (set_page_dirty(vmpage)) {
- struct ccc_page *cp;
+ if (!PageDirty(vmpage)) {
+ struct cl_page_list *plist = &io->ci_queue.c2_qin;
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+ int to = PAGE_SIZE;
/* vvp_page_assume() calls wait_on_page_writeback(). */
cl_page_assume(env, io, page);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vvp_write_pending(cl2ccc(obj), cp);
+ cl_page_list_init(plist);
+ cl_page_list_add(plist, page);
+
+ /* size fixup */
+ if (last_index == vvp_index(vpg))
+ to = size & ~PAGE_MASK;
/* Do not set Dirty bit here so that in case IO is
* started before the page is really made dirty, we
* still have a chance to detect it.
*/
- result = cl_page_cache_add(env, io, page, CRT_WRITE);
+ result = cl_io_commit_async(env, io, plist, 0, to,
+ mkwrite_commit_callback);
LASSERT(cl_page_is_owned(page, io));
+ cl_page_list_fini(env, plist);
vmpage = NULL;
if (result < 0) {
- cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
@@ -773,20 +1179,20 @@ static int vvp_io_fault_start(const struct lu_env *env,
if (result == -EDQUOT)
result = -ENOSPC;
goto out;
- } else
+ } else {
cl_page_disown(env, io, page);
+ }
}
}
- last = cl_index(obj, size - 1);
/*
* The ft_index is only used in the case of
* a mkwrite action. We need to check that
* our assertions are correct, since
* we should have caught this above.
*/
- LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
- if (fio->ft_index == last)
+ LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
+ if (fio->ft_index == last_index)
/*
* Last page is mapped partially.
*/
@@ -801,7 +1207,9 @@ out:
/* return unlocked vmpage to avoid deadlocking */
if (vmpage)
unlock_page(vmpage);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+
+ cfio->ft_flags &= ~VM_FAULT_LOCKED;
+
return result;
}
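
In the rewritten mkwrite branch above, a clean page is no longer pushed through cl_page_cache_add(); it goes onto a one-page list handed to cl_io_commit_async() together with mkwrite_commit_callback(), so the page is dirtied only once the commit path has accepted it. A toy sketch of that callback-on-commit shape (stand-alone C; commit_async() and struct tpage are assumptions, not the driver's types):

#include <stdio.h>

struct tpage { int dirty; };

typedef void (*commit_cb)(struct tpage *);

/* Assumed stand-in for cl_io_commit_async(): only once the commit
 * path accepts the pages does the callback dirty them. */
static int commit_async(struct tpage **pages, int n, commit_cb cb)
{
	for (int i = 0; i < n; i++)
		cb(pages[i]);		/* ~ set_page_dirty() */
	return 0;
}

static void mark_dirty(struct tpage *pg)
{
	pg->dirty = 1;
}

int main(void)
{
	struct tpage pg = { 0 };
	struct tpage *queue[] = { &pg };

	commit_async(queue, 1, mark_dirty);
	printf("dirty after commit: %d\n", pg.dirty);
	return 0;
}
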
@@ -820,293 +1228,58 @@ static int vvp_io_read_page(const struct lu_env *env,
const struct cl_page_slice *slice)
{
struct cl_io *io = ios->cis_io;
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *page = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
+ struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
struct ll_readahead_state *ras = &fd->fd_ras;
- struct page *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
- int rc;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- LASSERT(slice->cpl_obj == obj);
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
- ras_update(sbi, inode, ras, page->cp_index,
- cp->cpg_defer_uptodate);
-
- /* Sanity check whether the page is protected by a lock. */
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- return rc;
- }
+ ras_update(sbi, inode, ras, vvp_index(vpg),
+ vpg->vpg_defer_uptodate);
- if (cp->cpg_defer_uptodate) {
- cp->cpg_ra_used = 1;
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
cl_page_export(env, page, 1);
}
/*
* Add the page into the queue even when it is marked uptodate above;
* this will unlock it automatically as part of cl_page_list_disown().
*/
+
cl_page_list_add(&queue->c2_qin, page);
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
- ll_readahead(env, io, ras,
- vmpage->mapping, &queue->c2_qin, fd->fd_flags);
+ ll_readahead(env, io, &queue->c2_qin, ras,
+ vpg->vpg_defer_uptodate);
return 0;
}
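
A detail of vvp_io_read_page() worth noting: the page is queued even when it is already uptodate, so that queue teardown (cl_page_list_disown()) stays the single place every page is unlocked. Sketched stand-alone, with a hypothetical qpage type:

#include <stddef.h>

struct qpage { int locked; int uptodate; };

/* Teardown is the one place queued pages get unlocked, no matter
 * whether they were exported as uptodate earlier (cf.
 * cl_page_list_disown()). */
static void queue_disown(struct qpage **queue, size_t n)
{
	for (size_t i = 0; i < n; i++)
		queue[i]->locked = 0;
}
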
-static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, struct ccc_page *cp,
- enum cl_req_type crt)
+void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
- struct cl_2queue *queue;
- int result;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- queue = &io->ci_queue;
- cl_2queue_init_page(queue, page);
-
- result = cl_io_submit_sync(env, io, crt, queue, 0);
- LASSERT(cl_page_is_owned(page, io));
-
- if (crt == CRT_READ)
- /*
- * in CRT_WRITE case page is left locked even in case of
- * error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
- struct cl_object *obj, struct cl_page *pg,
- struct ccc_page *cp,
- unsigned from, unsigned to)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- loff_t offset = cl_offset(obj, pg->cp_index);
- int result;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result == 0) {
- /*
- * If are writing to a new page, no need to read old data.
- * The extent locking will have updated the KMS, and for our
- * purposes here we can treat it like i_size.
- */
- if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(cp->cpg_page);
-
- memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr);
- } else if (cp->cpg_defer_uptodate)
- cp->cpg_ra_used = 1;
- else
- result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
- /*
- * In older implementations, obdo_refresh_inode is called here
- * to update the inode because the write might modify the
- * object info at OST. However, this has been proven useless,
- * since LVB functions will be called when user space program
- * tries to retrieve inode attribute. Also, see bug 15909 for
- * details. -jay
- */
- if (result == 0)
- cl_page_export(env, pg, 1);
- }
- return result;
-}
-
-static int vvp_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
-
- int result;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
-
- result = 0;
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
- if (!PageUptodate(vmpage)) {
- /*
- * We're completely overwriting an existing page, so _don't_
- * set it up to date until commit_write
- */
- if (from == 0 && to == PAGE_SIZE) {
- CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
- POISON_PAGE(page, 0x11);
- } else
- result = vvp_io_prepare_partial(env, ios->cis_io, obj,
- pg, cp, from, to);
- } else
- CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
- return result;
-}
-
-static int vvp_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct cl_io *io = ios->cis_io;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct page *vmpage = cp->cpg_page;
-
- int result;
- int tallyop;
- loff_t size;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == inode);
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
- CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
-
- /*
- * queue a write for some time in the future the first time we
- * dirty the page.
- *
- * This is different from what other file systems do: they usually
- * just mark page (and some of its buffers) dirty and rely on
- * balance_dirty_pages() to start a write-back. Lustre wants write-back
- * to be started earlier for the following reasons:
- *
- * (1) with a large number of clients we need to limit the amount
- * of cached data on the clients a lot;
- *
- * (2) large compute jobs generally want compute-only then io-only
- * and the IO should complete as quickly as possible;
- *
- * (3) IO is batched up to the RPC size and is async until the
- * client max cache is hit
- * (/sys/fs/lustre/osc/OSC.../max_dirty_mb)
- *
- */
- if (!PageDirty(vmpage)) {
- tallyop = LPROC_LL_DIRTY_MISSES;
- result = cl_page_cache_add(env, io, pg, CRT_WRITE);
- if (result == 0) {
- /* page was added into cache successfully. */
- set_page_dirty(vmpage);
- vvp_write_pending(cl2ccc(obj), cp);
- } else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
- bool need_clip = true;
-
- /*
- * Client ran out of disk space grant. Possible
- * strategies are:
- *
- * (a) do a sync write, renewing grant;
- *
- * (b) stop writing on this stripe, switch to the
- * next one.
- *
- * (b) is a part of "parallel io" design that is the
- * ultimate goal. (a) is what "old" client did, and
- * what the new code continues to do for the time
- * being.
- */
- if (last_index > pg->cp_index) {
- to = PAGE_SIZE;
- need_clip = false;
- } else if (last_index == pg->cp_index) {
- int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
-
- if (to < size_to)
- to = size_to;
- }
- if (need_clip)
- cl_page_clip(env, pg, 0, to);
- result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
- if (result)
- CERROR("Write page %lu of inode %p failed %d\n",
- pg->cp_index, inode, result);
- }
- } else {
- tallyop = LPROC_LL_DIRTY_HITS;
- result = 0;
- }
- ll_stats_ops_tally(sbi, tallyop, 1);
-
- /* Inode should be marked DIRTY even if no new page was marked DIRTY
- * because page could have been not flushed between 2 modifications.
- * It is important the file is marked DIRTY as soon as the I/O is done
- * Indeed, when cache is flushed, file could be already closed and it
- * is too late to warn the MDT.
- * It is acceptable that file is marked DIRTY even if I/O is dropped
- * for some reasons before being flushed to OST.
- */
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- size = cl_offset(obj, pg->cp_index) + to;
-
- ll_inode_size_lock(inode);
- if (result == 0) {
- if (size > i_size_read(inode)) {
- cl_isize_write_nolock(inode, size);
- CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (unsigned long)size);
- }
- cl_page_export(env, pg, 1);
- } else {
- if (size > i_size_read(inode))
- cl_page_discard(env, io, pg);
- }
- ll_inode_size_unlock(inode);
- return result;
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
}
static const struct cl_io_operations vvp_io_ops = {
.op = {
[CIT_READ] = {
- .cio_fini = vvp_io_read_fini,
+ .cio_fini = vvp_io_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_WRITE] = {
.cio_fini = vvp_io_fini,
+ .cio_iter_init = vvp_io_write_iter_init,
+ .cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
.cio_fini = vvp_io_setattr_fini,
@@ -1120,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = ccc_io_end
+ .cio_end = vvp_io_end,
},
[CIT_FSYNC] = {
.cio_start = vvp_io_fsync_start,
@@ -1131,29 +1304,26 @@ static const struct cl_io_operations vvp_io_ops = {
}
},
.cio_read_page = vvp_io_read_page,
- .cio_prepare_write = vvp_io_prepare_write,
- .cio_commit_write = vvp_io_commit_write
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
int result;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID
" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
- CL_IO_SLICE_CLEAN(cio, cui_cl);
- cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_window_set = 0;
+ CL_IO_SLICE_CLEAN(vio, vui_cl);
+ cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
+ vio->vui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
size_t count;
@@ -1166,7 +1336,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
if (count == 0)
result = 1;
else
- cio->cui_tot_count = count;
+ vio->vui_tot_count = count;
/* for read/write, we store the jobid in the inode, and
* it'll be fetched by osc when building RPC.
@@ -1192,7 +1362,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
* because it might not grant layout lock in IT_OPEN.
*/
if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ result = ll_layout_refresh(inode, &vio->vui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
@@ -1208,11 +1378,3 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
return result;
}
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- /* Calling just for assertion */
- cl2ccc_io(env, slice);
- return vvp_env_io(env);
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index ff0948043..f5bd6c22e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -40,7 +40,7 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/obd.h"
+#include "../include/obd_support.h"
#include "../include/lustre_lite.h"
#include "vvp_internal.h"
@@ -51,36 +51,41 @@
*
*/
-/**
- * Estimates lock value for the purpose of managing the lock cache during
- * memory shortages.
- *
- * Locks for memory mapped files are almost infinitely precious, others are
- * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are
- * ordered within themselves by weights assigned from other layers.
- */
-static unsigned long vvp_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
+{
+ struct vvp_lock *vlk = cl2vvp_lock(slice);
+
+ kmem_cache_free(vvp_lock_kmem, vlk);
+}
+
+static int vvp_lock_enqueue(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *unused, struct cl_sync_io *anchor)
{
- struct ccc_object *cob = cl2ccc(slice->cls_obj);
+ CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
- return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
+ return 0;
}
static const struct cl_lock_operations vvp_lock_ops = {
- .clo_delete = ccc_lock_delete,
- .clo_fini = ccc_lock_fini,
- .clo_enqueue = ccc_lock_enqueue,
- .clo_wait = ccc_lock_wait,
- .clo_use = ccc_lock_use,
- .clo_unuse = ccc_lock_unuse,
- .clo_fits_into = ccc_lock_fits_into,
- .clo_state = ccc_lock_state,
- .clo_weigh = vvp_lock_weigh
+ .clo_fini = vvp_lock_fini,
+ .clo_enqueue = vvp_lock_enqueue,
};
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *unused)
{
- return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
+ struct vvp_lock *vlk;
+ int result;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
+ if (vlk) {
+ cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
+ result = 0;
+ } else {
+ result = -ENOMEM;
+ }
+ return result;
}
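
vvp_lock_init() above, like vvp_req_init() and vvp_object_alloc() later in this patch, follows one allocation shape: zero-allocate the layer object from a dedicated slab, hook its slice into the composite object, and return -ENOMEM otherwise. A stand-alone sketch of the shape (calloc() stands in for kmem_cache_zalloc(); the slice types are invented for illustration):

#include <errno.h>
#include <stdlib.h>

struct slice { struct slice *next; };
struct vlk_lock { struct slice vlk_cl; /* per-layer lock state */ };

/* Stand-in for cl_lock_slice_add(): link this layer's slice in. */
static void slice_add(struct slice **layers, struct slice *s)
{
	s->next = *layers;
	*layers = s;
}

static int lock_init(struct slice **layers)
{
	struct vlk_lock *vlk = calloc(1, sizeof(*vlk)); /* ~ kmem_cache_zalloc */

	if (!vlk)
		return -ENOMEM;
	slice_add(layers, &vlk->vlk_cl);
	return 0;
}
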
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 03c887d8e..18c9df7eb 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -45,6 +45,7 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
@@ -53,16 +54,25 @@
*
*/
+int vvp_object_invariant(const struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
+ lli->lli_clob == obj;
+}
+
static int vvp_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- struct ccc_object *obj = lu2ccc(o);
- struct inode *inode = obj->cob_inode;
+ struct vvp_object *obj = lu2vvp(o);
+ struct inode *inode = obj->vob_inode;
struct ll_inode_info *lli;
(*p)(env, cookie, "(%s %d %d) inode: %p ",
- list_empty(&obj->cob_pending_list) ? "-" : "+",
- obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
+ list_empty(&obj->vob_pending_list) ? "-" : "+",
+ obj->vob_transient_pages, atomic_read(&obj->vob_mmap_cnt),
inode);
if (inode) {
lli = ll_i2info(inode);
@@ -77,7 +87,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
/*
* lov overwrites most of these fields in
@@ -99,7 +109,7 @@ static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_attr *attr, unsigned valid)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
if (valid & CAT_UID)
inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
@@ -112,7 +122,7 @@ static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
if (valid & CAT_CTIME)
inode->i_ctime.tv_sec = attr->cat_ctime;
if (0 && valid & CAT_SIZE)
- cl_isize_write_nolock(inode, attr->cat_size);
+ i_size_write(inode, attr->cat_size);
/* not currently necessary */
if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
mark_inode_dirty(inode);
@@ -165,6 +175,40 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
return 0;
}
+static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+ int rc;
+
+ rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
+ if (rc < 0) {
+ CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
+ PFID(lu_object_fid(&obj->co_lu)), rc);
+ return rc;
+ }
+
+ truncate_inode_pages(inode->i_mapping, 0);
+ return 0;
+}
+
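
The new vvp_prune() makes the ordering explicit: write back the whole object first, and only then drop the cached pages; truncating without the flush would throw away unwritten data. Roughly the same two-step shape in plain POSIX terms, with fsync() and posix_fadvise(DONTNEED) as loose analogues of cl_sync_file_range() and truncate_inode_pages():

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Flush first, then drop: pruning the cache before writeback would
 * lose dirty data, so the order below is the whole point. */
static int prune_cache(int fd)
{
	if (fsync(fd) < 0) {	/* ~ cl_sync_file_range(..., CL_FSYNC_LOCAL) */
		perror("fsync");
		return -1;
	}
	/* ~ truncate_inode_pages(): let the kernel drop the clean pages */
	return posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
}
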
+static int vvp_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ lvb->lvb_mtime = LTIME_S(inode->i_mtime);
+ lvb->lvb_atime = LTIME_S(inode->i_atime);
+ lvb->lvb_ctime = LTIME_S(inode->i_ctime);
+ /*
+ * LU-417: add the dirty-page block count lest i_blocks report 0;
+ * otherwise "cp" or "tar" on a remote node may think it's a completely
+ * sparse file and skip it.
+ */
+ if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
+ lvb->lvb_blocks = dirty_cnt(inode);
+ return 0;
+}
+
static const struct cl_object_operations vvp_ops = {
.coo_page_init = vvp_page_init,
.coo_lock_init = vvp_lock_init,
@@ -172,29 +216,94 @@ static const struct cl_object_operations vvp_ops = {
.coo_attr_get = vvp_attr_get,
.coo_attr_set = vvp_attr_set,
.coo_conf_set = vvp_conf_set,
- .coo_glimpse = ccc_object_glimpse
+ .coo_prune = vvp_prune,
+ .coo_glimpse = vvp_object_glimpse
};
+static int vvp_object_init0(const struct lu_env *env,
+ struct vvp_object *vob,
+ const struct cl_object_conf *conf)
+{
+ vob->vob_inode = conf->coc_inode;
+ vob->vob_transient_pages = 0;
+ cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
+ return 0;
+}
+
+static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
+ struct vvp_object *vob = lu2vvp(obj);
+ struct lu_object *below;
+ struct lu_device *under;
+ int result;
+
+ under = &dev->vdv_next->cd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
+ if (below) {
+ const struct cl_object_conf *cconf;
+
+ cconf = lu2cl_conf(conf);
+ INIT_LIST_HEAD(&vob->vob_pending_list);
+ lu_object_add(obj, below);
+ result = vvp_object_init0(env, vob, cconf);
+ } else {
+ result = -ENOMEM;
+ }
+
+ return result;
+}
+
+static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct vvp_object *vob = lu2vvp(obj);
+
+ lu_object_fini(obj);
+ lu_object_header_fini(obj->lo_header);
+ kmem_cache_free(vvp_object_kmem, vob);
+}
+
static const struct lu_object_operations vvp_lu_obj_ops = {
- .loo_object_init = ccc_object_init,
- .loo_object_free = ccc_object_free,
- .loo_object_print = vvp_object_print
+ .loo_object_init = vvp_object_init,
+ .loo_object_free = vvp_object_free,
+ .loo_object_print = vvp_object_print,
};
-struct ccc_object *cl_inode2ccc(struct inode *inode)
+struct vvp_object *cl_inode2vvp(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
struct lu_object *lu;
lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
LASSERT(lu);
- return lu2ccc(lu);
+ return lu2vvp(lu);
}
struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
+ const struct lu_object_header *unused,
struct lu_device *dev)
{
- return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops);
+ struct vvp_object *vob;
+ struct lu_object *obj;
+
+ vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
+ if (vob) {
+ struct cl_object_header *hdr;
+
+ obj = &vob->vob_cl.co_lu;
+ hdr = &vob->vob_header;
+ cl_object_header_init(hdr);
+ hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+
+ vob->vob_cl.co_ops = &vvp_ops;
+ obj->lo_ops = &vvp_lu_obj_ops;
+ } else {
+ obj = NULL;
+ }
+ return obj;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 33ca3eb34..6cd2af7a9 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -41,9 +41,16 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/obd.h"
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
@@ -52,9 +59,9 @@
*
*/
-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg)
{
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage);
put_page(vmpage);
@@ -63,23 +70,23 @@ static void vvp_page_fini_common(struct ccc_page *cp)
static void vvp_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
/*
* vmpage->private was already cleared when page was moved into
* CPS_FREEING state.
*/
LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(cp);
+ vvp_page_fini_common(vpg);
}
static int vvp_page_own(const struct lu_env *env,
const struct cl_page_slice *slice, struct cl_io *io,
int nonblock)
{
- struct ccc_page *vpg = cl2ccc_page(slice);
- struct page *vmpage = vpg->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage);
if (nonblock) {
@@ -96,6 +103,7 @@ static int vvp_page_own(const struct lu_env *env,
lock_page(vmpage);
wait_on_page_writeback(vmpage);
+
return 0;
}
@@ -136,41 +144,15 @@ static void vvp_page_discard(const struct lu_env *env,
struct cl_io *unused)
{
struct page *vmpage = cl2vm_page(slice);
- struct address_space *mapping;
- struct ccc_page *cpg = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
- mapping = vmpage->mapping;
-
- if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
- ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+ if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
+ ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
- /*
- * truncate_complete_page() calls
- * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
- */
- truncate_complete_page(mapping, vmpage);
-}
-
-static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- __u64 offset;
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
-
- offset = vmpage->index << PAGE_SHIFT;
-
- /*
- * XXX is it safe to call this with the page lock held?
- */
- ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
- return 0;
+ ll_invalidate_page(vmpage);
}
static void vvp_page_delete(const struct lu_env *env,
@@ -179,12 +161,20 @@ static void vvp_page_delete(const struct lu_env *env,
struct page *vmpage = cl2vm_page(slice);
struct inode *inode = vmpage->mapping->host;
struct cl_object *obj = slice->cpl_obj;
+ struct cl_page *page = slice->cpl_page;
+ int refc;
LASSERT(PageLocked(vmpage));
- LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
- LASSERT(inode == ccc_object_inode(obj));
+ LASSERT((struct cl_page *)vmpage->private == page);
+ LASSERT(inode == vvp_object_inode(obj));
- vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
+ vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
+
+ /* Drop the reference count held in vvp_page_init */
+ refc = atomic_dec_return(&page->cp_ref);
+ LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
+
+ ClearPageUptodate(vmpage);
ClearPagePrivate(vmpage);
vmpage->private = 0;
/*
@@ -237,7 +227,7 @@ static int vvp_page_prep_write(const struct lu_env *env,
if (!pg->cp_sync_io)
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+ vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
return 0;
}
@@ -250,11 +240,11 @@ static int vvp_page_prep_write(const struct lu_env *env,
*/
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
- struct ccc_object *obj = cl_inode2ccc(inode);
+ struct vvp_object *obj = cl_inode2vvp(inode);
if (ioret == 0) {
ClearPageError(vmpage);
- obj->cob_discard_page_warned = 0;
+ obj->vob_discard_page_warned = 0;
} else {
SetPageError(vmpage);
if (ioret == -ENOSPC)
@@ -263,8 +253,8 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
set_bit(AS_EIO, &inode->i_mapping->flags);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->cob_discard_page_warned == 0) {
- obj->cob_discard_page_warned = 1;
+ obj->vob_discard_page_warned == 0) {
+ obj->vob_discard_page_warned = 1;
ll_dirty_page_discard_warn(vmpage, ioret);
}
}
@@ -274,22 +264,23 @@ static void vvp_page_completion_read(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
- struct cl_page *page = cl_page_top(slice->cpl_page);
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
+ struct cl_page *page = slice->cpl_page;
+ struct inode *inode = vvp_object_inode(page->cp_obj);
LASSERT(PageLocked(vmpage));
CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
- if (cp->cpg_defer_uptodate)
+ if (vpg->vpg_defer_uptodate)
ll_ra_count_put(ll_i2sbi(inode), 1);
if (ioret == 0) {
- if (!cp->cpg_defer_uptodate)
+ if (!vpg->vpg_defer_uptodate)
cl_page_export(env, page, 1);
- } else
- cp->cpg_defer_uptodate = 0;
+ } else {
+ vpg->vpg_defer_uptodate = 0;
+ }
if (!page->cp_sync_io)
unlock_page(vmpage);
@@ -299,9 +290,9 @@ static void vvp_page_completion_write(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
@@ -315,8 +306,8 @@ static void vvp_page_completion_write(const struct lu_env *env,
* and then re-add the page into pending transfer queue. -jay
*/
- cp->cpg_write_queued = 0;
- vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+ vpg->vpg_write_queued = 0;
+ vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
if (pg->cp_sync_io) {
LASSERT(PageLocked(vmpage));
@@ -327,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env,
* Only mark the page error only when it's an async write
* because applications won't wait for IO to finish.
*/
- vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
+ vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
end_page_writeback(vmpage);
}
@@ -359,7 +350,7 @@ static int vvp_page_make_ready(const struct lu_env *env,
LASSERT(pg->cp_state == CPS_CACHED);
/* This actually clears the dirty bit in the radix tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+ vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
@@ -375,24 +366,51 @@ static int vvp_page_make_ready(const struct lu_env *env,
return result;
}
+static int vvp_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, pgoff_t *max_index)
+{
+ if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
+ io->ci_type == CIT_FAULT) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
+ *max_index = CL_PAGE_EOF;
+ }
+ return 0;
+}
+
static int vvp_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
{
- struct ccc_page *vp = cl2ccc_page(slice);
- struct page *vmpage = vp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
- vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
- vp->cpg_write_queued, vmpage);
+ vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
+ vpg->vpg_write_queued, vmpage);
if (vmpage) {
(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
(long)vmpage->flags, page_count(vmpage),
page_mapcount(vmpage), vmpage->private,
- page_index(vmpage),
+ vmpage->index,
list_empty(&vmpage->lru) ? "not-" : "");
}
+
(*printer)(env, cookie, "\n");
+
+ return 0;
+}
+
+static int vvp_page_fail(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ /*
+ * Cached read?
+ */
+ LBUG();
+
return 0;
}
@@ -401,32 +419,38 @@ static const struct cl_page_operations vvp_page_ops = {
.cpo_assume = vvp_page_assume,
.cpo_unassume = vvp_page_unassume,
.cpo_disown = vvp_page_disown,
- .cpo_vmpage = ccc_page_vmpage,
.cpo_discard = vvp_page_discard,
.cpo_delete = vvp_page_delete,
- .cpo_unmap = vvp_page_unmap,
.cpo_export = vvp_page_export,
.cpo_is_vmlocked = vvp_page_is_vmlocked,
.cpo_fini = vvp_page_fini,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
+ .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_page_prep_read,
.cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = ccc_fail,
+ .cpo_make_ready = vvp_page_fail,
},
[CRT_WRITE] = {
.cpo_prep = vvp_page_prep_write,
.cpo_completion = vvp_page_completion_write,
.cpo_make_ready = vvp_page_make_ready,
- }
- }
+ },
+ },
};
+static int vvp_transient_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ /* transient page should always be sent. */
+ return 0;
+}
+
static void vvp_transient_page_verify(const struct cl_page *page)
{
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct inode *inode = vvp_object_inode(page->cp_obj);
LASSERT(!inode_trylock(inode));
}
@@ -477,7 +501,7 @@ static void vvp_transient_page_discard(const struct lu_env *env,
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
int locked;
locked = !inode_trylock(inode);
@@ -497,13 +521,13 @@ vvp_transient_page_completion(const struct lu_env *env,
static void vvp_transient_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *clp = slice->cpl_page;
- struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+ struct vvp_object *clobj = cl2vvp(clp->cp_obj);
- vvp_page_fini_common(cp);
- LASSERT(!inode_trylock(clobj->cob_inode));
- clobj->cob_transient_pages--;
+ vvp_page_fini_common(vpg);
+ LASSERT(!inode_trylock(clobj->vob_inode));
+ clobj->vob_transient_pages--;
}
static const struct cl_page_operations vvp_transient_page_ops = {
@@ -512,45 +536,48 @@ static const struct cl_page_operations vvp_transient_page_ops = {
.cpo_unassume = vvp_transient_page_unassume,
.cpo_disown = vvp_transient_page_disown,
.cpo_discard = vvp_transient_page_discard,
- .cpo_vmpage = ccc_page_vmpage,
.cpo_fini = vvp_transient_page_fini,
.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
+ .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
},
[CRT_WRITE] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
}
}
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
- struct ccc_page *cpg = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+ struct page *vmpage = page->cp_vmpage;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- cpg->cpg_page = vmpage;
+ vpg->vpg_page = vmpage;
get_page(vmpage);
- INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
+ /* in cache, decref in vvp_page_delete */
+ atomic_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+ &vvp_page_ops);
} else {
- struct ccc_object *clobj = cl2ccc(obj);
+ struct vvp_object *clobj = cl2vvp(obj);
- LASSERT(!inode_trylock(clobj->cob_inode));
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ LASSERT(!inode_trylock(clobj->vob_inode));
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
&vvp_transient_page_ops);
- clobj->cob_transient_pages++;
+ clobj->vob_transient_pages++;
}
return 0;
}
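
The cacheable branch of vvp_page_init() above takes an extra reference on cp_ref, and vvp_page_delete() earlier in this file drops it with atomic_dec_return(), asserting the count stays at or above 1 because the deleting caller holds its own reference. The pairing, modelled stand-alone with C11 atomics (struct clpage is a stand-in, not the real cl_page; the assert mirrors the LASSERTF):

#include <assert.h>
#include <stdatomic.h>

struct clpage { atomic_int cp_ref; };

/* ~ the atomic_inc() in vvp_page_init(): the cache holds a ref. */
static void page_init(struct clpage *pg)
{
	atomic_fetch_add(&pg->cp_ref, 1);
}

/* ~ the atomic_dec_return() in vvp_page_delete(): drop the cache
 * ref; the deleting caller must still hold one of its own. */
static void page_delete(struct clpage *pg)
{
	int refc = atomic_fetch_sub(&pg->cp_ref, 1) - 1;

	assert(refc >= 1);
}
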
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
new file mode 100644
index 000000000..fb886291a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -0,0 +1,121 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include "../include/lustre/lustre_idl.h"
+#include "../include/cl_object.h"
+#include "../include/obd.h"
+#include "../include/obd_support.h"
+#include "../include/lustre_lite.h"
+#include "llite_internal.h"
+#include "vvp_internal.h"
+
+static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
+{
+ return container_of0(slice, struct vvp_req, vrq_cl);
+}
+
+/**
+ * Implementation of struct cl_req_operations::cro_attr_set() for VVP
+ * layer. VVP is responsible for
+ *
+ * - o_[mac]time
+ *
+ * - o_mode
+ *
+ * - o_parent_seq
+ *
+ * - o_[ug]id
+ *
+ * - o_parent_oid
+ *
+ * - o_parent_ver
+ *
+ * - o_ioepoch,
+ *
+ */
+void vvp_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, u64 flags)
+{
+ struct inode *inode;
+ struct obdo *oa;
+ u32 valid_flags;
+
+ oa = attr->cra_oa;
+ inode = vvp_object_inode(obj);
+ valid_flags = OBD_MD_FLTYPE;
+
+ if (slice->crs_req->crq_type == CRT_WRITE) {
+ if (flags & OBD_MD_FLEPOCH) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ }
+ }
+ obdo_from_inode(oa, inode, valid_flags & flags);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
+ JOBSTATS_JOBID_SIZE);
+}
+
+void vvp_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct vvp_req *vrq;
+
+ if (ioret > 0)
+ cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
+
+ vrq = cl2vvp_req(slice);
+ kmem_cache_free(vvp_req_kmem, vrq);
+}
+
+static const struct cl_req_operations vvp_req_ops = {
+ .cro_attr_set = vvp_req_attr_set,
+ .cro_completion = vvp_req_completion
+};
+
+int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct vvp_req *vrq;
+ int result;
+
+ vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
+ if (vrq) {
+ cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
+ result = 0;
+ } else {
+ result = -ENOMEM;
+ }
+ return result;
+}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b68dcc921..608014b0d 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -181,8 +181,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
size = rc;
pv = (const char *)new_value;
- } else
+ } else {
return -EOPNOTSUPP;
+ }
valid |= rce_ops2valid(rce->rce_ops);
}
@@ -210,16 +211,14 @@ int ll_setxattr_common(struct inode *inode, const char *name,
return 0;
}
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
@@ -243,12 +242,12 @@ int ll_setxattr(struct dentry *dentry, const char *name,
lump->lmm_stripe_offset = -1;
if (lump && S_ISREG(inode->i_mode)) {
- int flags = FMODE_WRITE;
+ __u64 it_flags = FMODE_WRITE;
int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
sizeof(*lump) : sizeof(struct lov_user_md_v3);
- rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump,
- lum_size);
+ rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
+ lump, lum_size);
/* b10667: rc is always 0 here for now */
rc = 0;
} else if (S_ISDIR(inode->i_mode)) {
@@ -272,8 +271,8 @@ int ll_removexattr(struct dentry *dentry, const char *name)
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
return ll_setxattr_common(inode, name, NULL, 0, 0,
@@ -292,8 +291,8 @@ int ll_getxattr_common(struct inode *inode, const char *name,
struct rmtacl_ctl_entry *rce = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
/* listxattr has slightly different behavior from that of ext3:
* without 'user_xattr' ext3 will list all xattr names but
* filters out "^user..." names instead of returning an error.
*/
if (xattr_type == XATTR_ACL_ACCESS_T &&
!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
-
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -423,8 +421,7 @@ getxattr_nocache:
if (rce && rce->rce_ops == RMT_LSETFACL) {
ext_acl_xattr_header *acl;
- acl = lustre_posix_acl_xattr_2ext(
- (posix_acl_xattr_header *)buffer, rc);
+ acl = lustre_posix_acl_xattr_2ext(buffer, rc);
if (IS_ERR(acl)) {
rc = PTR_ERR(acl);
goto out;
@@ -451,16 +448,14 @@ out:
return rc;
}
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
@@ -554,8 +549,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
LASSERT(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 3480ce2bb..d7e17abbe 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -229,7 +229,6 @@ static int ll_xattr_cache_valid(struct ll_inode_info *lli)
*/
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
-
if (!ll_xattr_cache_valid(lli))
return 0;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 8a0087190..7007e4c48 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -42,9 +42,6 @@
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
-#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
-
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9abb7c2b9..9e31f6b03 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -132,8 +132,9 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
- return obd_get_uuid(lmv->tgts[0]->ltd_exp);
+ return tgt ? obd_get_uuid(tgt->ltd_exp) : NULL;
}
static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
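
lmv_get_uuid() above sets the tone for the rest of the lmv_obd.c changes: load lmv->tgts[i] into a local once, and bail out or skip when the slot is NULL, has no export, or is inactive, rather than dereferencing the array repeatedly. The recurring loop shape, sketched stand-alone (struct tgt and for_each_live_tgt() are invented names, not lmv API):

#include <stddef.h>

struct tgt { void *ltd_exp; int ltd_active; };	/* ~ lmv_tgt_desc */

/* Visit only targets that are present, connected, and active;
 * holes in the tgts[] array are expected and simply skipped. */
static int for_each_live_tgt(struct tgt **tgts, int count,
			     int (*fn)(struct tgt *))
{
	int rc = 0;

	for (int i = 0; i < count; i++) {
		struct tgt *t = tgts[i];

		if (!t || !t->ltd_exp || !t->ltd_active)
			continue;
		rc = fn(t);
		if (rc)
			break;
	}
	return rc;
}
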
@@ -249,7 +250,6 @@ static int lmv_connect(const struct lu_env *env,
static void lmv_set_timeouts(struct obd_device *obd)
{
- struct lmv_tgt_desc *tgt;
struct lmv_obd *lmv;
int i;
@@ -261,8 +261,10 @@ static void lmv_set_timeouts(struct obd_device *obd)
return;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ struct lmv_tgt_desc *tgt;
+
tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
@@ -302,13 +304,14 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
return 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
CWARN("%s: NULL export for %d\n", obd->obd_name, i);
continue;
}
- rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
+ rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
cookiesize, def_cookiesize);
if (rc) {
CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
@@ -425,7 +428,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
- lmv_init_lock(lmv);
+ mutex_lock(&lmv->lmv_init_mutex);
if (lmv->desc.ld_tgt_count == 0) {
struct obd_device *mdc_obd;
@@ -433,7 +436,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
&obd->obd_uuid);
if (!mdc_obd) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
CERROR("%s: Target %s not attached: rc = %d\n",
obd->obd_name, uuidp->uuid, -EINVAL);
return -EINVAL;
@@ -445,7 +448,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
obd->obd_name,
obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -EEXIST;
}
@@ -459,7 +462,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
newsize <<= 1;
newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
if (!newtgts) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -ENOMEM;
}
@@ -481,7 +484,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
if (!tgt) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -ENOMEM;
}
@@ -507,7 +510,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
}
}
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return rc;
}
@@ -522,18 +525,27 @@ int lmv_check_connect(struct obd_device *obd)
if (lmv->connected)
return 0;
- lmv_init_lock(lmv);
+ mutex_lock(&lmv->lmv_init_mutex);
if (lmv->connected) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return 0;
}
if (lmv->desc.ld_tgt_count == 0) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
CERROR("%s: no targets configured.\n", obd->obd_name);
return -EINVAL;
}
+ LASSERT(lmv->tgts);
+
+ if (!lmv->tgts[0]) {
+ mutex_unlock(&lmv->lmv_init_mutex);
+ CERROR("%s: no target configured for index 0.\n",
+ obd->obd_name);
+ return -EINVAL;
+ }
+
CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
lmv->cluuid.uuid, obd->obd_name);
@@ -551,7 +563,7 @@ int lmv_check_connect(struct obd_device *obd)
lmv->connected = 1;
easize = lmv_get_easize(lmv);
lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return 0;
out_disc:
@@ -572,7 +584,7 @@ int lmv_check_connect(struct obd_device *obd)
}
}
class_disconnect(lmv->exp);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return rc;
}
@@ -796,6 +808,11 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
/* unregister request (call from llapi_hsm_copytool_fini) */
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
/* best effort: try to clean as much as possible
* (continue on error)
*/
@@ -825,20 +842,28 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
* except if it is because of an inactive target.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
if (err) {
- if (lmv->tgts[i]->ltd_active) {
+ if (tgt->ltd_active) {
/* permanent error */
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ tgt->ltd_uuid.uuid, i, cmd, err);
rc = err;
lk->lk_flags |= LK_FLG_STOP;
/* unregister from previous MDS */
- for (j = 0; j < i; j++)
- obd_iocontrol(cmd,
- lmv->tgts[j]->ltd_exp,
- len, lk, uarg);
+ for (j = 0; j < i; j++) {
+ tgt = lmv->tgts[j];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+ obd_iocontrol(cmd, tgt->ltd_exp, len,
+ lk, uarg);
+ }
return rc;
}
/* else: transient error.
@@ -877,6 +902,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
{
struct obd_device *obddev = class_exp2obd(exp);
struct lmv_obd *lmv = &obddev->u.lmv;
+ struct lmv_tgt_desc *tgt = NULL;
int i = 0;
int rc = 0;
int set = 0;
@@ -896,10 +922,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (index >= count)
return -ENODEV;
- if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0)
+ tgt = lmv->tgts[index];
+ if (!tgt || !tgt->ltd_active)
return -ENODATA;
- mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
if (!mdc_obd)
return -EINVAL;
@@ -909,7 +936,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
(int)sizeof(struct obd_uuid))))
return -EFAULT;
- rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
+ rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
0);
if (rc)
@@ -922,11 +949,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl = karg;
- struct lmv_tgt_desc *tgt = NULL;
struct obd_quotactl *oqctl;
if (qctl->qc_valid == QC_MDTIDX) {
- if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
+ if (count <= qctl->qc_idx)
return -EINVAL;
tgt = lmv->tgts[qctl->qc_idx];
@@ -975,18 +1001,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (icc->icc_mdtindex >= count)
return -ENODEV;
- if (!lmv->tgts[icc->icc_mdtindex] ||
- !lmv->tgts[icc->icc_mdtindex]->ltd_exp ||
- lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
+ tgt = lmv->tgts[icc->icc_mdtindex];
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
return -ENODEV;
- rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
- sizeof(*icc), icc, NULL);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL);
break;
}
case LL_IOC_GET_CONNECT_FLAGS: {
- if (!lmv->tgts[0])
+ tgt = lmv->tgts[0];
+
+ if (!tgt || !tgt->ltd_exp)
return -ENODATA;
- rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
break;
}
case OBD_IOC_FID2PATH: {
@@ -997,7 +1023,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
case LL_IOC_HSM_STATE_SET:
case LL_IOC_HSM_ACTION: {
struct md_op_data *op_data = karg;
- struct lmv_tgt_desc *tgt;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
@@ -1011,7 +1036,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_PROGRESS: {
const struct hsm_progress_kernel *hpk = karg;
- struct lmv_tgt_desc *tgt;
tgt = lmv_find_target(lmv, &hpk->hpk_fid);
if (IS_ERR(tgt))
@@ -1021,7 +1045,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_REQUEST: {
struct hsm_user_request *hur = karg;
- struct lmv_tgt_desc *tgt;
unsigned int reqcount = hur->hur_request.hr_itemcount;
if (reqcount == 0)
@@ -1044,7 +1067,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int rc1;
struct hsm_user_request *req;
- nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ nr = lmv_hsm_req_count(lmv, hur, tgt);
if (nr == 0) /* nothing for this MDS */
continue;
@@ -1056,10 +1083,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (!req)
return -ENOMEM;
- lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
+ lmv_hsm_req_build(lmv, hur, tgt, req);
- rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
- reqlen, req, uarg);
+ rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
+ req, uarg);
if (rc1 != 0 && rc == 0)
rc = rc1;
kvfree(req);
@@ -1103,27 +1130,27 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
struct obd_device *mdc_obd;
int err;
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
continue;
/* ll_umount_begin() sets force flag but for lmv, not
* mdc. Let's pass it through
*/
- mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
mdc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
- karg, uarg);
+ err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
return err;
} else if (err) {
- if (lmv->tgts[i]->ltd_active) {
+ if (tgt->ltd_active) {
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ tgt->ltd_uuid.uuid, i, cmd, err);
if (!rc)
rc = err;
}
- } else
+ } else {
set = 1;
+ }
}
if (!set && !rc)
rc = -EIO;
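The lmv_iocontrol() hunks above all apply one cleanup: each branch loads lmv->tgts[i] (or lmv->tgts[0]) into a local struct lmv_tgt_desc *tgt once, validates tgt, tgt->ltd_exp and tgt->ltd_active in a single test, and afterwards dereferences only the local. A minimal standalone sketch of that pattern — the struct fields and the do_ioctl() wrapper here are illustrative stand-ins, not the real Lustre definitions:

#include <stdio.h>

/* Illustrative stand-in for the Lustre target descriptor. */
struct tgt_desc {
        void *ltd_exp;    /* export handle; NULL until connected */
        int   ltd_active; /* non-zero when the target is usable */
};

static int do_ioctl(struct tgt_desc **tgts, int count)
{
        int i, rc = 0, set = 0;

        for (i = 0; i < count; i++) {
                /* Load once, test once, then use only the local copy. */
                struct tgt_desc *tgt = tgts[i];

                if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
                        continue; /* skip missing or inactive targets */

                /* ... forward the ioctl via tgt->ltd_exp here ... */
                set = 1;
        }
        return (!set && !rc) ? -5 /* -EIO */ : rc;
}

int main(void)
{
        struct tgt_desc a = { &a, 1 };
        struct tgt_desc *tgts[2] = { NULL, &a };

        printf("rc = %d\n", do_ioctl(tgts, 2)); /* rc = 0 */
        return 0;
}

Beyond readability, the local pointer also guarantees each branch tests the same descriptor it later uses, instead of re-indexing the array between the check and the call.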
@@ -1269,7 +1296,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
spin_lock_init(&lmv->lmv_lock);
- mutex_init(&lmv->init_mutex);
+ mutex_init(&lmv->lmv_init_mutex);
lprocfs_lmv_init_vars(&lvars);
@@ -2071,7 +2098,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
/* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~CFS_PAGE_MASK))
+ if (!((unsigned long)dp & ~PAGE_MASK))
break;
/* Save the hash and flags of this lu_dirpage. */
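The CFS_PAGE_MASK-to-PAGE_MASK change above leans on the standard kernel identity PAGE_MASK == ~(PAGE_SIZE - 1): masking an address with ~PAGE_MASK yields its offset within the page, so a zero result means the lu_dirpage cursor has wrapped to a page boundary. A hedged userspace illustration, with the page size hard-coded to 4096 for the demo:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

/* Zero in-page offset -> the cursor sits exactly on a page boundary. */
static int reached_page_end(uintptr_t addr)
{
        return !(addr & ~DEMO_PAGE_MASK);
}

int main(void)
{
        uintptr_t base = 0x10000; /* a page-aligned address value */

        printf("%d\n", reached_page_end(base));       /* 1: on a boundary */
        printf("%d\n", reached_page_end(base + 128)); /* 0: mid-page */
        return 0;
}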
@@ -2268,7 +2295,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
lmv = &obd->u.lmv;
if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
- struct lmv_tgt_desc *tgt;
int i;
rc = lmv_check_connect(obd);
@@ -2277,7 +2303,8 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
LASSERT(*vallen == sizeof(__u32));
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
/*
* All tgts should be connected when this gets called.
*/
@@ -2466,12 +2493,13 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
LASSERT(fid);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0)
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- err = md_cancel_unused(lmv->tgts[i]->ltd_exp, fid,
- policy, mode, flags, opaque);
+ err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
+ opaque);
if (!rc)
rc = err;
}
@@ -2482,9 +2510,13 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
__u64 *bits)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc;
- rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+
+ rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
return rc;
}
@@ -2509,12 +2541,13 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
* one fid was created in.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0)
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid,
- type, policy, mode, lockh);
+ rc = md_lock_match(tgt->ltd_exp, flags, fid, type, policy, mode,
+ lockh);
if (rc)
return rc;
}
@@ -2529,18 +2562,24 @@ static int lmv_get_lustre_md(struct obd_export *exp,
struct lustre_md *md)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
- return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md);
}
static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
if (md->mea)
obd_free_memmd(exp, (void *)&md->mea);
- return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ return md_free_lustre_md(tgt->ltd_exp, md);
}
static int lmv_set_open_replay_data(struct obd_export *exp,
@@ -2649,7 +2688,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
int rc = 0, i;
__u64 curspace, curinodes;
- if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
+ !lmv->desc.ld_tgt_count) {
CERROR("master lmv inactive\n");
return -EIO;
}
@@ -2665,12 +2705,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- if (!tgt->ltd_active) {
- CDEBUG(D_HA, "mdt %d is inactive.\n", i);
- continue;
- }
err = obd_quotactl(tgt->ltd_exp, oqctl);
if (err) {
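The lmv_quotactl() hunk closes out the same theme: the master target's pointer, export and active flag are all validated up front (before ld_tgt_count), and the per-target loop drops the second !tgt->ltd_active branch, which became unreachable once the combined test already skips inactive targets. The guard-first shape, in miniature — types and the quotactl() wrapper are stand-ins, with -EIO hard-coded:

#include <stdio.h>

struct tgt { void *exp; int active; };

static int quotactl(struct tgt *master, int tgt_count)
{
        /* Validate everything the function depends on before any work. */
        if (!master || !master->exp || !master->active || !tgt_count) {
                fprintf(stderr, "master lmv inactive\n");
                return -5; /* -EIO */
        }
        return 0;
}

int main(void)
{
        struct tgt m = { &m, 1 };

        printf("%d\n", quotactl(&m, 4));  /* 0 */
        printf("%d\n", quotactl(NULL, 4)); /* -5, after the error message */
        return 0;
}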
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 7dd3162b5..ac9744e88 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -73,19 +73,6 @@
* - top-page keeps a reference to its sub-page, and destroys it when it
* is destroyed.
*
- * - sub-lock keep a reference to its top-locks. Top-lock keeps a
- * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
- * actively using them (that is, in cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
- * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
- * hold. From this moment top-lock has only a 'weak' reference to its
- * sub-locks. This reference is protected by top-lock
- * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
- * when the latter is destroyed. When a sub-lock is canceled, a
- * reference to it is removed from the top-lock array, and top-lock is
- * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
- * while their top-lock is in CLS_HELD or CLS_CACHED states.
- *
* - IO's are not reference counted.
*
* To implement a connection between top and sub entities, lov layer is split
@@ -281,24 +268,17 @@ struct lov_object {
};
/**
- * Flags that top-lock can set on each of its sub-locks.
- */
-enum lov_sub_flags {
- /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
- LSF_HELD = 1 << 0
-};
-
-/**
* State lov_lock keeps for each sub-lock.
*/
struct lov_lock_sub {
/** sub-lock itself */
- struct lovsub_lock *sub_lock;
- /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
- unsigned sub_flags;
+ struct cl_lock sub_lock;
+ /** Set if the sublock has ever been enqueued, meaning it may
+ * hold resources of underlying layers
+ */
+ unsigned int sub_is_enqueued:1,
+ sub_initialized:1;
int sub_stripe;
- struct cl_lock_descr sub_descr;
- struct cl_lock_descr sub_got;
};
/**
@@ -308,59 +288,8 @@ struct lov_lock {
struct cl_lock_slice lls_cl;
/** Number of sub-locks in this lock */
int lls_nr;
- /**
- * Number of existing sub-locks.
- */
- unsigned lls_nr_filled;
- /**
- * Set when sub-lock was canceled, while top-lock was being
- * used, or unused.
- */
- unsigned int lls_cancel_race:1;
- /**
- * An array of sub-locks
- *
- * There are two issues with managing sub-locks:
- *
- * - sub-locks are concurrently canceled, and
- *
- * - sub-locks are shared with other top-locks.
- *
- * To manage cancellation, top-lock acquires a hold on a sublock
- * (lov_sublock_adopt()) when the latter is inserted into
- * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
- * when top-lock is going into CLS_CACHED state or destroyed. Hold
- * prevents sub-lock from cancellation.
- *
- * Sub-lock sharing means, among other things, that top-lock that is
- * in the process of creation (i.e., not yet inserted into lock list)
- * is already accessible to other threads once at least one of its
- * sub-locks is created, see lov_lock_sub_init().
- *
- * Sub-lock can be in one of the following states:
- *
- * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
- * sub-lock was either never created (top-lock is in CLS_NEW
- * state), or it was created, then canceled, then destroyed
- * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
- *
- * - sub-lock exists and is on
- * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
- * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
- * of a top-lock.
- *
- * - sub-lock exists, but is not held by the top-lock. This
- * happens after top-lock released a hold on sub-locks before
- * going into cache (lov_lock_unuse()).
- *
- * \todo To support wide-striping, array has to be replaced with a set
- * of queues to avoid scanning.
- */
- struct lov_lock_sub *lls_sub;
- /**
- * Original description with which lock was enqueued.
- */
- struct cl_lock_descr lls_orig;
+ /** sublock array */
+ struct lov_lock_sub lls_sub[0];
};
struct lov_page {
@@ -444,8 +373,9 @@ struct lov_thread_info {
struct cl_lock_descr lti_ldescr;
struct ost_lvb lti_lvb;
struct cl_2queue lti_cl2q;
- struct cl_lock_closure lti_closure;
+ struct cl_page_list lti_plist;
wait_queue_t lti_waiter;
+ struct cl_attr lti_attr;
};
/**
@@ -611,14 +541,13 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
const struct cl_lock_descr *d, int idx);
int lov_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-
+ struct cl_page *page, pgoff_t index);
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
struct lu_object *lov_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
@@ -631,6 +560,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
const struct cl_page_slice *slice);
+int lov_page_stripe(const struct cl_page *page);
#define lov_foreach_target(lov, var) \
for (var = 0; var < lov_targets_nr(lov); ++var)
@@ -789,11 +719,6 @@ static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
return container_of0(slice, struct lovsub_req, lsrq_cl);
}
-static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
-{
- return slice->cpl_page->cp_child;
-}
-
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
const struct cl_io_slice *ios)
{
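The lov_cl_internal.h diff above shrinks struct lov_lock to a count plus a trailing lls_sub[0] array, replacing the separately allocated lls_sub pointer array; one allocation sized with offsetof(struct lov_lock, lls_sub[nr]) then covers the header and all per-stripe entries (the allocation itself appears later, in lov_lock_sub_init()). A standalone sketch of the idiom with simplified stand-in types; note that offsetof with a runtime array index is a GCC/Clang extension, used the same way in the diff:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct sub { int sub_stripe; };

struct lock {
        int nr;             /* number of trailing entries */
        struct sub subs[];  /* the kernel code spells this subs[0] */
};

static struct lock *lock_alloc(int nr)
{
        /* One zeroed allocation for the header plus nr trailing entries. */
        struct lock *lck = calloc(1, offsetof(struct lock, subs[nr]));

        if (lck)
                lck->nr = nr;
        return lck;
}

int main(void)
{
        struct lock *lck = lock_alloc(3);

        if (!lck)
                return 1;
        lck->subs[2].sub_stripe = 2;
        printf("nr=%d last=%d\n", lck->nr, lck->subs[2].sub_stripe);
        free(lck);
        return 0;
}

Embedding the array removes a failure path (no second allocation) and keeps each sub-lock's state adjacent to its parent in memory.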
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 532ef87df..dae8e89bc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -143,9 +143,7 @@ static void *lov_key_init(const struct lu_context *ctx,
struct lov_thread_info *info;
info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
- if (info)
- INIT_LIST_HEAD(&info->lti_closure.clc_list);
- else
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
@@ -155,7 +153,6 @@ static void lov_key_fini(const struct lu_context *ctx,
{
struct lov_thread_info *info = data;
- LINVRNT(list_empty(&info->lti_closure.clc_list));
kmem_cache_free(lov_thread_kmem, info);
}
@@ -265,8 +262,9 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
if (lr) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -335,14 +333,15 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
cl_page_list_init(&em->emrg_page_list);
em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
LCT_REMEMBER | LCT_NOREF);
- if (!IS_ERR(em->emrg_env))
+ if (!IS_ERR(em->emrg_env)) {
em->emrg_env->le_ctx.lc_cookie = 0x2;
- else {
+ } else {
result = PTR_ERR(em->emrg_env);
em->emrg_env = NULL;
}
- } else
+ } else {
result = -ENOMEM;
+ }
}
if (result != 0) {
lov_emerg_free(emerg, nr);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index b6529401c..460f0fa5e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -48,11 +48,6 @@
#include "lov_internal.h"
-struct lovea_unpack_args {
- struct lov_stripe_md *lsm;
- int cursor;
-};
-
static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
__u16 stripe_count)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 590f9326a..eef9afac8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -72,6 +72,21 @@
})
#endif
+#define pool_tgt_size(p) ((p)->pool_obds.op_size)
+#define pool_tgt_count(p) ((p)->pool_obds.op_count)
+#define pool_tgt_array(p) ((p)->pool_obds.op_array)
+#define pool_tgt_rw_sem(p) ((p)->pool_obds.op_rw_sem)
+
+struct pool_desc {
+ char pool_name[LOV_MAXPOOLNAME + 1];
+ struct ost_pool pool_obds;
+ atomic_t pool_refcount;
+ struct hlist_node pool_hash; /* access by poolname */
+ struct list_head pool_list; /* serial access */
+ struct dentry *pool_debugfs_entry; /* file in debugfs */
+ struct obd_device *pool_lobd; /* owner */
+};
+
struct lov_request {
struct obd_info rq_oi;
struct lov_request_set *rq_rqset;
@@ -88,7 +103,6 @@ struct lov_request {
};
struct lov_request_set {
- struct ldlm_enqueue_info *set_ei;
struct obd_info *set_oi;
atomic_t set_refcount;
struct obd_export *set_exp;
@@ -102,10 +116,8 @@ struct lov_request_set {
atomic_t set_finish_checked;
struct llog_cookie *set_cookies;
int set_cookie_sent;
- struct obd_trans_info *set_oti;
struct list_head set_list;
wait_queue_head_t set_waitq;
- spinlock_t set_lock;
};
extern struct kmem_cache *lov_oinfo_slab;
@@ -114,12 +126,6 @@ extern struct lu_kmem_descr lov_caches[];
void lov_finish_set(struct lov_request_set *set);
-static inline void lov_get_reqset(struct lov_request_set *set)
-{
- LASSERT(atomic_read(&set->set_refcount) > 0);
- atomic_inc(&set->set_refcount);
-}
-
static inline void lov_put_reqset(struct lov_request_set *set)
{
if (atomic_dec_and_test(&set->set_refcount))
@@ -146,10 +152,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end,
u64 *obd_start, u64 *obd_end);
int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off);
-
-/* lov_qos.c */
-#define LOV_USES_ASSIGNED_STRIPE 0
-#define LOV_USES_DEFAULT_STRIPE 1
+pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
+ int stripe);
/* lov_request.c */
int lov_update_common_set(struct lov_request_set *set,
@@ -176,6 +180,8 @@ int lov_fini_statfs_set(struct lov_request_set *set);
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc);
/* lov_obd.c */
+void lov_stripe_lock(struct lov_stripe_md *md);
+void lov_stripe_unlock(struct lov_stripe_md *md);
void lov_fix_desc(struct lov_desc *desc);
void lov_fix_desc_stripe_size(__u64 *val);
void lov_fix_desc_stripe_count(__u32 *val);
@@ -231,8 +237,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname);
int lov_pool_del(struct obd_device *obd, char *poolname);
int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname);
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
void lov_pool_putref(struct pool_desc *pool);
static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
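Moving struct pool_desc and its pool_tgt_*() accessor macros into lov_internal.h makes the OST-pool bookkeeping visible to the rest of the LOV layer rather than a single .c file. A reduced model of the layout and macro usage — fields are trimmed to the ones the macros touch, the macro names mirror the diff, and everything else is a stand-in:

#include <stdio.h>

struct ost_pool {
        unsigned int *op_array; /* OST indices in the pool */
        unsigned int  op_count; /* members in use */
        unsigned int  op_size;  /* allocated slots */
};

struct pool_desc {
        char pool_name[16];
        struct ost_pool pool_obds;
};

/* Accessors, as in the diff: hide the pool_obds indirection. */
#define pool_tgt_size(p)  ((p)->pool_obds.op_size)
#define pool_tgt_count(p) ((p)->pool_obds.op_count)
#define pool_tgt_array(p) ((p)->pool_obds.op_array)

int main(void)
{
        unsigned int osts[4] = { 0, 2, 5, 7 };
        struct pool_desc pd = { "demo", { osts, 3, 4 } };
        unsigned int i;

        for (i = 0; i < pool_tgt_count(&pd); i++)
                printf("pool %s: ost %u\n", pd.pool_name,
                       pool_tgt_array(&pd)[i]);
        return 0;
}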
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 4296aacd8..86cb3f8f9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -225,8 +225,9 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
if (!sub->sub_io_initialized) {
sub->sub_stripe = stripe;
rc = lov_io_sub_init(env, lio, sub);
- } else
+ } else {
rc = 0;
+ }
if (rc == 0)
lov_sub_enter(sub);
else
@@ -245,13 +246,15 @@ void lov_sub_put(struct lov_io_sub *sub)
*
*/
-static int lov_page_stripe(const struct cl_page *page)
+int lov_page_stripe(const struct cl_page *page)
{
struct lovsub_object *subobj;
+ const struct cl_page_slice *slice;
+
+ slice = cl_page_at(page, &lovsub_device_type);
+ LASSERT(slice->cpl_obj);
- subobj = lu2lovsub(
- lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
- &lovsub_device_type));
+ subobj = cl2lovsub(slice->cpl_obj);
return subobj->lso_index;
}
@@ -274,10 +277,11 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct cl_io *io)
{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ struct lov_stripe_md *lsm;
int result;
LASSERT(lio->lis_object);
+ lsm = lio->lis_object->lo_lsm;
/*
* Need to be optimized, we can't afford to allocate a piece of memory
@@ -292,8 +296,9 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
lio->lis_single_subio_index = -1;
lio->lis_active_subios = 0;
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -411,8 +416,9 @@ static int lov_io_iter_init(const struct lu_env *env,
lov_sub_put(sub);
CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
stripe, start, end);
- } else
+ } else {
rc = PTR_ERR(sub);
+ }
if (!rc)
list_add_tail(&sub->sub_linkage, &lio->lis_active);
@@ -436,7 +442,6 @@ static int lov_io_rw_iter_init(const struct lu_env *env,
/* fast path for common case. */
if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
-
lov_do_div64(start, ssize);
next = (start + 1) * ssize;
if (next <= start * ssize)
@@ -543,13 +548,6 @@ static void lov_io_unlock(const struct lu_env *env,
LASSERT(rc == 0);
}
-static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
- struct cl_page_list *qin,
- int idx, int alloc)
-{
- return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
-}
-
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -569,25 +567,17 @@ static int lov_io_submit(const struct lu_env *env,
const struct cl_io_slice *ios,
enum cl_req_type crt, struct cl_2queue *queue)
{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *obj = lio->lis_object;
- struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev);
- struct cl_page_list *qin = &queue->c2_qin;
- struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- struct cl_page_list *stripes_qin = NULL;
+ struct cl_page_list *qin = &queue->c2_qin;
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
struct cl_page *page;
- struct cl_page *tmp;
int stripe;
-#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
-
int rc = 0;
- int alloc =
- !(current->flags & PF_MEMALLOC);
if (lio->lis_active_subios == 1) {
int idx = lio->lis_single_subio_index;
- struct lov_io_sub *sub;
LASSERT(idx < lio->lis_nr_subios);
sub = lov_sub_get(env, lio, idx);
@@ -600,119 +590,120 @@ static int lov_io_submit(const struct lu_env *env,
}
LASSERT(lio->lis_subs);
- if (alloc) {
- stripes_qin =
- libcfs_kvzalloc(sizeof(*stripes_qin) *
- lio->lis_nr_subios,
- GFP_NOFS);
- if (!stripes_qin)
- return -ENOMEM;
-
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
- cl_page_list_init(&stripes_qin[stripe]);
- } else {
- /*
- * If we get here, it means pageout & swap doesn't help.
- * In order to not make things worse, even don't try to
- * allocate the memory with __GFP_NOWARN. -jay
- */
- mutex_lock(&ld->ld_mutex);
- lio->lis_mem_frozen = 1;
- }
- cl_2queue_init(cl2q);
- cl_page_list_for_each_safe(page, tmp, qin) {
- stripe = lov_page_stripe(page);
- cl_page_list_move(QIN(stripe), qin, page);
- }
+ cl_page_list_init(plist);
+ while (qin->pl_nr > 0) {
+ struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct lov_io_sub *sub;
- struct cl_page_list *sub_qin = QIN(stripe);
+ cl_2queue_init(cl2q);
- if (list_empty(&sub_qin->pl_pages))
- continue;
+ page = cl_page_list_first(qin);
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+
+ stripe = lov_page_stripe(page);
+ while (qin->pl_nr > 0) {
+ page = cl_page_list_first(qin);
+ if (stripe != lov_page_stripe(page))
+ break;
+
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+ }
- cl_page_list_splice(sub_qin, &cl2q->c2_qin);
sub = lov_sub_get(env, lio, stripe);
if (!IS_ERR(sub)) {
rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
crt, cl2q);
lov_sub_put(sub);
- } else
+ } else {
rc = PTR_ERR(sub);
- cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin);
+ }
+
+ cl_page_list_splice(&cl2q->c2_qin, plist);
cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
+ cl_2queue_fini(env, cl2q);
+
if (rc != 0)
break;
}
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct cl_page_list *sub_qin = QIN(stripe);
+ cl_page_list_splice(plist, qin);
+ cl_page_list_fini(env, plist);
- if (list_empty(&sub_qin->pl_pages))
- continue;
+ return rc;
+}
+
+static int lov_io_commit_async(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb)
+{
+ struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ struct cl_page *page;
+ int rc = 0;
+
+ if (lio->lis_active_subios == 1) {
+ int idx = lio->lis_single_subio_index;
- cl_page_list_splice(sub_qin, qin);
+ LASSERT(idx < lio->lis_nr_subios);
+ sub = lov_sub_get(env, lio, idx);
+ LASSERT(!IS_ERR(sub));
+ LASSERT(sub->sub_io == &lio->lis_single_subio);
+ rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue,
+ from, to, cb);
+ lov_sub_put(sub);
+ return rc;
}
- if (alloc) {
- kvfree(stripes_qin);
- } else {
- int i;
+ LASSERT(lio->lis_subs);
- for (i = 0; i < lio->lis_nr_subios; i++) {
- struct cl_io *cio = lio->lis_subs[i].sub_io;
+ cl_page_list_init(plist);
+ while (queue->pl_nr > 0) {
+ int stripe_to = to;
+ int stripe;
- if (cio && cio == &ld->ld_emrg[i]->emrg_subio)
- lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
+ LASSERT(plist->pl_nr == 0);
+ page = cl_page_list_first(queue);
+ cl_page_list_move(plist, queue, page);
+
+ stripe = lov_page_stripe(page);
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ if (stripe != lov_page_stripe(page))
+ break;
+
+ cl_page_list_move(plist, queue, page);
}
- lio->lis_mem_frozen = 0;
- mutex_unlock(&ld->ld_mutex);
- }
- return rc;
-#undef QIN
-}
+ if (queue->pl_nr > 0) /* still has more pages */
+ stripe_to = PAGE_SIZE;
-static int lov_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
+ sub = lov_sub_get(env, lio, stripe);
+ if (!IS_ERR(sub)) {
+ rc = cl_io_commit_async(sub->sub_env, sub->sub_io,
+ plist, from, stripe_to, cb);
+ lov_sub_put(sub);
+ } else {
+ rc = PTR_ERR(sub);
+ break;
+ }
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
-}
+ if (plist->pl_nr > 0) /* short write */
+ break;
-static int lov_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
+ from = 0;
+ }
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_commit_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
+ /* for error case, add the page back into the qin list */
+ LASSERT(ergo(rc == 0, plist->pl_nr == 0));
+ while (plist->pl_nr > 0) {
+ /* error occurred, add the uncommitted pages back into queue */
+ page = cl_page_list_last(plist);
+ cl_page_list_move_head(queue, plist, page);
+ }
+
+ return rc;
}
static int lov_io_fault_start(const struct lu_env *env,
@@ -803,16 +794,8 @@ static const struct cl_io_operations lov_io_ops = {
.cio_fini = lov_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = lov_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = lov_io_submit
- }
- },
- .cio_prepare_write = lov_io_prepare_write,
- .cio_commit_write = lov_io_commit_write
+ .cio_submit = lov_io_submit,
+ .cio_commit_async = lov_io_commit_async,
};
/*****************************************************************************
@@ -880,15 +863,8 @@ static const struct cl_io_operations lov_empty_io_ops = {
.cio_fini = lov_empty_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- },
- [CRT_WRITE] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- }
- },
- .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
+ .cio_submit = LOV_EMPTY_IMPOSSIBLE,
+ .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
@@ -943,7 +919,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- return result != 0;
+ return result;
}
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
@@ -986,7 +962,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- return result != 0;
+ return result;
}
/** @} lov */
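Both the rewritten lov_io_submit() and the new lov_io_commit_async() above share one loop shape: peel the first page off the queue, note its stripe, keep moving pages to a private batch while the next page matches that stripe, hand the batch to the per-stripe sub-io, and repeat until the queue drains. A standalone model of that grouping — integers stand in for cl_page, and page_stripe() stands in for lov_page_stripe():

#include <stdio.h>

#define NPAGES 6

/* Stand-in for lov_page_stripe(): map a "page" to its stripe. */
static int page_stripe(int page) { return page % 2; }

int main(void)
{
        int queue[NPAGES] = { 0, 2, 4, 1, 3, 5 }; /* pages to submit */
        int head = 0;

        while (head < NPAGES) {
                int stripe = page_stripe(queue[head]);
                int batch = head;

                /* Consume the run of pages belonging to this stripe. */
                while (head < NPAGES && page_stripe(queue[head]) == stripe)
                        head++;

                printf("stripe %d:", stripe);
                for (; batch < head; batch++)
                        printf(" page %d", queue[batch]);
                printf("\n"); /* the real code calls the sub-io here */
        }
        return 0;
}

Like the diff, the model takes maximal runs of same-stripe pages; a stripe that reappears later in the queue simply gets a second batch on a later outer iteration. This replaces the old per-stripe queue array (and its PF_MEMALLOC fallback path) with a single scratch list.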
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index ae854bc25..1b203d18c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -46,11 +46,6 @@
* @{
*/
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent);
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice);
/*****************************************************************************
*
* Lov lock operations.
@@ -58,7 +53,7 @@ static int lov_lock_unuse(const struct lu_env *env,
*/
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
- struct cl_lock *parent,
+ const struct cl_lock *parent,
struct lov_lock_sub *lls)
{
struct lov_sublock_env *subenv;
@@ -100,185 +95,26 @@ static void lov_sublock_env_put(struct lov_sublock_env *subenv)
lov_sub_put(subenv->lse_sub);
}
-static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock, int idx,
- struct lov_lock_link *link)
+static int lov_sublock_init(const struct lu_env *env,
+ const struct cl_lock *parent,
+ struct lov_lock_sub *lls)
{
- struct lovsub_lock *lsl;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
- int rc;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sublock));
-
- lsl = cl2sub_lock(sublock);
- /*
- * check that sub-lock doesn't have lock link to this top-lock.
- */
- LASSERT(!lov_lock_link_find(env, lck, lsl));
- LASSERT(idx < lck->lls_nr);
-
- lck->lls_sub[idx].sub_lock = lsl;
- lck->lls_nr_filled++;
- LASSERT(lck->lls_nr_filled <= lck->lls_nr);
- list_add_tail(&link->lll_list, &lsl->lss_parents);
- link->lll_idx = idx;
- link->lll_super = lck;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lov-child", sublock);
- lck->lls_sub[idx].sub_flags |= LSF_HELD;
- cl_lock_user_add(env, sublock);
-
- rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
- LASSERT(rc == 0); /* there is no way this can fail, currently */
-}
-
-static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
- const struct cl_io *io,
- struct lov_lock *lck,
- int idx, struct lov_lock_link **out)
-{
- struct cl_lock *sublock;
- struct cl_lock *parent;
- struct lov_lock_link *link;
-
- LASSERT(idx < lck->lls_nr);
-
- link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS);
- if (link) {
- struct lov_sublock_env *subenv;
- struct lov_lock_sub *lls;
- struct cl_lock_descr *descr;
-
- parent = lck->lls_cl.cls_lock;
- lls = &lck->lls_sub[idx];
- descr = &lls->sub_got;
-
- subenv = lov_sublock_env_get(env, parent, lls);
- if (!IS_ERR(subenv)) {
- /* CAVEAT: Don't try to add a field in lov_lock_sub
- * to remember the subio. This is because lock is able
- * to be cached, but this is not true for IO. This
- * further means a sublock might be referenced in
- * different io context. -jay
- */
-
- sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
- descr, "lov-parent", parent);
- lov_sublock_env_put(subenv);
- } else {
- /* error occurs. */
- sublock = (void *)subenv;
- }
-
- if (!IS_ERR(sublock))
- *out = link;
- else
- kmem_cache_free(lov_lock_link_kmem, link);
- } else
- sublock = ERR_PTR(-ENOMEM);
- return sublock;
-}
-
-static void lov_sublock_unlock(const struct lu_env *env,
- struct lovsub_lock *lsl,
- struct cl_lock_closure *closure,
- struct lov_sublock_env *subenv)
-{
- lov_sublock_env_put(subenv);
- lsl->lss_active = NULL;
- cl_lock_disclosure(env, closure);
-}
-
-static int lov_sublock_lock(const struct lu_env *env,
- struct lov_lock *lck,
- struct lov_lock_sub *lls,
- struct cl_lock_closure *closure,
- struct lov_sublock_env **lsep)
-{
- struct lovsub_lock *sublock;
- struct cl_lock *child;
- int result = 0;
-
- LASSERT(list_empty(&closure->clc_list));
-
- sublock = lls->sub_lock;
- child = sublock->lss_cl.cls_lock;
- result = cl_lock_closure_build(env, child, closure);
- if (result == 0) {
- struct cl_lock *parent = closure->clc_origin;
-
- LASSERT(cl_lock_is_mutexed(child));
- sublock->lss_active = parent;
-
- if (unlikely((child->cll_state == CLS_FREEING) ||
- (child->cll_flags & CLF_CANCELLED))) {
- struct lov_lock_link *link;
- /*
- * we could race with lock deletion which temporarily
- * put the lock in freeing state, bug 19080.
- */
- LASSERT(!(lls->sub_flags & LSF_HELD));
-
- link = lov_lock_link_find(env, lck, sublock);
- LASSERT(link);
- lov_lock_unlink(env, link, sublock);
- lov_sublock_unlock(env, sublock, closure, NULL);
- lck->lls_cancel_race = 1;
- result = CLO_REPEAT;
- } else if (lsep) {
- struct lov_sublock_env *subenv;
+ struct lov_sublock_env *subenv;
+ int result;
- subenv = lov_sublock_env_get(env, parent, lls);
- if (IS_ERR(subenv)) {
- lov_sublock_unlock(env, sublock,
- closure, NULL);
- result = PTR_ERR(subenv);
- } else {
- *lsep = subenv;
- }
- }
+ subenv = lov_sublock_env_get(env, parent, lls);
+ if (!IS_ERR(subenv)) {
+ result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
+ subenv->lse_io);
+ lov_sublock_env_put(subenv);
+ } else {
+ /* an error occurred. */
+ result = PTR_ERR(subenv);
}
return result;
}
/**
- * Updates the result of a top-lock operation from a result of sub-lock
- * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
- * over sub-locks and lov_subresult() is used to calculate return value of a
- * top-operation. To this end, possible return values of sub-operations are
- * ordered as
- *
- * - 0 success
- * - CLO_WAIT wait for event
- * - CLO_REPEAT repeat top-operation
- * - -ne fundamental error
- *
- * Top-level return code can only go down through this list. CLO_REPEAT
- * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
- * has to be rechecked by the upper layer.
- */
-static int lov_subresult(int result, int rc)
-{
- int result_rank;
- int rc_rank;
-
- LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
- "result = %d\n", result);
- LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
- "rc = %d\n", rc);
- CLASSERT(CLO_WAIT < CLO_REPEAT);
-
- /* calculate ranks in the ordering above */
- result_rank = result < 0 ? 1 + CLO_REPEAT : result;
- rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
-
- if (result_rank < rc_rank)
- result = rc;
- return result;
-}
-
-/**
* Creates sub-locks for a given lov_lock for the first time.
*
* Goes through all sub-objects of top-object, and creates sub-locks on every
@@ -286,8 +122,9 @@ static int lov_subresult(int result, int rc)
* fact that top-lock (that is being created) can be accessed concurrently
* through already created sub-locks (possibly shared with other top-locks).
*/
-static int lov_lock_sub_init(const struct lu_env *env,
- struct lov_lock *lck, const struct cl_io *io)
+static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
+ const struct cl_object *obj,
+ struct cl_lock *lock)
{
int result = 0;
int i;
@@ -297,241 +134,86 @@ static int lov_lock_sub_init(const struct lu_env *env,
u64 file_start;
u64 file_end;
- struct lov_object *loo = cl2lov(lck->lls_cl.cls_obj);
+ struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct cl_lock *parent = lck->lls_cl.cls_lock;
+ struct lov_lock *lovlck;
- lck->lls_orig = parent->cll_descr;
- file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
- file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
+ file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
+ file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
for (i = 0, nr = 0; i < r0->lo_nr; i++) {
/*
* XXX for wide striping smarter algorithm is desirable,
* breaking out of the loop, early.
*/
- if (likely(r0->lo_sub[i]) &&
+ if (likely(r0->lo_sub[i]) && /* sparse layout */
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end))
nr++;
}
LASSERT(nr > 0);
- lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS);
- if (!lck->lls_sub)
- return -ENOMEM;
+ lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
+ GFP_NOFS);
+ if (!lovlck)
+ return ERR_PTR(-ENOMEM);
- lck->lls_nr = nr;
- /*
- * First, fill in sub-lock descriptions in
- * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
- * (called below in this function, and by lov_lock_enqueue()) to
- * create sub-locks. At this moment, no other thread can access
- * top-lock.
- */
+ lovlck->lls_nr = nr;
for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
if (likely(r0->lo_sub[i]) &&
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end)) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
struct cl_lock_descr *descr;
- descr = &lck->lls_sub[nr].sub_descr;
+ descr = &lls->sub_lock.cll_descr;
LASSERT(!descr->cld_obj);
descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
descr->cld_start = cl_index(descr->cld_obj, start);
descr->cld_end = cl_index(descr->cld_obj, end);
- descr->cld_mode = parent->cll_descr.cld_mode;
- descr->cld_gid = parent->cll_descr.cld_gid;
- descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
- /* XXX has no effect */
- lck->lls_sub[nr].sub_got = *descr;
- lck->lls_sub[nr].sub_stripe = i;
+ descr->cld_mode = lock->cll_descr.cld_mode;
+ descr->cld_gid = lock->cll_descr.cld_gid;
+ descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
+ lls->sub_stripe = i;
+
+ /* initialize sub lock */
+ result = lov_sublock_init(env, lock, lls);
+ if (result < 0)
+ break;
+
+ lls->sub_initialized = 1;
nr++;
}
}
- LASSERT(nr == lck->lls_nr);
-
- /*
- * Some sub-locks can be missing at this point. This is not a problem,
- * because enqueue will create them anyway. Main duty of this function
- * is to fill in sub-lock descriptions in a race free manner.
- */
- return result;
-}
+ LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
-static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
- int i, int deluser, int rc)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (lck->lls_sub[i].sub_flags & LSF_HELD) {
- struct cl_lock *sublock;
- int dying;
-
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
+ if (result != 0) {
+ for (i = 0; i < nr; ++i) {
+ if (!lovlck->lls_sub[i].sub_initialized)
+ break;
- lck->lls_sub[i].sub_flags &= ~LSF_HELD;
- if (deluser)
- cl_lock_user_del(env, sublock);
- /*
- * If the last hold is released, and cancellation is pending
- * for a sub-lock, release parent mutex, to avoid keeping it
- * while sub-lock is being paged out.
- */
- dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
- sublock->cll_descr.cld_mode == CLM_GROUP ||
- (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
- sublock->cll_holds == 1;
- if (dying)
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- if (dying) {
- cl_lock_mutex_get(env, parent);
- rc = lov_subresult(rc, CLO_REPEAT);
+ cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
}
- /*
- * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
- * not backed by a reference on a
- * sub-lock. lovsub_lock_delete() will clear
- * lck->lls_sub[i].sub_lock under semaphores, just before
- * sub-lock is destroyed.
- */
+ kvfree(lovlck);
+ lovlck = ERR_PTR(result);
}
- return rc;
-}
-
-static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
- int i)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
- struct cl_lock *sublock;
-
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
- LASSERT(sublock->cll_state != CLS_FREEING);
- lck->lls_sub[i].sub_flags |= LSF_HELD;
-
- cl_lock_get_trust(sublock);
- cl_lock_hold_add(env, sublock, "lov-parent", parent);
- cl_lock_user_add(env, sublock);
- cl_lock_put(env, sublock);
- }
+ return lovlck;
}
static void lov_lock_fini(const struct lu_env *env,
struct cl_lock_slice *slice)
{
- struct lov_lock *lck;
+ struct lov_lock *lovlck;
int i;
- lck = cl2lov_lock(slice);
- LASSERT(lck->lls_nr_filled == 0);
- if (lck->lls_sub) {
- for (i = 0; i < lck->lls_nr; ++i)
- /*
- * No sub-locks exists at this point, as sub-lock has
- * a reference on its parent.
- */
- LASSERT(!lck->lls_sub[i].sub_lock);
- kvfree(lck->lls_sub);
+ lovlck = cl2lov_lock(slice);
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
+ if (lovlck->lls_sub[i].sub_initialized)
+ cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
}
- kmem_cache_free(lov_lock_kmem, lck);
-}
-
-static int lov_lock_enqueue_wait(const struct lu_env *env,
- struct lov_lock *lck,
- struct cl_lock *sublock)
-{
- struct cl_lock *lock = lck->lls_cl.cls_lock;
- int result;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- cl_lock_mutex_put(env, lock);
- result = cl_lock_enqueue_wait(env, sublock, 0);
- cl_lock_mutex_get(env, lock);
- return result ?: CLO_REPEAT;
-}
-
-/**
- * Tries to advance a state machine of a given sub-lock toward enqueuing of
- * the top-lock.
- *
- * \retval 0 if state-transition can proceed
- * \retval -ve otherwise.
- */
-static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock,
- struct cl_io *io, __u32 enqflags, int last)
-{
- int result;
-
- /* first, try to enqueue a sub-lock ... */
- result = cl_enqueue_try(env, sublock, io, enqflags);
- if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
- /* if it is enqueued, try to `wait' on it---maybe it's already
- * granted
- */
- result = cl_wait_try(env, sublock);
- if (result == CLO_REENQUEUED)
- result = CLO_WAIT;
- }
- /*
- * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
- * parallel, otherwise---enqueue has to wait until sub-lock is granted
- * before proceeding to the next one.
- */
- if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
- (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
- result = 0;
- return result;
-}
-
-/**
- * Helper function for lov_lock_enqueue() that creates missing sub-lock.
- */
-static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
- struct cl_io *io, struct lov_lock *lck, int idx)
-{
- struct lov_lock_link *link = NULL;
- struct cl_lock *sublock;
- int result;
-
- LASSERT(parent->cll_depth == 1);
- cl_lock_mutex_put(env, parent);
- sublock = lov_sublock_alloc(env, io, lck, idx, &link);
- if (!IS_ERR(sublock))
- cl_lock_mutex_get(env, sublock);
- cl_lock_mutex_get(env, parent);
-
- if (!IS_ERR(sublock)) {
- cl_lock_get_trust(sublock);
- if (parent->cll_state == CLS_QUEUING &&
- !lck->lls_sub[idx].sub_lock) {
- lov_sublock_adopt(env, lck, sublock, idx, link);
- } else {
- kmem_cache_free(lov_lock_link_kmem, link);
- /* other thread allocated sub-lock, or enqueue is no
- * longer going on
- */
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- cl_lock_mutex_get(env, parent);
- }
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- result = CLO_REPEAT;
- } else
- result = PTR_ERR(sublock);
- return result;
+ kvfree(lovlck);
}
/**
@@ -543,529 +225,59 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
*/
static int lov_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags)
+ struct cl_io *io, struct cl_sync_io *anchor)
{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, lock);
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
- int result;
- enum cl_lock_state minstate;
+ int rc = 0;
- for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct lov_lock_sub *lls;
- struct cl_lock *sublock;
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
struct lov_sublock_env *subenv;
- if (lock->cll_state != CLS_QUEUING) {
- /*
- * Lock might have left QUEUING state if previous
- * iteration released its mutex. Stop enqueing in this
- * case and let the upper layer to decide what to do.
- */
- LASSERT(i > 0 && result != 0);
- break;
- }
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- if (!sub) {
- result = lov_sublock_fill(env, lock, io, lck, i);
- /* lov_sublock_fill() released @lock mutex,
- * restart.
- */
+ subenv = lov_sublock_env_get(env, lock, lls);
+ if (IS_ERR(subenv)) {
+ rc = PTR_ERR(subenv);
break;
}
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- lov_sublock_hold(env, lck, i);
- rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
- subenv->lse_io, enqflags,
- i == lck->lls_nr - 1);
- minstate = min(minstate, sublock->cll_state);
- if (rc == CLO_WAIT) {
- switch (sublock->cll_state) {
- case CLS_QUEUING:
- /* take recursive mutex, the lock is
- * released in lov_lock_enqueue_wait.
- */
- cl_lock_mutex_get(env, sublock);
- lov_sublock_unlock(env, sub, closure,
- subenv);
- rc = lov_lock_enqueue_wait(env, lck,
- sublock);
- break;
- case CLS_CACHED:
- cl_lock_get(sublock);
- /* take recursive mutex of sublock */
- cl_lock_mutex_get(env, sublock);
- /* need to release all locks in closure
- * otherwise it may deadlock. LU-2683.
- */
- lov_sublock_unlock(env, sub, closure,
- subenv);
- /* sublock and parent are held. */
- rc = lov_sublock_release(env, lck, i,
- 1, rc);
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- break;
- default:
- lov_sublock_unlock(env, sub, closure,
- subenv);
- break;
- }
- } else {
- LASSERT(!sublock->cll_conflict);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- }
- result = lov_subresult(result, rc);
- if (result != 0)
+ rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
+ &lls->sub_lock, anchor);
+ lov_sublock_env_put(subenv);
+ if (rc != 0)
break;
- }
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int i;
- int result;
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
- /* top-lock state cannot change concurrently, because single
- * thread (one that released the last hold) carries unlocking
- * to the completion.
- */
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (lls->sub_flags & LSF_HELD) {
- LASSERT(sublock->cll_state == CLS_HELD ||
- sublock->cll_state == CLS_ENQUEUED);
- rc = cl_unuse_try(subenv->lse_env, sublock);
- rc = lov_sublock_release(env, lck, i, 0, rc);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
+ lls->sub_is_enqueued = 1;
}
-
- if (result == 0 && lck->lls_cancel_race) {
- lck->lls_cancel_race = 0;
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
+ return rc;
}
static void lov_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
- int result;
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
+ struct cl_lock *sublock = &lls->sub_lock;
struct lov_sublock_env *subenv;
- /* top-lock state cannot change concurrently, because single
- * thread (one that released the last hold) carries unlocking
- * to the completion.
- */
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (!(lls->sub_flags & LSF_HELD)) {
- lov_sublock_unlock(env, sub, closure, subenv);
- continue;
- }
-
- switch (sublock->cll_state) {
- case CLS_HELD:
- rc = cl_unuse_try(subenv->lse_env, sublock);
- lov_sublock_release(env, lck, i, 0, 0);
- break;
- default:
- lov_sublock_release(env, lck, i, 1, 0);
- break;
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
-
- if (rc == CLO_REPEAT) {
- --i;
- continue;
- }
-
- result = lov_subresult(result, rc);
- }
-
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
- "lov_lock_cancel fails with %d.\n", result);
-
- cl_lock_closure_fini(closure);
-}
-
-static int lov_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- enum cl_lock_state minstate;
- int reenqueued;
- int result;
- int i;
-
-again:
- for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
- i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state >= CLS_ENQUEUED);
- if (sublock->cll_state < CLS_HELD)
- rc = cl_wait_try(env, sublock);
-
- minstate = min(minstate, sublock->cll_state);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- if (rc == CLO_REENQUEUED) {
- reenqueued++;
- rc = 0;
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
- /* Each sublock only can be reenqueued once, so will not loop
- * forever.
- */
- if (result == 0 && reenqueued != 0)
- goto again;
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int result;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub) {
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- result = -ESTALE;
- break;
- }
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state != CLS_FREEING);
- lov_sublock_hold(env, lck, i);
- if (sublock->cll_state == CLS_CACHED) {
- rc = cl_use_try(subenv->lse_env, sublock, 0);
- if (rc != 0)
- rc = lov_sublock_release(env, lck,
- i, 1, rc);
- } else if (sublock->cll_state == CLS_NEW) {
- /* Sub-lock might have been canceled, while
- * top-lock was cached.
- */
- result = -ESTALE;
- lov_sublock_release(env, lck, i, 1, result);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
-
- if (lck->lls_cancel_race) {
- /*
- * If there is unlocking happened at the same time, then
- * sublock_lock state should be FREEING, and lov_sublock_lock
- * should return CLO_REPEAT. In this case, it should return
- * ESTALE, and up layer should reset the lock state to be NEW.
- */
- lck->lls_cancel_race = 0;
- LASSERT(result != 0);
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
-}
-
-/**
- * Check if the extent region \a descr is covered by \a child against the
- * specific \a stripe.
- */
-static int lov_lock_stripe_is_matching(const struct lu_env *env,
- struct lov_object *lov, int stripe,
- const struct cl_lock_descr *child,
- const struct cl_lock_descr *descr)
-{
- struct lov_stripe_md *lsm = lov->lo_lsm;
- u64 start;
- u64 end;
- int result;
-
- if (lov_r0(lov)->lo_nr == 1)
- return cl_lock_ext_match(child, descr);
-
- /*
- * For a multi-stripes object:
- * - make sure the descr only covers child's stripe, and
- * - check if extent is matching.
- */
- start = cl_offset(&lov->lo_cl, descr->cld_start);
- end = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
- result = 0;
- /* glimpse should work on the object with LOV EA hole. */
- if (end - start <= lsm->lsm_stripe_size) {
- int idx;
-
- idx = lov_stripe_number(lsm, start);
- if (idx == stripe ||
- unlikely(!lov_r0(lov)->lo_sub[idx])) {
- idx = lov_stripe_number(lsm, end);
- if (idx == stripe ||
- unlikely(!lov_r0(lov)->lo_sub[idx]))
- result = 1;
- }
- }
-
- if (result != 0) {
- struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
- u64 sub_start;
- u64 sub_end;
-
- subd->cld_obj = NULL; /* don't need sub object at all */
- subd->cld_mode = descr->cld_mode;
- subd->cld_gid = descr->cld_gid;
- result = lov_stripe_intersects(lsm, stripe, start, end,
- &sub_start, &sub_end);
- LASSERT(result);
- subd->cld_start = cl_index(child->cld_obj, sub_start);
- subd->cld_end = cl_index(child->cld_obj, sub_end);
- result = cl_lock_ext_match(child, subd);
- }
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_fits_into() method.
- *
- * Checks whether a lock (given by \a slice) is suitable for \a
- * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
- * O_APPEND write.
- *
- * \see ccc_lock_fits_into().
- */
-static int lov_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct lov_lock *lov = cl2lov_lock(slice);
- struct lov_object *obj = cl2lov(slice->cls_obj);
- int result;
-
- LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
- LASSERT(lov->lls_nr > 0);
-
- /* for top lock, it's necessary to match enq flags otherwise it will
- * run into problem if a sublock is missing and reenqueue.
- */
- if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
- return 0;
-
- if (need->cld_mode == CLM_GROUP)
- /*
- * always allow to match group lock.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- else if (lov->lls_nr == 1) {
- struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
-
- result = lov_lock_stripe_is_matching(env,
- cl2lov(slice->cls_obj),
- lov->lls_sub[0].sub_stripe,
- got, need);
- } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
- !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
- /*
- * Multi-stripe locks are only suitable for `quick' IO and for
- * glimpse.
- */
- result = 0;
- else
- /*
- * Most general case: multi-stripe existing lock, and
- * (potentially) multi-stripe @need lock. Check that @need is
- * covered by @lov's sub-locks.
- *
- * For now, ignore lock expansions made by the server, and
- * match against original lock extent.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
- PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
- lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
- result);
- return result;
-}
-
-void lov_lock_unlink(const struct lu_env *env,
- struct lov_lock_link *link, struct lovsub_lock *sub)
-{
- struct lov_lock *lck = link->lll_super;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_del_init(&link->lll_list);
- LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
- /* yank this sub-lock from parent's array */
- lck->lls_sub[link->lll_idx].sub_lock = NULL;
- LASSERT(lck->lls_nr_filled > 0);
- lck->lls_nr_filled--;
- lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
- cl_lock_put(env, parent);
- kmem_cache_free(lov_lock_link_kmem, link);
-}
-
-struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
- struct lov_lock *lck,
- struct lovsub_lock *sub)
-{
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- if (scan->lll_super == lck)
- return scan;
- }
- return NULL;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked for "top-to-bottom" delete, when lock destruction starts from the
- * top-lock, e.g., as a result of inode destruction.
- *
- * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
- * this is done separately elsewhere:
- *
- * - for inode destruction, lov_object_delete() calls cl_object_kill() for
- * each sub-object, purging its locks;
- *
- * - in other cases (e.g., a fatal error with a top-lock) sub-locks are
- * left in the cache.
- */
-static void lov_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- struct lov_lock_link *link;
- int rc;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
-
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lck->lls_sub[i];
- struct lovsub_lock *lsl = lls->sub_lock;
-
- if (!lsl) /* already removed */
+ if (!lls->sub_is_enqueued)
continue;
- rc = lov_sublock_lock(env, lck, lls, closure, NULL);
- if (rc == CLO_REPEAT) {
- --i;
- continue;
+ lls->sub_is_enqueued = 0;
+ subenv = lov_sublock_env_get(env, lock, lls);
+ if (!IS_ERR(subenv)) {
+ cl_lock_cancel(subenv->lse_env, sublock);
+ lov_sublock_env_put(subenv);
+ } else {
+ CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
+ "lov_lock_cancel fails with %ld.\n",
+ PTR_ERR(subenv));
}
-
- LASSERT(rc == 0);
- LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
-
- if (lls->sub_flags & LSF_HELD)
- lov_sublock_release(env, lck, i, 1, 0);
-
- link = lov_lock_link_find(env, lck, lsl);
- LASSERT(link);
- lov_lock_unlink(env, link, lsl);
- LASSERT(!lck->lls_sub[i].sub_lock);
-
- lov_sublock_unlock(env, lsl, closure, NULL);
}
-
- cl_lock_closure_fini(closure);
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
@@ -1079,12 +291,8 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
struct lov_lock_sub *sub;
sub = &lck->lls_sub[i];
- (*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
- if (sub->sub_lock)
- cl_lock_print(env, cookie, p,
- sub->sub_lock->lss_cl.cls_lock);
- else
- (*p)(env, cookie, "---\n");
+ (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
+ cl_lock_print(env, cookie, p, &sub->sub_lock);
}
return 0;
}
@@ -1092,12 +300,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
static const struct cl_lock_operations lov_lock_ops = {
.clo_fini = lov_lock_fini,
.clo_enqueue = lov_lock_enqueue,
- .clo_wait = lov_lock_wait,
- .clo_use = lov_lock_use,
- .clo_unuse = lov_lock_unuse,
.clo_cancel = lov_lock_cancel,
- .clo_fits_into = lov_lock_fits_into,
- .clo_delete = lov_lock_delete,
.clo_print = lov_lock_print
};
@@ -1105,14 +308,13 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io)
{
struct lov_lock *lck;
- int result;
+ int result = 0;
- lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
- if (lck) {
+ lck = lov_lock_sub_init(env, obj, lock);
+ if (!IS_ERR(lck))
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
- result = lov_lock_sub_init(env, lck, io);
- } else
- result = -ENOMEM;
+ else
+ result = PTR_ERR(lck);
return result;
}
@@ -1147,21 +349,9 @@ int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
if (lck) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
- lck->lls_orig = lock->cll_descr;
result = 0;
}
return result;
}
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent)
-{
- struct cl_lock_closure *closure;
-
- closure = &lov_env_info(env)->lti_closure;
- LASSERT(list_empty(&closure->clc_list));
- cl_lock_closure_init(env, closure, parent, 1);
- return closure;
-}
-
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 029cd4d62..56ef41d17 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -154,6 +154,7 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
valid &= src->o_valid;
if (*set) {
+ tgt->o_valid &= valid;
if (valid & OBD_MD_FLSIZE) {
/* this handles sparse files properly */
u64 lov_size;
@@ -172,12 +173,22 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
tgt->o_mtime = src->o_mtime;
if (valid & OBD_MD_FLDATAVERSION)
tgt->o_data_version += src->o_data_version;
+
+ /* handle flags */
+ if (valid & OBD_MD_FLFLAGS)
+ tgt->o_flags &= src->o_flags;
+ else
+ tgt->o_flags = 0;
} else {
memcpy(tgt, src, sizeof(*tgt));
tgt->o_oi = lsm->lsm_oi;
+ tgt->o_valid = valid;
if (valid & OBD_MD_FLSIZE)
tgt->o_size = lov_stripe_size(lsm, src->o_size,
stripeno);
+ tgt->o_flags = 0;
+ if (valid & OBD_MD_FLFLAGS)
+ tgt->o_flags = src->o_flags;
}
/* data_version needs to be valid on all stripes to be correct! */
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 5daa7faf4..e15ef2ece 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -54,7 +54,6 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
#include "../include/cl_object.h"
-#include "../include/lclient.h" /* for cl_client_lru */
#include "../include/lustre/ll_fiemap.h"
#include "../include/lustre_fid.h"
@@ -124,7 +123,6 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
static int lov_notify(struct obd_device *obd, struct obd_device *watched,
enum obd_notify_event ev, void *data);
-#define MAX_STRING_SIZE 128
int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
struct obd_connect_data *data)
{
@@ -965,7 +963,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
rc = -EINVAL;
goto out;
-
}
}
out:
@@ -1734,6 +1731,27 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
if (!lsm_has_objects(lsm)) {
+ if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
+ fm_key->oa.o_size)) {
+ /*
+			 * released file: return a minimal FIEMAP if the
+			 * request fits within the file size.
+ */
+ fiemap->fm_mapped_extents = 1;
+ fiemap->fm_extents[0].fe_logical =
+ fm_key->fiemap.fm_start;
+ if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
+ fm_key->oa.o_size) {
+ fiemap->fm_extents[0].fe_length =
+ fm_key->fiemap.fm_length;
+ } else {
+ fiemap->fm_extents[0].fe_length =
+ fm_key->oa.o_size - fm_key->fiemap.fm_start;
+ fiemap->fm_extents[0].fe_flags |=
+ (FIEMAP_EXTENT_UNKNOWN |
+ FIEMAP_EXTENT_LAST);
+ }
+ }
rc = 0;
goto out;
}
@@ -2173,7 +2191,6 @@ void lov_stripe_lock(struct lov_stripe_md *md)
LASSERT(md->lsm_lock_owner == 0);
md->lsm_lock_owner = current_pid();
}
-EXPORT_SYMBOL(lov_stripe_lock);
void lov_stripe_unlock(struct lov_stripe_md *md)
__releases(&md->lsm_lock)
@@ -2182,7 +2199,6 @@ void lov_stripe_unlock(struct lov_stripe_md *md)
md->lsm_lock_owner = 0;
spin_unlock(&md->lsm_lock);
}
-EXPORT_SYMBOL(lov_stripe_unlock);
static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
struct obd_quotactl *oqctl)
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 1f8ed95a6..561d493b2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -67,7 +67,7 @@ struct lov_layout_operations {
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
@@ -193,6 +193,18 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
return result;
}
+static int lov_page_slice_fixup(struct lov_object *lov,
+ struct cl_object *stripe)
+{
+ struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
+ struct cl_object *o;
+
+ cl_object_for_each(o, stripe)
+ o->co_slice_off += hdr->coh_page_bufsize;
+
+ return cl_object_header(stripe)->coh_page_bufsize;
+}
+
static int lov_init_raid0(const struct lu_env *env,
struct lov_device *dev, struct lov_object *lov,
const struct cl_object_conf *conf,
@@ -222,6 +234,8 @@ static int lov_init_raid0(const struct lu_env *env,
r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
GFP_NOFS);
if (r0->lo_sub) {
+ int psz = 0;
+
result = 0;
subconf->coc_inode = conf->coc_inode;
spin_lock_init(&r0->lo_sub_lock);
@@ -254,13 +268,24 @@ static int lov_init_raid0(const struct lu_env *env,
if (result == -EAGAIN) { /* try again */
--i;
result = 0;
+ continue;
}
} else {
result = PTR_ERR(stripe);
}
+
+ if (result == 0) {
+ int sz = lov_page_slice_fixup(lov, stripe);
+
+ LASSERT(ergo(psz > 0, psz == sz));
+ psz = sz;
+ }
}
- } else
+ if (result == 0)
+ cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
+ } else {
result = -ENOMEM;
+ }
out:
return result;
}
@@ -286,8 +311,6 @@ static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
lov_layout_wait(env, lov);
-
- cl_object_prune(env, &lov->lo_cl);
return 0;
}
@@ -355,7 +378,7 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
struct lovsub_object *los = r0->lo_sub[i];
if (los) {
- cl_locks_prune(env, &los->lso_cl, 1);
+ cl_object_prune(env, &los->lso_cl);
/*
* If top-level object is to be evicted from
* the cache, so are its sub-objects.
@@ -364,7 +387,6 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
}
}
}
- cl_object_prune(env, &lov->lo_cl);
return 0;
}
@@ -666,7 +688,6 @@ static int lov_layout_change(const struct lu_env *unused,
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
- struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
void *cookie;
struct lu_env *env;
int refcheck;
@@ -691,13 +712,15 @@ static int lov_layout_change(const struct lu_env *unused,
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
+ result = cl_object_prune(env, &lov->lo_cl);
+ if (result != 0)
+ goto out;
+
result = old_ops->llo_delete(env, lov, &lov->u);
if (result == 0) {
old_ops->llo_fini(env, lov, &lov->u);
LASSERT(atomic_read(&lov->lo_active_ios) == 0);
- LASSERT(!hdr->coh_tree.rnode);
- LASSERT(hdr->coh_pages == 0);
lov->lo_type = LLT_EMPTY;
result = new_ops->llo_init(env,
@@ -713,6 +736,7 @@ static int lov_layout_change(const struct lu_env *unused,
}
}
+out:
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
return result;
@@ -793,7 +817,8 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
goto out;
}
- lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
+ result = lov_layout_change(env, lov, conf);
+ lov->lo_layout_invalid = result != 0;
out:
lov_conf_unlock(lov);
@@ -825,10 +850,10 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
}
int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
- llo_page_init, env, obj, page, vmpage);
+ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
+ index);
}
/**
@@ -911,8 +936,9 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
* for object with different layouts.
*/
obj->lo_ops = &lov_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index ae83eb0f6..9302f06c3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -66,6 +66,18 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno)
return lov_size;
}
+/**
+ * Compute the file-level page index from a stripe-level page offset.
+ */
+pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
+ int stripe)
+{
+ loff_t offset;
+
+ offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
+ return offset >> PAGE_SHIFT;
+}
+
/* we have an offset in file backed by an lov and want to find out where
* that offset lands in our given stripe of the file. for the easy
* case where the offset is within the stripe, we just have to scale the
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 3925633a9..0215ea54d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -136,7 +136,6 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
return -EINVAL;
-
}
if (lsm) {
@@ -444,8 +443,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
if (lum.lmm_magic == LOV_USER_MAGIC) {
		/* User request for v1, we need to skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove((char *)(&lmmk->lmm_stripe_count) +
- sizeof(lmmk->lmm_stripe_count),
+ memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
lmmk->lmm_stripe_count *
sizeof(struct lov_ost_data_v1));
@@ -457,9 +455,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
}
/* User wasn't expecting this many OST entries */
- if (lum.lmm_stripe_count == 0)
+ if (lum.lmm_stripe_count == 0) {
lmm_size = lum_size;
- else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
+ } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
goto out_set;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index fdcaf8047..0306f00c3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -36,6 +36,7 @@
* Implementation of cl_page for LOV layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_LOV
@@ -52,116 +53,66 @@
*
*/
-static int lov_page_invariant(const struct cl_page_slice *slice)
+/**
+ * Adjust the stripe index according to the raid0 layout. @max_index is the
+ * maximum page index covered by an underlying DLM lock.
+ * This function converts max_index from stripe level to file level, and makes
+ * sure it does not extend beyond one stripe.
+ */
+static int lov_raid0_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused,
+ pgoff_t *max_index)
{
- const struct cl_page *page = slice->cpl_page;
- const struct cl_page *sub = lov_sub_page(slice);
-
- return ergo(sub,
- page->cp_child == sub &&
- sub->cp_parent == page &&
- page->cp_state == sub->cp_state);
-}
+ struct lov_object *loo = cl2lov(slice->cpl_obj);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ pgoff_t index = *max_index;
+ unsigned int pps; /* pages per stripe */
-static void lov_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct cl_page *sub = lov_sub_page(slice);
+ CDEBUG(D_READA, "*max_index = %lu, nr = %d\n", index, r0->lo_nr);
+ if (index == 0) /* the page is not covered by any lock */
+ return 0;
- LINVRNT(lov_page_invariant(slice));
+ if (r0->lo_nr == 1) /* single stripe file */
+ return 0;
- if (sub) {
- LASSERT(sub->cp_state == CPS_FREEING);
- lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
- sub->cp_parent = NULL;
- slice->cpl_page->cp_child = NULL;
- cl_page_put(env, sub);
+	/* max_index is at stripe level; convert it to file level */
+ if (index != CL_PAGE_EOF) {
+ int stripeno = lov_page_stripe(slice->cpl_page);
+ *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
}
-}
-
-static int lov_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
+ /* calculate the end of current stripe */
+ pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
+ index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1;
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- lov_sub_page(slice)->cp_owner = sub->sub_io;
- lov_sub_put(sub);
- } else
- LBUG(); /* Arrgh */
+ /* never exceed the end of the stripe */
+ *max_index = min_t(pgoff_t, *max_index, index);
return 0;
}
-static void lov_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- lov_page_own(env, slice, io, 0);
-}
-
-static int lov_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- int rc = 0;
-
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
- slice->cpl_page->cp_child, CRT_WRITE);
- lov_sub_put(sub);
- } else {
- rc = PTR_ERR(sub);
- CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
- }
- return rc;
-}
-
-static int lov_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
+static int lov_raid0_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
{
struct lov_page *lp = cl2lov_page(slice);
- return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, raid0\n", lp);
}
-static const struct cl_page_operations lov_page_ops = {
- .cpo_fini = lov_page_fini,
- .cpo_own = lov_page_own,
- .cpo_assume = lov_page_assume,
- .io = {
- [CRT_WRITE] = {
- .cpo_cache_add = lov_page_cache_add
- }
- },
- .cpo_print = lov_page_print
+static const struct cl_page_operations lov_raid0_page_ops = {
+ .cpo_is_under_lock = lov_raid0_page_is_under_lock,
+ .cpo_print = lov_raid0_page_print
};
-static void lov_empty_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- LASSERT(!slice->cpl_page->cp_child);
-}
-
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
struct lov_io *lio = lov_env_io(env);
- struct cl_page *subpage;
struct cl_object *subobj;
+ struct cl_object *o;
struct lov_io_sub *sub;
struct lov_page *lpg = cl_object_page_slice(obj, page);
loff_t offset;
@@ -169,59 +120,57 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
int stripe;
int rc;
- offset = cl_offset(obj, page->cp_index);
+ offset = cl_offset(obj, index);
stripe = lov_stripe_number(loo->lo_lsm, offset);
LASSERT(stripe < r0->lo_nr);
rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
LASSERT(rc == 0);
- lpg->lps_invalid = 1;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub)) {
- rc = PTR_ERR(sub);
- goto out;
- }
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
subobj = lovsub2cl(r0->lo_sub[stripe]);
- subpage = cl_page_find_sub(sub->sub_env, subobj,
- cl_index(subobj, suboff), vmpage, page);
- lov_sub_put(sub);
- if (IS_ERR(subpage)) {
- rc = PTR_ERR(subpage);
- goto out;
- }
-
- if (likely(subpage->cp_parent == page)) {
- lu_ref_add(&subpage->cp_reference, "lov", page);
- lpg->lps_invalid = 0;
- rc = 0;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
- CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
- LASSERT(0);
+ list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
+ if (o->co_ops->coo_page_init) {
+ rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
+ cl_index(subobj, suboff));
+ if (rc != 0)
+ break;
+ }
}
+ lov_sub_put(sub);
-out:
return rc;
}
+static int lov_empty_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct lov_page *lp = cl2lov_page(slice);
+
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, empty.\n",
+ lp);
+}
+
static const struct cl_page_operations lov_empty_page_ops = {
- .cpo_fini = lov_empty_page_fini,
- .cpo_print = lov_page_print
+ .cpo_print = lov_empty_page_print
};
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
- addr = kmap(vmpage);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
+ addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
- kunmap(vmpage);
+ kunmap(page->cp_vmpage);
cl_page_export(env, page, 1);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 9ae1d6f42..690292ece 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -65,7 +65,6 @@ void lov_pool_putref(struct pool_desc *pool)
LASSERT(hlist_unhashed(&pool->pool_hash));
LASSERT(list_empty(&pool->pool_list));
LASSERT(!pool->pool_debugfs_entry);
- lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
lov_ost_pool_free(&(pool->pool_obds));
kfree(pool);
}
@@ -424,11 +423,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
if (rc)
goto out_err;
- memset(&(new_pool->pool_rr), 0, sizeof(struct lov_qos_rr));
- rc = lov_ost_pool_init(&new_pool->pool_rr.lqr_pool, 0);
- if (rc)
- goto out_free_pool_obds;
-
INIT_HLIST_NODE(&new_pool->pool_hash);
/* get ref for debugfs file */
@@ -469,13 +463,10 @@ out_err:
list_del_init(&new_pool->pool_list);
lov->lov_pool_count--;
spin_unlock(&obd->obd_dev_lock);
-
ldebugfs_remove(&new_pool->pool_debugfs_entry);
-
- lov_ost_pool_free(&new_pool->pool_rr.lqr_pool);
-out_free_pool_obds:
lov_ost_pool_free(&new_pool->pool_obds);
kfree(new_pool);
+
return rc;
}
@@ -543,8 +534,6 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
if (rc)
goto out;
- pool->pool_rr.lqr_dirty = 1;
-
CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
ostname, poolname, pool_tgt_count(pool));
@@ -589,8 +578,6 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
lov_ost_pool_remove(&pool->pool_obds, lov_idx);
- pool->pool_rr.lqr_dirty = 1;
-
CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
poolname);
@@ -599,50 +586,3 @@ out:
lov_pool_putref(pool);
return rc;
}
-
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
-{
- int i, rc;
-
-	/* caller may not have a ref on pool if it got the pool
-	 * without calling lov_find_pool() (e.g. by going through the lov
-	 * pool list)
- */
- lov_pool_getref(pool);
-
- down_read(&pool_tgt_rw_sem(pool));
-
- for (i = 0; i < pool_tgt_count(pool); i++) {
- if (pool_tgt_array(pool)[i] == idx) {
- rc = 0;
- goto out;
- }
- }
- rc = -ENOENT;
-out:
- up_read(&pool_tgt_rw_sem(pool));
-
- lov_pool_putref(pool);
- return rc;
-}
-
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
-{
- struct pool_desc *pool;
-
- pool = NULL;
- if (poolname[0] != '\0') {
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (!pool)
- CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
- poolname);
- if (pool && (pool_tgt_count(pool) == 0)) {
- CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
- poolname);
- /* pool is ignored, so we remove ref on it */
- lov_pool_putref(pool);
- pool = NULL;
- }
- }
- return pool;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 7178a02d6..1be4b921c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -52,7 +52,6 @@ static void lov_init_set(struct lov_request_set *set)
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
- spin_lock_init(&set->set_lock);
}
void lov_finish_set(struct lov_request_set *set)
@@ -235,7 +234,6 @@ out:
if (tmp_oa)
kmem_cache_free(obdo_cachep, tmp_oa);
return rc;
-
}
int lov_fini_getattr_set(struct lov_request_set *set)
@@ -363,7 +361,6 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
set->set_oi = oinfo;
set->set_oi->oi_md = lsm;
set->set_oi->oi_oa = src_oa;
- set->set_oti = oti;
if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
@@ -480,7 +477,6 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
lov_init_set(set);
set->set_exp = exp;
- set->set_oti = oti;
set->set_oi = oinfo;
if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
@@ -716,12 +712,15 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
struct lov_request *req;
if (!lov->lov_tgts[i] ||
- (!lov_check_and_wait_active(lov, i) &&
- (oinfo->oi_flags & OBD_STATFS_NODELAY))) {
+ (oinfo->oi_flags & OBD_STATFS_NODELAY &&
+ !lov->lov_tgts[i]->ltd_active)) {
CDEBUG(D_HA, "lov idx %d inactive\n", i);
continue;
}
+ if (!lov->lov_tgts[i]->ltd_active)
+ lov_check_and_wait_active(lov, i);
+
/* skip targets that have been explicitly disabled by the
* administrator
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index c335c020f..35f6b1d66 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -151,8 +151,9 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
if (lsr) {
cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -182,10 +183,12 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
d = lovsub2lu_dev(lsd);
d->ld_ops = &lovsub_lu_ops;
lsd->acid_cl.cd_ops = &lovsub_cl_ops;
- } else
+ } else {
d = ERR_PTR(result);
- } else
+ }
+ } else {
d = ERR_PTR(-ENOMEM);
+ }
return d;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 3bb0c9068..e92edfb61 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -62,391 +62,8 @@ static void lovsub_lock_fini(const struct lu_env *env,
kmem_cache_free(lovsub_lock_kmem, lsl);
}
-static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_mutex_get(env, parent);
-}
-
-static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
- lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_put(env, parent);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for lovsub layer, which
- * method is called whenever sub-lock state changes. Propagates state change
- * to the top-locks.
- */
-static void lovsub_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- struct lov_lock *lov = scan->lll_super;
- struct cl_lock *parent = lov->lls_cl.cls_lock;
-
- if (sub->lss_active != parent) {
- lovsub_parent_lock(env, lov);
- cl_lock_signal(env, parent);
- lovsub_parent_unlock(env, lov);
- }
- }
-}
-
-/**
- * Implementation of cl_lock_operation::clo_weigh() estimating lock weight by
- * asking parent lock.
- */
-static unsigned long lovsub_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- unsigned long dumbbell;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- if (!list_empty(&lock->lss_parents)) {
- /*
- * It is not clear whether all parents have to be asked and
-		 * It is not clear whether all parents have to be asked and
-		 * their estimates summed, or whether it is enough to ask one.
-		 * For the current usage, one is always enough.
- lov = container_of(lock->lss_parents.next,
- struct lov_lock_link, lll_list)->lll_super;
-
- lovsub_parent_lock(env, lov);
- dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
- lovsub_parent_unlock(env, lov);
- } else
- dumbbell = 0;
-
- return dumbbell;
-}
-
-/**
- * Maps start/end offsets within a stripe, to offsets within a file.
- */
-static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
- struct lov_object *lov,
- int stripe, struct cl_lock_descr *out)
-{
- pgoff_t size; /* stripe size in pages */
- pgoff_t skip; /* how many pages in every stripe are occupied by
- * "other" stripes
- */
- pgoff_t start;
- pgoff_t end;
-
- start = in->cld_start;
- end = in->cld_end;
-
- if (lov->lo_lsm->lsm_stripe_count > 1) {
- size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
- skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
-
- /* XXX overflow check here? */
- start += start/size * skip + stripe * size;
-
- if (end != CL_PAGE_EOF) {
- end += end/size * skip + stripe * size;
- /*
- * And check for overflow...
- */
- if (end < in->cld_end)
- end = CL_PAGE_EOF;
- }
- }
- out->cld_start = start;
- out->cld_end = end;
-}
-
-/**
- * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
- * called in two ways:
- *
- * - as part of receive call-back, when server returns granted extent to
- * the client, and
- *
- * - when top-lock finds existing sub-lock in the cache.
- *
- * Note that lock mode is not propagated to the parent: i.e., if a CLM_READ
- * top-lock matches a CLM_WRITE sub-lock, the top-lock is still CLM_READ.
- */
-int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx)
-{
- struct cl_lock *parent;
- struct lovsub_object *subobj;
- struct cl_lock_descr *pd;
- struct cl_lock_descr *parent_descr;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- parent_descr = &parent->cll_descr;
- LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
-
- subobj = cl2lovsub(sublock->lss_cl.cls_obj);
- pd = &lov_env_info(env)->lti_ldescr;
-
- pd->cld_obj = parent_descr->cld_obj;
- pd->cld_mode = parent_descr->cld_mode;
- pd->cld_gid = parent_descr->cld_gid;
- lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
- lov->lls_sub[idx].sub_got = *d;
- /*
- * Notify top-lock about modification, if lock description changes
- * materially.
- */
- if (!cl_lock_ext_match(parent_descr, pd))
- result = cl_lock_modify(env, parent, pd);
- else
- result = 0;
- return result;
-}
-
-static int lovsub_lock_modify(const struct lu_env *env,
- const struct cl_lock_slice *s,
- const struct cl_lock_descr *d)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(s);
- struct lov_lock_link *scan;
- struct lov_lock *lov;
- int result = 0;
-
- LASSERT(cl_lock_mode_match(d->cld_mode,
- s->cls_lock->cll_descr.cld_mode));
- list_for_each_entry(scan, &lock->lss_parents, lll_list) {
- int rc;
-
- lov = scan->lll_super;
- lovsub_parent_lock(env, lov);
- rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
- lovsub_parent_unlock(env, lov);
- result = result ?: rc;
- }
- return result;
-}
-
-static int lovsub_lock_closure(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure)
-{
- struct lovsub_lock *sub;
- struct cl_lock *parent;
- struct lov_lock_link *scan;
- int result;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- sub = cl2lovsub_lock(slice);
- result = 0;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- parent = scan->lll_super->lls_cl.cls_lock;
- result = cl_lock_closure_build(env, parent, closure);
- if (result != 0)
- break;
- }
- return result;
-}
-
-/**
- * A helper function for lovsub_lock_delete() that deals with a given parent
- * top-lock.
- */
-static int lovsub_lock_delete_one(const struct lu_env *env,
- struct cl_lock *child, struct lov_lock *lov)
-{
- struct cl_lock *parent;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- if (parent->cll_error)
- return 0;
-
- result = 0;
- switch (parent->cll_state) {
- case CLS_ENQUEUED:
-		/* See LU-1355 for the case where a glimpse lock is
-		 * interrupted by a signal
- */
- LASSERT(parent->cll_flags & CLF_CANCELLED);
- break;
- case CLS_QUEUING:
- case CLS_FREEING:
- cl_lock_signal(env, parent);
- break;
- case CLS_INTRANSIT:
- /*
- * Here lies a problem: a sub-lock is canceled while top-lock
- * is being unlocked. Top-lock cannot be moved into CLS_NEW
- * state, because unlocking has to succeed eventually by
- * placing lock into CLS_CACHED (or failing it), see
- * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
- * state, because lov maintains an invariant that all
- * sub-locks exist in CLS_CACHED (this allows cached top-lock
- * to be reused immediately). Nor can we wait for top-lock
- * state to change, because this can be synchronous to the
- * current thread.
- *
- * We know for sure that lov_lock_unuse() will be called at
- * least one more time to finish un-using, so leave a mark on
- * the top-lock, that will be seen by the next call to
- * lov_lock_unuse().
- */
- if (cl_lock_is_intransit(parent))
- lov->lls_cancel_race = 1;
- break;
- case CLS_CACHED:
- /*
-		 * if a sub-lock is canceled, move its top-lock into CLS_NEW
-		 * state to preserve the invariant that a top-lock in
-		 * CLS_CACHED is immediately ready for re-use (i.e., has all
-		 * sub-locks), and so that the next attempt to re-use the
-		 * top-lock enqueues the missing sub-lock.
- */
- cl_lock_state_set(env, parent, CLS_NEW);
- /* fall through */
- case CLS_NEW:
- /*
-		 * if the last sub-lock is canceled, destroy the top-lock
-		 * (which is now `empty') proactively.
- */
- if (lov->lls_nr_filled == 0) {
- /* ... but unfortunately, this cannot be done easily,
-			 * as cancellation of a top-lock might acquire mutexes
-			 * of its other sub-locks, violating lock ordering,
- * see cl_lock_{cancel,delete}() preconditions.
- *
- * To work around this, the mutex of this sub-lock is
- * released, top-lock is destroyed, and sub-lock mutex
- * acquired again. The list of parents has to be
- * re-scanned from the beginning after this.
- *
-			 * Only do this if no mutexes other than on @child and
- * @parent are held by the current thread.
- *
-			 * TODO: The locking model here is too complex, because
-			 * the lock may be canceled and deleted voluntarily:
- * cl_lock_request
- * -> osc_lock_enqueue_wait
- * -> osc_lock_cancel_wait
- * -> cl_lock_delete
- * -> lovsub_lock_delete
- * -> cl_lock_cancel/delete
- * -> ...
- *
- * The better choice is to spawn a kernel thread for
- * this purpose. -jay
- */
- if (cl_lock_nr_mutexed(env) == 2) {
- cl_lock_mutex_put(env, child);
- cl_lock_cancel(env, parent);
- cl_lock_delete(env, parent);
- result = 1;
- }
- }
- break;
- case CLS_HELD:
- CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
- default:
- CERROR("Impossible state: %d\n", parent->cll_state);
- LBUG();
- break;
- }
-
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked in "bottom-to-top" delete, when lock destruction starts from the
- * sub-lock (e.g, as a result of ldlm lock LRU policy).
- */
-static void lovsub_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct cl_lock *child = slice->cls_lock;
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- int restart;
-
- LASSERT(cl_lock_is_mutexed(child));
-
- /*
- * Destruction of a sub-lock might take multiple iterations, because
-	 * when the last sub-lock of a given top-lock is deleted, the top-lock
-	 * is canceled proactively, and this requires releasing the sub-lock
-	 * mutex. Once the sub-lock mutex has been released, the list of its
-	 * parents has to be re-scanned from the beginning.
- */
- do {
- struct lov_lock *lov;
- struct lov_lock_link *scan;
- struct lov_lock_link *temp;
- struct lov_lock_sub *subdata;
-
- restart = 0;
- list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- subdata = &lov->lls_sub[scan->lll_idx];
- lovsub_parent_lock(env, lov);
- subdata->sub_got = subdata->sub_descr;
- lov_lock_unlink(env, scan, sub);
- restart = lovsub_lock_delete_one(env, child, lov);
- lovsub_parent_unlock(env, lov);
-
- if (restart) {
- cl_lock_mutex_get(env, child);
- break;
- }
- }
- } while (restart);
-}
-
-static int lovsub_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- struct lov_lock_link *scan;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
- if (lov)
- cl_lock_descr_print(env, cookie, p,
- &lov->lls_cl.cls_lock->cll_descr);
- (*p)(env, cookie, "] ");
- }
- return 0;
-}
-
static const struct cl_lock_operations lovsub_lock_ops = {
.clo_fini = lovsub_lock_fini,
- .clo_state = lovsub_lock_state,
- .clo_delete = lovsub_lock_delete,
- .clo_modify = lovsub_lock_modify,
- .clo_closure = lovsub_lock_closure,
- .clo_weigh = lovsub_lock_weigh,
- .clo_print = lovsub_lock_print
};
int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
@@ -460,8 +77,9 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 6c5430d93..bcaae1e5b 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -67,10 +67,10 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
lu_object_add(obj, below);
cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
-
}
static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
@@ -154,8 +154,9 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
lu_object_add_top(&hdr->coh_lu, obj);
los->lso_cl.co_ops = &lovsub_ops;
obj->lo_ops = &lovsub_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 2d945532b..9badedcce 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -60,11 +60,11 @@ static const struct cl_page_operations lovsub_page_ops = {
};
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *unused)
+ struct cl_page *page, pgoff_t index)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
- cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
+ cl_page_slice_add(page, &lsb->lsb_cl, obj, index, &lovsub_page_ops);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 38f267a60..5c7a15dd7 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -49,9 +49,9 @@ static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
obd_kobj);
struct client_obd *cli = &dev->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -74,9 +74,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
if (val < 1 || val > MDC_MAX_RIF_MAX)
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index b3bfdcb73..856c54e03 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -279,8 +279,7 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
- rec->sa_attr_flags =
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
+ rec->sa_attr_flags = op_data->op_attr_flags;
if ((op_data->op_attr.ia_valid & ATTR_GID) &&
in_group_p(op_data->op_attr.ia_gid))
rec->sa_suppgid =
@@ -439,7 +438,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-
}
}
@@ -455,7 +453,7 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req,
lock = ldlm_handle2lock(&op_data->op_lease_handle);
if (lock) {
data->cd_handle = lock->l_remote_handle;
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
@@ -481,9 +479,9 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
};
@@ -497,23 +495,23 @@ int mdc_enter_request(struct client_obd *cli)
struct mdc_cache_waiter mcw;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw),
&lwi);
if (rc) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (list_empty(&mcw.mcw_entry))
cli->cl_r_in_flight--;
list_del_init(&mcw.mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
} else {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
return rc;
}
@@ -523,7 +521,7 @@ void mdc_exit_request(struct client_obd *cli)
struct list_head *l, *tmp;
struct mdc_cache_waiter *mcw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
@@ -538,5 +536,5 @@ void mdc_exit_request(struct client_obd *cli)
}
/* Empty waiting list? Decrease reqs in-flight number */
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 958a164f6..3b1bc9111 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -869,7 +869,9 @@ resend:
	 * (explicit ones or those automatically generated by the kernel to
	 * clean up current FLocks upon exit) that can't be trashed
*/
- if ((rc == -EINTR) || (rc == -ETIMEDOUT))
+ if (((rc == -EINTR) || (rc == -ETIMEDOUT)) &&
+ (einfo->ei_type == LDLM_FLOCK) &&
+ (einfo->ei_mode == LCK_NL))
goto resend;
return rc;
}
@@ -963,7 +965,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
if (fid_is_sane(&op_data->op_fid2) &&
it->it_create_mode & M_CHECK_STALE &&
it->it_op != IT_GETATTR) {
-
/* Also: did we find the same inode? */
	/* server can return one of two fids:
* op_fid2 - new allocated fid - if file is created.
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index b91d3ff18..86b744536 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -142,9 +142,8 @@ static int mdc_getattr_common(struct obd_export *exp,
CDEBUG(D_NET, "mode: %o\n", body->mode);
+ mdc_update_max_ea_from_body(exp, body);
if (body->eadatasize != 0) {
- mdc_update_max_ea_from_body(exp, body);
-
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
if (!eadata)
@@ -1169,7 +1168,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_progress struct */
req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1203,7 +1202,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_progress struct */
archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1278,7 +1277,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
ptlrpc_request_set_replen(req);
@@ -1395,7 +1394,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_request struct */
req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1952,7 +1951,7 @@ static void lustre_swab_hal(struct hsm_action_list *h)
__swab32s(&h->hal_count);
__swab32s(&h->hal_archive_id);
__swab64s(&h->hal_flags);
- hai = hai_zero(h);
+ hai = hai_first(h);
for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
lustre_swab_hai(hai);
}
@@ -2249,7 +2248,7 @@ static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
 * recovery; a non-zero value will be returned if the lock can be canceled,
 * or zero if it cannot
*/
-static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+static int mdc_cancel_weight(struct ldlm_lock *lock)
{
if (lock->l_resource->lr_type != LDLM_IBITS)
return 0;
@@ -2314,12 +2313,14 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
return -ENOMEM;
mdc_init_rpc_lock(cli->cl_rpc_lock);
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ goto err_rpc_lock;
cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
if (!cli->cl_close_lock) {
rc = -ENOMEM;
- goto err_rpc_lock;
+ goto err_ptlrpcd_decref;
}
mdc_init_rpc_lock(cli->cl_close_lock);
@@ -2331,7 +2332,7 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
sptlrpc_lprocfs_cliobd_attach(obd);
ptlrpc_lprocfs_register_obd(obd);
- ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+ ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
obd->obd_namespace->ns_lvbo = &inode_lvbo;
@@ -2345,9 +2346,10 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
err_close_lock:
kfree(cli->cl_close_lock);
+err_ptlrpcd_decref:
+ ptlrpcd_decref();
err_rpc_lock:
kfree(cli->cl_rpc_lock);
- ptlrpcd_decref();
return rc;
}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3924b095b..2311a437c 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -502,8 +502,12 @@ static void do_requeue(struct config_llog_data *cld)
*/
down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
+ int rc;
+
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
- mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ if (rc && rc != -ENOENT)
+ CERROR("failed processing log: %d\n", rc);
} else {
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
@@ -734,7 +738,9 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct task_struct *task;
int rc;
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ goto err_noref;
rc = client_obd_setup(obd, lcfg);
if (rc)
@@ -773,6 +779,7 @@ err_cleanup:
client_obd_cleanup(obd);
err_decref:
ptlrpcd_decref();
+err_noref:
return rc;
}
@@ -1720,7 +1727,6 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
rc = -EINVAL;
goto out;
-
}
}
out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f5128b4f1..583fb5f33 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -36,6 +36,7 @@
* Client IO.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -132,6 +133,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
case CIT_WRITE:
break;
case CIT_FAULT:
+ break;
case CIT_FSYNC:
LASSERT(!io->ci_need_restart);
break;
@@ -159,7 +161,6 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
io->ci_type = iot;
INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
INIT_LIST_HEAD(&io->ci_lockset.cls_done);
INIT_LIST_HEAD(&io->ci_layers);
@@ -241,37 +242,7 @@ static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
const struct cl_lock_descr *d1)
{
return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu)) ?:
- __diff_normalize(d0->cld_start, d1->cld_start);
-}
-
-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- int ret;
-
- ret = lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu));
- if (ret)
- return ret;
- if (d0->cld_end < d1->cld_start)
- return -1;
- if (d0->cld_start > d0->cld_end)
- return 1;
- return 0;
-}
-
-static void cl_lock_descr_merge(struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- d0->cld_start = min(d0->cld_start, d1->cld_start);
- d0->cld_end = max(d0->cld_end, d1->cld_end);
-
- if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
- d0->cld_mode = CLM_WRITE;
-
- if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
- d0->cld_mode = CLM_GROUP;
+ lu_object_fid(&d1->cld_obj->co_lu));
}
/*
@@ -320,33 +291,35 @@ static void cl_io_locks_sort(struct cl_io *io)
} while (!done);
}
-/**
- * Check whether \a queue contains locks matching \a need.
- *
- * \retval +ve there is a matching lock in the \a queue
- * \retval 0 there are no matching locks in the \a queue
- */
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need)
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
{
- struct cl_io_lock_link *scan;
+ d0->cld_start = min(d0->cld_start, d1->cld_start);
+ d0->cld_end = max(d0->cld_end, d1->cld_end);
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- return 1;
- }
- return 0;
+ if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+ d0->cld_mode = CLM_WRITE;
+
+ if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+ d0->cld_mode = CLM_GROUP;
}
-EXPORT_SYMBOL(cl_queue_match);
-static int cl_queue_merge(const struct list_head *queue,
- const struct cl_lock_descr *need)
+static int cl_lockset_merge(const struct cl_lockset *set,
+ const struct cl_lock_descr *need)
{
struct cl_io_lock_link *scan;
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
+ list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
+ if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
continue;
+
+		/* Merge locks for the same object because the ldlm lock
+		 * server may expand the lock extent; otherwise there is a
+		 * deadlock case if two conflicting locks are queued for the
+		 * same object and the lock server expands one lock to
+		 * overlap the other. The side effect is that it can generate
+		 * a multi-stripe lock that may cause a cascading problem.
+ */
cl_lock_descr_merge(&scan->cill_descr, need);
CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
@@ -356,87 +329,20 @@ static int cl_queue_merge(const struct list_head *queue,
return 0;
}
-static int cl_lockset_match(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_match(&set->cls_curr, need) ||
- cl_queue_match(&set->cls_done, need);
-}
-
-static int cl_lockset_merge(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_merge(&set->cls_todo, need) ||
- cl_lockset_match(set, need);
-}
-
-static int cl_lockset_lock_one(const struct lu_env *env,
- struct cl_io *io, struct cl_lockset *set,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock;
- int result;
-
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
-
- if (!IS_ERR(lock)) {
- link->cill_lock = lock;
- list_move(&link->cill_linkage, &set->cls_curr);
- if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
- } else
- result = 0;
- } else
- result = PTR_ERR(lock);
- return result;
-}
-
-static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock = link->cill_lock;
-
- list_del_init(&link->cill_linkage);
- if (lock) {
- cl_lock_release(env, lock, "io", io);
- link->cill_lock = NULL;
- }
- if (link->cill_fini)
- link->cill_fini(env, link);
-}
-
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
struct cl_lockset *set)
{
struct cl_io_lock_link *link;
struct cl_io_lock_link *temp;
- struct cl_lock *lock;
int result;
result = 0;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- if (!cl_lockset_match(set, &link->cill_descr)) {
- /* XXX some locking to guarantee that locks aren't
- * expanded in between.
- */
- result = cl_lockset_lock_one(env, io, set, link);
- if (result != 0)
- break;
- } else
- cl_lock_link_fini(env, io, link);
- }
- if (result == 0) {
- list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
- lock = link->cill_lock;
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
- else
- break;
- }
+ result = cl_lock_request(env, io, &link->cill_lock);
+ if (result < 0)
+ break;
+
+ list_move(&link->cill_linkage, &set->cls_done);
}
return result;
}
@@ -492,16 +398,19 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
set = &io->ci_lockset;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
- cl_lock_link_fini(env, io, link);
-
- list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+ list_del_init(&link->cill_linkage);
+ if (link->cill_fini)
+ link->cill_fini(env, link);
+ }
list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- cl_unuse(env, link->cill_lock);
- cl_lock_link_fini(env, io, link);
+ list_del_init(&link->cill_linkage);
+ cl_lock_release(env, &link->cill_lock);
+ if (link->cill_fini)
+ link->cill_fini(env, link);
}
+
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_unlock)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
@@ -595,9 +504,9 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
{
int result;
- if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
+ if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
result = 1;
- else {
+ } else {
list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
@@ -627,8 +536,9 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
result = cl_io_lock_add(env, io, link);
if (result) /* lock match */
link->cill_fini(env, link);
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -692,42 +602,6 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
}
/**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
- int result = 1;
- loff_t start;
- loff_t end;
- pgoff_t idx;
-
- idx = page->cp_index;
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- /*
- * check that [start, end) and [pos, pos + count) extents
- * overlap.
- */
- if (!cl_io_is_append(io)) {
- const struct cl_io_rw_common *crw = &(io->u.ci_rw);
-
- start = cl_offset(page->cp_obj, idx);
- end = cl_offset(page->cp_obj, idx + 1);
- result = crw->crw_pos < end &&
- start < crw->crw_pos + crw->crw_count;
- }
- break;
- case CIT_FAULT:
- result = io->u.ci_fault.ft_index == idx;
- break;
- default:
- LBUG();
- }
- return result;
-}
-
-/**
* Called by read io, when page has to be read from the server.
*
* \see cl_io_operations::cio_read_page()
@@ -742,7 +616,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
LINVRNT(cl_page_is_owned(page, io));
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_page_in_io(page, io));
LINVRNT(cl_io_invariant(io));
queue = &io->ci_queue;
@@ -769,7 +642,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
break;
}
}
- if (result == 0)
+ if (result == 0 && queue->c2_qin.pl_nr > 0)
result = cl_io_submit_rw(env, io, CRT_READ, queue);
/*
* Unlock unsent pages in case of error.
@@ -781,77 +654,29 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_io_read_page);
/**
- * Called by write io to prepare page to receive data from user buffer.
+ * Commit a list of contiguous pages into writeback cache.
*
- * \see cl_io_operations::cio_prepare_write()
+ * \returns 0 if all pages were committed, or an error code on failure.
+ * \see cl_io_operations::cio_commit_async()
*/
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb)
{
const struct cl_io_slice *scan;
int result = 0;
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(cl_page_is_owned(page, io));
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- LASSERT(cl_page_in_io(page, io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->cio_prepare_write) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_prepare_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_prepare_write);
-
-/**
- * Called by write io after user data were copied into a page.
- *
- * \see cl_io_operations::cio_commit_write()
- */
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- /*
- * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
- * already called cl_page_cache_add(), moving page into CPS_CACHED
- * state. Better (and more general) way of dealing with such situation
- * is needed.
- */
- LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
- LASSERT(cl_page_in_io(page, io));
-
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_commit_write) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_commit_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
+ if (!scan->cis_iop->cio_commit_async)
+ continue;
+ result = scan->cis_iop->cio_commit_async(env, scan, queue,
+ from, to, cb);
+ if (result != 0)
+ break;
}
- LINVRNT(result <= 0);
return result;
}
-EXPORT_SYMBOL(cl_io_commit_write);
+EXPORT_SYMBOL(cl_io_commit_async);
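
For orientation, a minimal sketch of how a write path might drive the new
interface; the demo_* helper and callback names are illustrative assumptions,
not part of this patch:

static void demo_commit_cb(const struct lu_env *env, struct cl_io *io,
			   struct cl_page *page)
{
	/* each page lands here once the layers below have accepted it
	 * into the writeback cache; drop our ownership of it */
	cl_page_disown(env, io, page);
}

static int demo_commit(const struct lu_env *env, struct cl_io *io,
		       struct cl_page_list *plist, int from, int to)
{
	/* one call pushes the whole contiguous list; on success every
	 * page has already been handed to demo_commit_cb() */
	return cl_io_commit_async(env, io, plist, from, to, demo_commit_cb);
}
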
/**
* Submits a list of pages for immediate io.
@@ -869,13 +694,10 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
const struct cl_io_slice *scan;
int result = 0;
- LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
-
cl_io_for_each(scan, io) {
- if (!scan->cis_iop->req_op[crt].cio_submit)
+ if (!scan->cis_iop->cio_submit)
continue;
- result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
- queue);
+ result = scan->cis_iop->cio_submit(env, scan, crt, queue);
if (result != 0)
break;
}
@@ -887,6 +709,9 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
}
EXPORT_SYMBOL(cl_io_submit_rw);
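
The per-request-type req_op[] table is gone; a layer now publishes a single
cio_submit hook and receives the request type as an argument. A sketch of the
new contract (the demo_* names and the trivial body are assumptions):

static int demo_io_submit(const struct lu_env *env,
			  const struct cl_io_slice *ios,
			  enum cl_req_type crt, struct cl_2queue *queue)
{
	struct cl_page *page;
	struct cl_page *tmp;

	/* one hook now serves CRT_READ and CRT_WRITE alike; crt says
	 * which direction this submission is for */
	cl_page_list_for_each_safe(page, tmp, &queue->c2_qin)
		/* hand the page to the transfer engine here, then move
		 * it to c2_qout so the caller can track what went out */
		cl_page_list_move(&queue->c2_qout, &queue->c2_qin, page);
	return 0;
}

static const struct cl_io_operations demo_io_ops = {
	.cio_submit = demo_io_submit,
};
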
+static void cl_page_list_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+
/**
 * Submit a sync_io and wait for the IO to finish, or for an error to occur.
 * If \a timeout is zero, the wait is unconditional.
@@ -904,7 +729,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
pg->cp_sync_io = anchor;
}
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
+ cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
rc = cl_io_submit_rw(env, io, iot, queue);
if (rc == 0) {
/*
@@ -915,12 +740,12 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
*/
cl_page_list_for_each(pg, &queue->c2_qin) {
pg->cp_sync_io = NULL;
- cl_sync_io_note(anchor, 1);
+ cl_sync_io_note(env, anchor, 1);
}
/* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout,
- anchor, timeout);
+ rc = cl_sync_io_wait(env, anchor, timeout);
+ cl_page_list_assume(env, io, &queue->c2_qout);
} else {
LASSERT(list_empty(&queue->c2_qout.pl_pages));
cl_page_list_for_each(pg, &queue->c2_qin)
@@ -931,26 +756,6 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_io_submit_sync);
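
cl_io_submit_sync() now hides the whole anchor dance shown above. A hedged
caller-side sketch (demo_read_sync is illustrative, not from this patch):

static int demo_read_sync(const struct lu_env *env, struct cl_io *io,
			  struct cl_2queue *queue)
{
	int rc;

	/* timeout 0: wait for the IO unconditionally */
	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
	if (rc == 0)
		/* everything that completed is in c2_qout and has been
		 * re-assumed by this IO; release it */
		cl_page_list_disown(env, io, &queue->c2_qout);
	return rc;
}
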
/**
- * Cancel an IO which has been submitted by cl_io_submit_rw.
- */
-static int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue)
-{
- struct cl_page *page;
- int result = 0;
-
- CERROR("Canceling ongoing page transmission\n");
- cl_page_list_for_each(page, queue) {
- int rc;
-
- LINVRNT(cl_page_in_io(page, io));
- rc = cl_page_cancel(env, page);
- result = result ?: rc;
- }
- return result;
-}
-
-/**
* Main io loop.
*
* Pumps io through iterations calling
@@ -1072,8 +877,8 @@ EXPORT_SYMBOL(cl_page_list_add);
/**
* Removes a page from a page list.
*/
-static void cl_page_list_del(const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page)
+void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
+ struct cl_page *page)
{
LASSERT(plist->pl_nr > 0);
LINVRNT(plist->pl_owner == current);
@@ -1086,6 +891,7 @@ static void cl_page_list_del(const struct lu_env *env,
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
}
+EXPORT_SYMBOL(cl_page_list_del);
/**
* Moves a page from one page list to another.
@@ -1106,6 +912,24 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
EXPORT_SYMBOL(cl_page_list_move);
/**
+ * Moves a page from one page list to the head of another list.
+ */
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page)
+{
+ LASSERT(src->pl_nr > 0);
+ LINVRNT(dst->pl_owner == current);
+ LINVRNT(src->pl_owner == current);
+
+ list_move(&page->cp_batch, &dst->pl_pages);
+ --src->pl_nr;
+ ++dst->pl_nr;
+ lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ src, dst);
+}
+EXPORT_SYMBOL(cl_page_list_move_head);
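
A plausible use of the new helper: putting a page back at the front of a
queue so it is retried first on the next pass (the queue names and wrapper
are illustrative assumptions):

static void demo_requeue_first(struct cl_page_list *todo,
			       struct cl_page_list *doing,
			       struct cl_page *page)
{
	/* move @page from @doing back to the head of @todo so the
	 * next pass sees it first */
	cl_page_list_move_head(todo, doing, page);
}
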
+
+/**
 * Splice the cl_page_list, just as list_splice() does for an ordinary list.
*/
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
@@ -1162,8 +986,7 @@ EXPORT_SYMBOL(cl_page_list_disown);
/**
* Releases pages from queue.
*/
-static void cl_page_list_fini(const struct lu_env *env,
- struct cl_page_list *plist)
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
struct cl_page *page;
struct cl_page *temp;
@@ -1174,6 +997,7 @@ static void cl_page_list_fini(const struct lu_env *env,
cl_page_list_del(env, plist, page);
LASSERT(plist->pl_nr == 0);
}
+EXPORT_SYMBOL(cl_page_list_fini);
/**
* Assumes all pages in a queue.
@@ -1260,7 +1084,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
/**
* Returns top-level io.
*
- * \see cl_object_top(), cl_page_top().
+ * \see cl_object_top()
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
@@ -1323,19 +1147,14 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
int result;
result = 0;
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init) {
- result = dev->cd_ops->cdo_req_init(env,
- dev, req);
- if (result != 0)
- break;
- }
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+ if (dev->cd_ops->cdo_req_init) {
+ result = dev->cd_ops->cdo_req_init(env, dev, req);
+ if (result != 0)
+ break;
}
- page = page->cp_child;
- } while (page && result == 0);
+ }
return result;
}
@@ -1384,14 +1203,16 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
if (req->crq_o) {
req->crq_nrobjs = nr_objects;
result = cl_req_init(env, req, page);
- } else
+ } else {
result = -ENOMEM;
+ }
if (result != 0) {
cl_req_completion(env, req, result);
req = ERR_PTR(result);
}
- } else
+ } else {
req = ERR_PTR(-ENOMEM);
+ }
return req;
}
EXPORT_SYMBOL(cl_req_alloc);
@@ -1406,8 +1227,6 @@ void cl_req_page_add(const struct lu_env *env,
struct cl_req_obj *rqo;
int i;
- page = cl_page_top(page);
-
LASSERT(list_empty(&page->cp_flight));
LASSERT(!page->cp_req);
@@ -1438,8 +1257,6 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
struct cl_req *req = page->cp_req;
- page = cl_page_top(page);
-
LASSERT(!list_empty(&page->cp_flight));
LASSERT(req->crq_nrpages > 0);
@@ -1511,25 +1328,39 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
}
EXPORT_SYMBOL(cl_req_attr_set);
+/* cl_sync_io_end() is the default end-io callback; it assumes the caller
+ * will call cl_sync_io_wait() to wait for the IO to finish.
+ */
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
+{
+ wake_up_all(&anchor->csi_waitq);
+
+ /* it's safe to nuke or reuse anchor now */
+ atomic_set(&anchor->csi_barrier, 0);
+}
+EXPORT_SYMBOL(cl_sync_io_end);
/**
- * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
+ * Initialize synchronous io wait anchor for \a nr entities.
*/
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *))
{
+ memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nrpages);
- atomic_set(&anchor->csi_barrier, nrpages > 0);
+ atomic_set(&anchor->csi_sync_nr, nr);
+ atomic_set(&anchor->csi_barrier, nr > 0);
anchor->csi_sync_rc = 0;
+ anchor->csi_end_io = end;
+ LASSERT(end);
}
EXPORT_SYMBOL(cl_sync_io_init);
/**
- * Wait until all transfer completes. Transfer completion routine has to call
- * cl_sync_io_note() for every page.
+ * Wait until all IO completes. The transfer completion routine has to call
+ * cl_sync_io_note() for every entity.
*/
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout)
{
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
@@ -1542,11 +1373,9 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
+ CERROR("IO failed: %d, still wait for %d remaining entries\n",
rc, atomic_read(&anchor->csi_sync_nr));
- (void)cl_io_cancel(env, io, queue);
-
lwi = (struct l_wait_info) { 0 };
(void)l_wait_event(anchor->csi_waitq,
atomic_read(&anchor->csi_sync_nr) == 0,
@@ -1555,14 +1384,12 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
rc = anchor->csi_sync_rc;
}
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
- cl_page_list_assume(env, io, queue);
/* wait until cl_sync_io_note() has done wakeup */
while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
cpu_relax();
}
- POISON(anchor, 0x5a, sizeof(*anchor));
return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);
@@ -1570,7 +1397,8 @@ EXPORT_SYMBOL(cl_sync_io_wait);
/**
* Indicate that transfer of a single page completed.
*/
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret)
{
if (anchor->csi_sync_rc == 0 && ioret < 0)
anchor->csi_sync_rc = ioret;
@@ -1581,9 +1409,9 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
*/
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- wake_up_all(&anchor->csi_waitq);
- /* it's safe to nuke or reuse anchor now */
- atomic_set(&anchor->csi_barrier, 0);
+ LASSERT(anchor->csi_end_io);
+ anchor->csi_end_io(env, anchor);
+ /* Can't access anchor any more */
}
}
EXPORT_SYMBOL(cl_sync_io_note);
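
Taken together, the anchor now works as a small producer/consumer contract:
initialize with a count and an end-io callback, note once per completion,
wait on the other side. A minimal sketch using the stock cl_sync_io_end();
submit_one() is an assumed helper, not part of this patch:

/* assumed helper that starts one transfer accounted against @anchor */
static void submit_one(const struct lu_env *env, struct cl_sync_io *anchor);

static int demo_sync_wait(const struct lu_env *env, int nr)
{
	struct cl_sync_io anchor;
	int i;

	/* one count per in-flight entity; cl_sync_io_end wakes the
	 * waiter and clears the barrier when the count hits zero */
	cl_sync_io_init(&anchor, nr, cl_sync_io_end);

	for (i = 0; i < nr; i++)
		submit_one(env, &anchor);	/* the completion side calls
						 * cl_sync_io_note(env,
						 * &anchor, ioret) */

	/* timeout 0: wait unconditionally */
	return cl_sync_io_wait(env, &anchor, 0);
}
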
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index aec644eb4..26a576b63 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -36,6 +36,7 @@
* Client Extent Lock.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -47,138 +48,18 @@
#include "../include/cl_object.h"
#include "cl_internal.h"
-/** Lock class of cl_lock::cll_guard */
-static struct lock_class_key cl_lock_guard_class;
-static struct kmem_cache *cl_lock_kmem;
-
-static struct lu_kmem_descr cl_lock_caches[] = {
- {
- .ckd_cache = &cl_lock_kmem,
- .ckd_name = "cl_lock_kmem",
- .ckd_size = sizeof (struct cl_lock)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-#define CS_LOCK_INC(o, item)
-#define CS_LOCK_DEC(o, item)
-#define CS_LOCKSTATE_INC(o, state)
-#define CS_LOCKSTATE_DEC(o, state)
-
-/**
- * Basic lock invariant that is maintained at all times. Caller either has a
- * reference to \a lock, or somehow assures that \a lock cannot be freed.
- *
- * \see cl_lock_invariant()
- */
-static int cl_lock_invariant_trusted(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- atomic_read(&lock->cll_ref) >= lock->cll_holds &&
- lock->cll_holds >= lock->cll_users &&
- lock->cll_holds >= 0 &&
- lock->cll_users >= 0 &&
- lock->cll_depth >= 0;
-}
-
-/**
- * Stronger lock invariant, checking that caller has a reference on a lock.
- *
- * \see cl_lock_invariant_trusted()
- */
-static int cl_lock_invariant(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- int result;
-
- result = atomic_read(&lock->cll_ref) > 0 &&
- cl_lock_invariant_trusted(env, lock);
- if (!result && env)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
- return result;
-}
-
-/**
- * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
- */
-static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
-{
- return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
-}
-
-/**
- * Returns a set of counters for this lock, depending on a lock nesting.
- */
-static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- struct cl_thread_info *info;
- enum clt_nesting_level nesting;
-
- info = cl_env_info(env);
- nesting = cl_lock_nesting(lock);
- LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
- return &info->clt_counters[nesting];
-}
-
static void cl_lock_trace0(int level, const struct lu_env *env,
const char *prefix, const struct cl_lock *lock,
const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
- prefix, lock, atomic_read(&lock->cll_ref),
- lock->cll_guarder, lock->cll_depth,
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags,
- env, h->coh_nesting, cl_lock_nr_mutexed(env),
- func, line);
+ CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
+ prefix, lock, env, h->coh_nesting, func, line);
}
-
-#define cl_lock_trace(level, env, prefix, lock) \
+#define cl_lock_trace(level, env, prefix, lock) \
cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
-#define RETIP ((unsigned long)__builtin_return_address(0))
-
-#ifdef CONFIG_LOCKDEP
-static struct lock_class_key cl_lock_key;
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{
- lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
-}
-
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
- lock_map_acquire(&lock->dep_map);
-}
-
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
- lock_release(&lock->dep_map, 0, RETIP);
-}
-
-#else /* !CONFIG_LOCKDEP */
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{}
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{}
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{}
-
-#endif /* !CONFIG_LOCKDEP */
-
/**
* Adds lock slice to the compound lock.
*
@@ -199,62 +80,10 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
}
EXPORT_SYMBOL(cl_lock_slice_add);
-/**
- * Returns true iff a lock with the mode \a has provides at least the same
- * guarantees as a lock with the mode \a need.
- */
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
-{
- LINVRNT(need == CLM_READ || need == CLM_WRITE ||
- need == CLM_PHANTOM || need == CLM_GROUP);
- LINVRNT(has == CLM_READ || has == CLM_WRITE ||
- has == CLM_PHANTOM || has == CLM_GROUP);
- CLASSERT(CLM_PHANTOM < CLM_READ);
- CLASSERT(CLM_READ < CLM_WRITE);
- CLASSERT(CLM_WRITE < CLM_GROUP);
-
- if (has != CLM_GROUP)
- return need <= has;
- else
- return need == has;
-}
-EXPORT_SYMBOL(cl_lock_mode_match);
-
-/**
- * Returns true iff extent portions of lock descriptions match.
- */
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
-{
- return
- has->cld_start <= need->cld_start &&
- has->cld_end >= need->cld_end &&
- cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
- (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
-}
-EXPORT_SYMBOL(cl_lock_ext_match);
-
-/**
- * Returns true iff a lock with the description \a has provides at least the
- * same guarantees as a lock with the description \a need.
- */
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
+void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock)
{
- return
- cl_object_same(has->cld_obj, need->cld_obj) &&
- cl_lock_ext_match(has, need);
-}
-EXPORT_SYMBOL(cl_lock_descr_match);
+ cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock);
-static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj = lock->cll_descr.cld_obj;
-
- LINVRNT(!cl_lock_is_mutexed(lock));
-
- cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- might_sleep();
while (!list_empty(&lock->cll_layers)) {
struct cl_lock_slice *slice;
@@ -263,350 +92,36 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
- CS_LOCK_DEC(obj, total);
- CS_LOCKSTATE_DEC(obj, lock->cll_state);
- lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
- cl_object_put(env, obj);
- lu_ref_fini(&lock->cll_reference);
- lu_ref_fini(&lock->cll_holders);
- mutex_destroy(&lock->cll_guard);
- kmem_cache_free(cl_lock_kmem, lock);
-}
-
-/**
- * Releases a reference on a lock.
- *
- * When last reference is released, lock is returned to the cache, unless it
- * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
- * immediately.
- *
- * \see cl_object_put(), cl_page_put()
- */
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj;
-
- LINVRNT(cl_lock_invariant(env, lock));
- obj = lock->cll_descr.cld_obj;
- LINVRNT(obj);
-
- CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
-
- if (atomic_dec_and_test(&lock->cll_ref)) {
- if (lock->cll_state == CLS_FREEING) {
- LASSERT(list_empty(&lock->cll_linkage));
- cl_lock_free(env, lock);
- }
- CS_LOCK_DEC(obj, busy);
- }
-}
-EXPORT_SYMBOL(cl_lock_put);
-
-/**
- * Acquires an additional reference to a lock.
- *
- * This can be called only by caller already possessing a reference to \a
- * lock.
- *
- * \see cl_object_get(), cl_page_get()
- */
-void cl_lock_get(struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(NULL, lock));
- CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- atomic_inc(&lock->cll_ref);
-}
-EXPORT_SYMBOL(cl_lock_get);
-
-/**
- * Acquires a reference to a lock.
- *
- * This is much like cl_lock_get(), except that this function can be used to
- * acquire initial reference to the cached lock. Caller has to deal with all
- * possible races. Use with care!
- *
- * \see cl_page_get_trust()
- */
-void cl_lock_get_trust(struct cl_lock *lock)
-{
- CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- if (atomic_inc_return(&lock->cll_ref) == 1)
- CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
-}
-EXPORT_SYMBOL(cl_lock_get_trust);
-
-/**
- * Helper function destroying the lock that wasn't completely initialized.
- *
- * Other threads can acquire references to the top-lock through its
- * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
- */
-static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
-}
-
-static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *descr)
-{
- struct cl_lock *lock;
- struct lu_object_header *head;
-
- lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
- if (lock) {
- atomic_set(&lock->cll_ref, 1);
- lock->cll_descr = *descr;
- lock->cll_state = CLS_NEW;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
- lock);
- INIT_LIST_HEAD(&lock->cll_layers);
- INIT_LIST_HEAD(&lock->cll_linkage);
- INIT_LIST_HEAD(&lock->cll_inclosure);
- lu_ref_init(&lock->cll_reference);
- lu_ref_init(&lock->cll_holders);
- mutex_init(&lock->cll_guard);
- lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
- init_waitqueue_head(&lock->cll_wq);
- head = obj->co_lu.lo_header;
- CS_LOCKSTATE_INC(obj, CLS_NEW);
- CS_LOCK_INC(obj, total);
- CS_LOCK_INC(obj, create);
- cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
- int err;
-
- err = obj->co_ops->coo_lock_init(env, obj, lock, io);
- if (err != 0) {
- cl_lock_finish(env, lock);
- lock = ERR_PTR(err);
- break;
- }
- }
- } else
- lock = ERR_PTR(-ENOMEM);
- return lock;
-}
-
-/**
- * Transfer the lock into INTRANSIT state and return the original state.
- *
- * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
- * \post state: CLS_INTRANSIT
- * \see CLS_INTRANSIT
- */
-static enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
- struct cl_lock *lock)
-{
- enum cl_lock_state state = lock->cll_state;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(state != CLS_INTRANSIT);
- LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
- "Malformed lock state %d.\n", state);
-
- cl_lock_state_set(env, lock, CLS_INTRANSIT);
- lock->cll_intransit_owner = current;
- cl_lock_hold_add(env, lock, "intransit", current);
- return state;
-}
-
-/**
- * Exit the intransit state and restore the lock state to the original state
- */
-static void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(state != CLS_INTRANSIT);
- LASSERT(lock->cll_intransit_owner == current);
-
- lock->cll_intransit_owner = NULL;
- cl_lock_state_set(env, lock, state);
- cl_lock_unhold(env, lock, "intransit", current);
-}
-
-/**
- * Checking whether the lock is intransit state
- */
-int cl_lock_is_intransit(struct cl_lock *lock)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- return lock->cll_state == CLS_INTRANSIT &&
- lock->cll_intransit_owner != current;
-}
-EXPORT_SYMBOL(cl_lock_is_intransit);
-/**
- * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
- * truncate and O_APPEND cannot be reused for read/non-append-write, as they
- * cover multiple stripes and can trigger cascading timeouts.
- */
-static int cl_lock_fits_into(const struct lu_env *env,
- const struct cl_lock *lock,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_fits_into &&
- !slice->cls_ops->clo_fits_into(env, slice, need, io))
- return 0;
- }
- return 1;
+ POISON(lock, 0x5a, sizeof(*lock));
}
+EXPORT_SYMBOL(cl_lock_fini);
-static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
+int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_io *io)
{
- struct cl_lock *lock;
- struct cl_object_header *head;
-
- head = cl_object_header(obj);
- assert_spin_locked(&head->coh_lock_guard);
- CS_LOCK_INC(obj, lookup);
- list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
- int matched;
-
- matched = cl_lock_ext_match(&lock->cll_descr, need) &&
- lock->cll_state < CLS_FREEING &&
- lock->cll_error == 0 &&
- !(lock->cll_flags & CLF_CANCELLED) &&
- cl_lock_fits_into(env, lock, need, io);
- CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
- PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
- matched);
- if (matched) {
- cl_lock_get_trust(lock);
- CS_LOCK_INC(obj, hit);
- return lock;
- }
- }
- return NULL;
-}
-
-/**
- * Returns a lock matching description \a need.
- *
- * This is the main entry point into the cl_lock caching interface. First, a
- * cache (implemented as a per-object linked list) is consulted. If lock is
- * found there, it is returned immediately. Otherwise new lock is allocated
- * and returned. In any case, additional reference to lock is acquired.
- *
- * \see cl_object_find(), cl_page_find()
- */
-static struct cl_lock *cl_lock_find(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
+ struct cl_object *obj = lock->cll_descr.cld_obj;
+ struct cl_object *scan;
+ int result = 0;
- if (!lock) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
+ /* Make sure cl_lock::cll_descr is initialized. */
+ LASSERT(obj);
- spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (!ghost) {
- cl_lock_get_trust(lock);
- list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
- CS_LOCK_INC(obj, busy);
- } else {
- spin_unlock(&head->coh_lock_guard);
- /*
- * Other threads can acquire references to the
- * top-lock through its sub-locks. Hence, it
- * cannot be cl_lock_free()-ed immediately.
- */
- cl_lock_finish(env, lock);
- lock = ghost;
- }
+ INIT_LIST_HEAD(&lock->cll_layers);
+ list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
+ result = scan->co_ops->coo_lock_init(env, scan, lock, io);
+ if (result != 0) {
+ cl_lock_fini(env, lock);
+ break;
}
}
- return lock;
-}
-/**
- * Returns existing lock matching given description. This is similar to
- * cl_lock_find() except that no new lock is created, and returned lock is
- * guaranteed to be in enum cl_lock_state::CLS_HELD state.
- */
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- do {
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
- if (!lock)
- return NULL;
-
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state == CLS_INTRANSIT)
- /* Don't care return value. */
- cl_lock_state_wait(env, lock);
- if (lock->cll_state == CLS_FREEING) {
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
- } while (!lock);
-
- cl_lock_hold_add(env, lock, scope, source);
- cl_lock_user_add(env, lock);
- if (lock->cll_state == CLS_CACHED)
- cl_use_try(env, lock, 1);
- if (lock->cll_state == CLS_HELD) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock, 0);
- cl_lock_put(env, lock);
- } else {
- cl_unuse_try(env, lock);
- cl_lock_unhold(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
-
- return lock;
+ return result;
}
-EXPORT_SYMBOL(cl_lock_peek);
+EXPORT_SYMBOL(cl_lock_init);
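
With the cache, mutex and state machine gone, a cl_lock is embedded in its
user and lives through a short init/enqueue/cancel/fini cycle. A hedged
sketch of that lifecycle (error handling trimmed; passing a NULL anchor for
a synchronous enqueue is an assumption of this sketch):

static int demo_lock_cycle(const struct lu_env *env, struct cl_io *io,
			   struct cl_lock *lock)
{
	int rc;

	/* caller fills lock->cll_descr (object, extent, mode) first */
	rc = cl_lock_init(env, lock, io);
	if (rc != 0)
		return rc;	/* cl_lock_init() already ran cl_lock_fini() */

	rc = cl_lock_enqueue(env, io, lock, NULL);
	/* ... IO under the lock when rc == 0 ... */
	cl_lock_cancel(env, lock);
	cl_lock_fini(env, lock);
	return rc;
}
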
/**
- * Returns a slice within a lock, corresponding to the given layer in the
+ * Returns a slice within a lock, corresponding to the given layer in the
* device stack.
*
* \see cl_page_at()
@@ -616,8 +131,6 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
{
const struct cl_lock_slice *slice;
- LINVRNT(cl_lock_invariant_trusted(NULL, lock));
-
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
return slice;
@@ -626,1537 +139,96 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
}
EXPORT_SYMBOL(cl_lock_at);
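
A hedged example of a layer fetching its own slice with cl_lock_at(); the
demo_lock_slice type is an illustrative stand-in for a layer's private slice:

struct demo_lock_slice {
	struct cl_lock_slice	dls_cl;
	/* layer-private state would live here */
};

static const struct demo_lock_slice *
demo_lock_slice_get(const struct cl_lock *lock,
		    const struct lu_device_type *dtype)
{
	const struct cl_lock_slice *slice = cl_lock_at(lock, dtype);

	/* NULL when this lock has no slice for that device type */
	return slice ? container_of(slice, struct demo_lock_slice,
				    dls_cl) : NULL;
}
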
-static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- counters = cl_lock_counters(env, lock);
- lock->cll_depth++;
- counters->ctc_nr_locks_locked++;
- lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
- cl_lock_trace(D_TRACE, env, "got mutex", lock);
-}
-
-/**
- * Locks cl_lock object.
- *
- * This is used to manipulate cl_lock fields, and to serialize state
- * transitions in the lock state machine.
- *
- * \post cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_put()
- */
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_guarder == current) {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_depth > 0);
- } else {
- struct cl_object_header *hdr;
- struct cl_thread_info *info;
- int i;
-
- LINVRNT(lock->cll_guarder != current);
- hdr = cl_object_header(lock->cll_descr.cld_obj);
- /*
- * Check that mutices are taken in the bottom-to-top order.
- */
- info = cl_env_info(env);
- for (i = 0; i < hdr->coh_nesting; ++i)
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
- lock->cll_guarder = current;
- LINVRNT(lock->cll_depth == 0);
- }
- cl_lock_mutex_tail(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_mutex_get);
-
-/**
- * Try-locks cl_lock object.
- *
- * \retval 0 \a lock was successfully locked
- *
- * \retval -EBUSY \a lock cannot be locked right now
- *
- * \post ergo(result == 0, cl_lock_is_mutexed(lock))
- *
- * \see cl_lock_mutex_get()
- */
-static int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
-
- result = 0;
- if (lock->cll_guarder == current) {
- LINVRNT(lock->cll_depth > 0);
- cl_lock_mutex_tail(env, lock);
- } else if (mutex_trylock(&lock->cll_guard)) {
- LINVRNT(lock->cll_depth == 0);
- lock->cll_guarder = current;
- cl_lock_mutex_tail(env, lock);
- } else
- result = -EBUSY;
- return result;
-}
-
-/**
- * Unlocks cl_lock object.
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_get()
- */
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_guarder == current);
- LINVRNT(lock->cll_depth > 0);
-
- counters = cl_lock_counters(env, lock);
- LINVRNT(counters->ctc_nr_locks_locked > 0);
-
- cl_lock_trace(D_TRACE, env, "put mutex", lock);
- lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
- counters->ctc_nr_locks_locked--;
- if (--lock->cll_depth == 0) {
- lock->cll_guarder = NULL;
- mutex_unlock(&lock->cll_guard);
- }
-}
-EXPORT_SYMBOL(cl_lock_mutex_put);
-
-/**
- * Returns true iff lock's mutex is owned by the current thread.
- */
-int cl_lock_is_mutexed(struct cl_lock *lock)
-{
- return lock->cll_guarder == current;
-}
-EXPORT_SYMBOL(cl_lock_is_mutexed);
-
-/**
- * Returns number of cl_lock mutices held by the current thread (environment).
- */
-int cl_lock_nr_mutexed(const struct lu_env *env)
-{
- struct cl_thread_info *info;
- int i;
- int locked;
-
- /*
- * NOTE: if summation across all nesting levels (currently 2) proves
- * too expensive, a summary counter can be added to
- * struct cl_thread_info.
- */
- info = cl_env_info(env);
- for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- locked += info->clt_counters[i].ctc_nr_locks_locked;
- return locked;
-}
-EXPORT_SYMBOL(cl_lock_nr_mutexed);
-
-static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- if (!(lock->cll_flags & CLF_CANCELLED)) {
- const struct cl_lock_slice *slice;
-
- lock->cll_flags |= CLF_CANCELLED;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_cancel)
- slice->cls_ops->clo_cancel(env, slice);
- }
- }
-}
-
-static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object_header *head;
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_state < CLS_FREEING) {
- bool in_cache;
-
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_FREEING);
-
- head = cl_object_header(lock->cll_descr.cld_obj);
-
- spin_lock(&head->coh_lock_guard);
- in_cache = !list_empty(&lock->cll_linkage);
- if (in_cache)
- list_del_init(&lock->cll_linkage);
- spin_unlock(&head->coh_lock_guard);
-
- if (in_cache) /* coh_locks cache holds a refcount. */
- cl_lock_put(env, lock);
-
- /*
- * From now on, no new references to this lock can be acquired
- * by cl_lock_lookup().
- */
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete)
- slice->cls_ops->clo_delete(env, slice);
- }
- /*
- * From now on, no new references to this lock can be acquired
- * by layer-specific means (like a pointer from struct
- * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
- * lov).
- *
- * Lock will be finally freed in cl_lock_put() when last of
- * existing references goes away.
- */
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
- * top-lock (nesting == 0) accounts for this modification in the per-thread
- * debugging counters. Sub-lock holds can be released by a thread different
- * from one that acquired it.
- */
-static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_holds += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_held += delta;
- LASSERT(counters->ctc_nr_held >= 0);
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
- * cl_lock_hold_mod() for the explanation of the debugging code.
- */
-static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_users += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_used += delta;
- LASSERT(counters->ctc_nr_used >= 0);
- }
-}
-
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
- lu_ref_del(&lock->cll_holders, scope, source);
- cl_lock_hold_mod(env, lock, -1);
- if (lock->cll_holds == 0) {
- CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
- if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
- lock->cll_descr.cld_mode == CLM_GROUP ||
- lock->cll_state != CLS_CACHED)
- /*
- * If lock is still phantom or grouplock when user is
- * done with it---destroy the lock.
- */
- lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
- if (lock->cll_flags & CLF_CANCELPEND) {
- lock->cll_flags &= ~CLF_CANCELPEND;
- cl_lock_cancel0(env, lock);
- }
- if (lock->cll_flags & CLF_DOOMED) {
- /* no longer doomed: it's dead... Jim. */
- lock->cll_flags &= ~CLF_DOOMED;
- cl_lock_delete0(env, lock);
- }
- }
-}
-EXPORT_SYMBOL(cl_lock_hold_release);
-
-/**
- * Waits until lock state is changed.
- *
- * This function is called with cl_lock mutex locked, atomically releases
- * mutex and goes to sleep, waiting for a lock state change (signaled by
- * cl_lock_signal()), and re-acquires the mutex before return.
- *
- * This function is used to wait until lock state machine makes some progress
- * and to emulate synchronous operations on top of asynchronous lock
- * interface.
- *
- * \retval -EINTR wait was interrupted
- *
- * \retval 0 wait wasn't interrupted
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_signal()
- */
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- wait_queue_t waiter;
- sigset_t blocked;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_depth == 1);
- LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
- cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
- result = lock->cll_error;
- if (result == 0) {
- /* To avoid being interrupted by the 'non-fatal' signals
- * (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305
- */
- blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&lock->cll_wq, &waiter);
- set_current_state(TASK_INTERRUPTIBLE);
- cl_lock_mutex_put(env, lock);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- /* Returning ERESTARTSYS instead of EINTR so syscalls
- * can be restarted if signals are pending here
- */
- result = -ERESTARTSYS;
- if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- schedule();
- if (!cfs_signal_pending())
- result = 0;
- }
-
- cl_lock_mutex_get(env, lock);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&lock->cll_wq, &waiter);
-
- /* Restore old blocked signals */
- cfs_restore_sigs(blocked);
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_state_wait);
-
-static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state)
- slice->cls_ops->clo_state(env, slice, state);
- wake_up_all(&lock->cll_wq);
-}
-
-/**
- * Notifies waiters that lock state changed.
- *
- * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
- * layers about state change by calling cl_lock_operations::clo_state()
- * top-to-bottom.
- */
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
- cl_lock_state_signal(env, lock, lock->cll_state);
-}
-EXPORT_SYMBOL(cl_lock_signal);
-
-/**
- * Changes lock state.
- *
- * This function is invoked to notify layers that lock state changed, possible
- * as a result of an asynchronous event such as call-back reception.
- *
- * \post lock->cll_state == state
- *
- * \see cl_lock_operations::clo_state()
- */
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(lock->cll_state <= state ||
- (lock->cll_state == CLS_CACHED &&
- (state == CLS_HELD || /* lock found in cache */
- state == CLS_NEW || /* sub-lock canceled */
- state == CLS_INTRANSIT)) ||
- /* lock is in transit state */
- lock->cll_state == CLS_INTRANSIT);
-
- if (lock->cll_state != state) {
- CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
- CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
-
- cl_lock_state_signal(env, lock, state);
- lock->cll_state = state;
- }
-}
-EXPORT_SYMBOL(cl_lock_state_set);
-
-static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- do {
- result = 0;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
-
- result = -ENOSYS;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse) {
- result = slice->cls_ops->clo_unuse(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- } while (result == CLO_REPEAT);
-
- return result;
-}
-
-/**
- * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
- * cl_lock_operations::clo_use() top-to-bottom to notify layers.
- * @atomic = 1, it must unuse the lock to recover it and keep the
- * use process atomic
- */
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
+void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
const struct cl_lock_slice *slice;
- int result;
- enum cl_lock_state state;
-
- cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
-
- LASSERT(lock->cll_state == CLS_CACHED);
- if (lock->cll_error)
- return lock->cll_error;
-
- result = -ENOSYS;
- state = cl_lock_intransit(env, lock);
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_use) {
- result = slice->cls_ops->clo_use(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
-
- LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
- lock->cll_state);
-
- if (result == 0) {
- state = CLS_HELD;
- } else {
- if (result == -ESTALE) {
- /*
- * ESTALE means sublock being cancelled
- * at this time, and set lock state to
- * be NEW here and ask the caller to repeat.
- */
- state = CLS_NEW;
- result = CLO_REPEAT;
- }
-
- /* @atomic means back-off-on-failure. */
- if (atomic) {
- int rc;
-
- rc = cl_unuse_try_internal(env, lock);
- /* Vet the results. */
- if (rc < 0 && result > 0)
- result = rc;
- }
+ cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_cancel)
+ slice->cls_ops->clo_cancel(env, slice);
}
- cl_lock_extransit(env, lock, state);
- return result;
}
-EXPORT_SYMBOL(cl_use_try);
+EXPORT_SYMBOL(cl_lock_cancel);
/**
- * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
- * top-to-bottom.
+ * Enqueue a lock.
+ * \param anchor: if we need to wait for resources before getting the lock,
+ * use \a anchor for the purpose.
+ * \retval 0 enqueued successfully
+ * \retval <0 error code
*/
-static int cl_enqueue_kick(const struct lu_env *env,
- struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
+int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock, struct cl_sync_io *anchor)
{
- int result;
const struct cl_lock_slice *slice;
+ int rc = -ENOSYS;
- result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_enqueue) {
- result = slice->cls_ops->clo_enqueue(env,
- slice, io, flags);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- return result;
-}
-
-/**
- * Tries to enqueue a lock.
- *
- * This function is called repeatedly by cl_enqueue() until either lock is
- * enqueued, or error occurs. This function does not block waiting for
- * networking communication to complete.
- *
- * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \see cl_enqueue() cl_lock_operations::clo_enqueue()
- * \see cl_lock_state::CLS_ENQUEUED
- */
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
-{
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- switch (lock->cll_state) {
- case CLS_NEW:
- cl_lock_state_set(env, lock, CLS_QUEUING);
- /* fall-through */
- case CLS_QUEUING:
- /* kick layers. */
- result = cl_enqueue_kick(env, lock, io, flags);
- /* For AGL case, the cl_lock::cll_state may
- * become CLS_HELD already.
- */
- if (result == 0 && lock->cll_state == CLS_QUEUING)
- cl_lock_state_set(env, lock, CLS_ENQUEUED);
- break;
- case CLS_INTRANSIT:
- LASSERT(cl_lock_is_intransit(lock));
- result = CLO_WAIT;
- break;
- case CLS_CACHED:
- /* yank lock from the cache. */
- result = cl_use_try(env, lock, 0);
- break;
- case CLS_ENQUEUED:
- case CLS_HELD:
- result = 0;
- break;
- default:
- case CLS_FREEING:
- /*
- * impossible, only held locks with increased
- * ->cll_holds can be enqueued, and they cannot be
- * freed.
- */
- LBUG();
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_enqueue_try);
-
-/**
- * Cancel the conflicting lock found during previous enqueue.
- *
- * \retval 0 conflicting lock has been canceled.
- * \retval -ve error code.
- */
-int cl_lock_enqueue_wait(const struct lu_env *env,
- struct cl_lock *lock,
- int keep_mutex)
-{
- struct cl_lock *conflict;
- int rc = 0;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(lock->cll_conflict);
-
- conflict = lock->cll_conflict;
- lock->cll_conflict = NULL;
+ if (!slice->cls_ops->clo_enqueue)
+ continue;
- cl_lock_mutex_put(env, lock);
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- cl_lock_mutex_get(env, conflict);
- cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
- cl_lock_cancel(env, conflict);
- cl_lock_delete(env, conflict);
-
- while (conflict->cll_state != CLS_FREEING) {
- rc = cl_lock_state_wait(env, conflict);
+ rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor);
if (rc != 0)
break;
- }
- cl_lock_mutex_put(env, conflict);
- lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
- cl_lock_put(env, conflict);
-
- if (keep_mutex)
- cl_lock_mutex_get(env, lock);
-
- LASSERT(rc <= 0);
- return rc;
-}
-EXPORT_SYMBOL(cl_lock_enqueue_wait);
-
-static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 enqflags)
-{
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_user_add(env, lock);
- do {
- result = cl_enqueue_try(env, lock, io, enqflags);
- if (result == CLO_WAIT) {
- if (lock->cll_conflict)
- result = cl_lock_enqueue_wait(env, lock, 1);
- else
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result != 0)
- cl_unuse_try(env, lock);
- LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD));
- return result;
-}
-
-/**
- * Tries to unlock a lock.
- *
- * This function is called to release underlying resource:
- * 1. for top lock, the resource is sublocks it held;
- * 2. for sublock, the resource is the reference to dlmlock.
- *
- * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see cl_unuse() cl_lock_operations::clo_unuse()
- * \see cl_lock_state::CLS_CACHED
- */
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
- enum cl_lock_state state = CLS_NEW;
-
- cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
-
- if (lock->cll_users > 1) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
- * underlying resources.
- */
- if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /*
- * New lock users (->cll_users) are not protecting unlocking
- * from proceeding. From this point, lock eventually reaches
- * CLS_CACHED, is reinitialized to CLS_NEW or fails into
- * CLS_FREEING.
- */
- state = cl_lock_intransit(env, lock);
-
- result = cl_unuse_try_internal(env, lock);
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(result != CLO_WAIT);
- cl_lock_user_del(env, lock);
- if (result == 0 || result == -ESTALE) {
- /*
- * Return lock back to the cache. This is the only
- * place where lock is moved into CLS_CACHED state.
- *
- * If one of ->clo_unuse() methods returned -ESTALE, lock
- * cannot be placed into cache and has to be
- * re-initialized. This happens e.g., when a sub-lock was
- * canceled while unlocking was in progress.
- */
- if (state == CLS_HELD && result == 0)
- state = CLS_CACHED;
- else
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
-
- /*
- * Hide -ESTALE error.
- * If the lock is a glimpse lock, and it has multiple
- * stripes. Assuming that one of its sublock returned -ENAVAIL,
- * and other sublocks are matched write locks. In this case,
- * we can't set this lock to error because otherwise some of
- * its sublocks may not be canceled. This causes some dirty
- * pages won't be written to OSTs. -jay
- */
- result = 0;
- } else {
- CERROR("result = %d, this is unlikely!\n", result);
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
- }
- return result ?: lock->cll_error;
-}
-EXPORT_SYMBOL(cl_unuse_try);
-
-static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- result = cl_unuse_try(env, lock);
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
-}
-
-/**
- * Unlocks a lock.
- */
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_unuse_locked(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_release(env, lock);
-}
-EXPORT_SYMBOL(cl_unuse);
-
-/**
- * Tries to wait for a lock.
- *
- * This function is called repeatedly by cl_wait() until either lock is
- * granted, or error occurs. This function does not block waiting for network
- * communication to complete.
- *
- * \see cl_wait() cl_lock_operations::clo_wait()
- * \see cl_lock_state::CLS_HELD
- */
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_QUEUING ||
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT,
- "lock state: %d\n", lock->cll_state);
- LASSERT(lock->cll_users > 0);
- LASSERT(lock->cll_holds > 0);
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- if (cl_lock_is_intransit(lock)) {
- result = CLO_WAIT;
- break;
- }
-
- if (lock->cll_state == CLS_HELD)
- /* nothing to do */
- break;
-
- result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_wait) {
- result = slice->cls_ops->clo_wait(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- if (result == 0) {
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_HELD);
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_wait_try);
-
-/**
- * Waits until enqueued lock is granted.
- *
- * \pre current thread or io owns a hold on the lock
- * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \post ergo(result == 0, lock->cll_state == CLS_HELD)
- */
-int cl_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- cl_lock_mutex_get(env, lock);
-
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
- "Wrong state %d\n", lock->cll_state);
- LASSERT(lock->cll_holds > 0);
-
- do {
- result = cl_wait_try(env, lock);
- if (result == CLO_WAIT) {
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result < 0) {
- cl_unuse_try(env, lock);
- cl_lock_lockdep_release(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
- cl_lock_mutex_put(env, lock);
- LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
- return result;
-}
-EXPORT_SYMBOL(cl_wait);
-
-/**
- * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
- * value.
- */
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- unsigned long pound;
- unsigned long ounce;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- pound = 0;
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_weigh) {
- ounce = slice->cls_ops->clo_weigh(env, slice);
- pound += ounce;
- if (pound < ounce) /* over-weight^Wflow */
- pound = ~0UL;
- }
- }
- return pound;
-}
-EXPORT_SYMBOL(cl_lock_weigh);
-
-/**
- * Notifies layers that lock description changed.
- *
- * The server can grant client a lock different from one that was requested
- * (e.g., larger in extent). This method is called when actually granted lock
- * description becomes known to let layers to accommodate for changed lock
- * description.
- *
- * \see cl_lock_operations::clo_modify()
- */
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc)
-{
- const struct cl_lock_slice *slice;
- struct cl_object *obj = lock->cll_descr.cld_obj;
- struct cl_object_header *hdr = cl_object_header(obj);
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
- /* don't allow object to change */
- LASSERT(obj == desc->cld_obj);
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_modify) {
- result = slice->cls_ops->clo_modify(env, slice, desc);
- if (result != 0)
- return result;
- }
- }
- CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
- PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
- /*
- * Just replace description in place. Nothing more is needed for
- * now. If locks were indexed according to their extent and/or mode,
- * that index would have to be updated here.
- */
- spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- spin_unlock(&hdr->coh_lock_guard);
- return 0;
-}
-EXPORT_SYMBOL(cl_lock_modify);
-
-/**
- * Initializes lock closure with a given origin.
- *
- * \see cl_lock_closure
- */
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait)
-{
- LINVRNT(cl_lock_is_mutexed(origin));
- LINVRNT(cl_lock_invariant(env, origin));
-
- INIT_LIST_HEAD(&closure->clc_list);
- closure->clc_origin = origin;
- closure->clc_wait = wait;
- closure->clc_nr = 0;
-}
-EXPORT_SYMBOL(cl_lock_closure_init);
-
-/**
- * Builds a closure of \a lock.
- *
- * Building of a closure consists of adding initial lock (\a lock) into it,
- * and calling cl_lock_operations::clo_closure() methods of \a lock. These
- * methods might call cl_lock_closure_build() recursively again, adding more
- * locks to the closure, etc.
- *
- * \see cl_lock_closure
- */
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
- LINVRNT(cl_lock_invariant(env, closure->clc_origin));
-
- result = cl_lock_enclosure(env, lock, closure);
- if (result == 0) {
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_closure) {
- result = slice->cls_ops->clo_closure(env, slice,
- closure);
- if (result != 0)
- break;
- }
- }
- }
- if (result != 0)
- cl_lock_disclosure(env, closure);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_closure_build);
-
-/**
- * Adds new lock to a closure.
- *
- * Try-locks \a lock and if succeeded, adds it to the closure (never more than
- * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
- * until next try-lock is likely to succeed.
- */
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- int result = 0;
-
- cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
- if (!cl_lock_mutex_try(env, lock)) {
- /*
- * If lock->cll_inclosure is not empty, lock is already in
- * this closure.
- */
- if (list_empty(&lock->cll_inclosure)) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure", closure);
- list_add(&lock->cll_inclosure, &closure->clc_list);
- closure->clc_nr++;
- } else
- cl_lock_mutex_put(env, lock);
- result = 0;
- } else {
- cl_lock_disclosure(env, closure);
- if (closure->clc_wait) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure-w", closure);
- cl_lock_mutex_put(env, closure->clc_origin);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
- cl_lock_mutex_get(env, lock);
- cl_lock_mutex_put(env, lock);
-
- cl_lock_mutex_get(env, closure->clc_origin);
- lu_ref_del(&lock->cll_reference, "closure-w", closure);
- cl_lock_put(env, lock);
- }
- result = CLO_REPEAT;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_enclosure);
-
-/** Releases mutices of enclosed locks. */
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure)
-{
- struct cl_lock *scan;
- struct cl_lock *temp;
-
- cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure) {
- list_del_init(&scan->cll_inclosure);
- cl_lock_mutex_put(env, scan);
- lu_ref_del(&scan->cll_reference, "closure", closure);
- cl_lock_put(env, scan);
- closure->clc_nr--;
- }
- LASSERT(closure->clc_nr == 0);
-}
-EXPORT_SYMBOL(cl_lock_disclosure);
-
-/** Finalizes a closure. */
-void cl_lock_closure_fini(struct cl_lock_closure *closure)
-{
- LASSERT(closure->clc_nr == 0);
- LASSERT(list_empty(&closure->clc_list));
-}
-EXPORT_SYMBOL(cl_lock_closure_fini);
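Taken together, the functions above suggest the following life-cycle for a closure; the retry policy shown is an illustrative assumption:

static int closure_lifecycle_sketch(const struct lu_env *env,
				    struct cl_lock *origin)
{
	struct cl_lock_closure closure;
	int rc;

	/* origin must be mutexed, per cl_lock_closure_init()'s invariants */
	cl_lock_closure_init(env, &closure, origin, 1 /* wait */);
	do {
		rc = cl_lock_closure_build(env, origin, &closure);
		/* CLO_REPEAT: mutexes were dropped; rebuild from scratch */
	} while (rc == CLO_REPEAT);
	if (rc == 0) {
		/* ... operate on the enclosed locks ... */
		cl_lock_disclosure(env, &closure);
	}
	cl_lock_closure_fini(&closure);
	return rc;
}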
-
-/**
- * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
- * destroyed, then destroys the lock. If there are holds on the lock,
- * destruction is postponed until all holds are released. This is called when
- * a decision is made to destroy the lock in the future, e.g., when a blocking
- * AST is received on it, or a fatal communication error happens.
- *
- * The caller must have a reference on this lock to prevent a situation where
- * the deleted lock lingers in memory indefinitely because nobody calls
- * cl_lock_put() to finalize it.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
- * cl_lock_nr_mutexed(env) == 1)
- * [i.e., if a top-lock is deleted, mutexes of no other locks can be
- * held, as deletion of sub-locks might require releasing a top-lock
- * mutex]
- *
- * \see cl_lock_operations::clo_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
- cl_lock_nr_mutexed(env) == 1));
-
- cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_delete0(env, lock);
- else
- lock->cll_flags |= CLF_DOOMED;
-}
-EXPORT_SYMBOL(cl_lock_delete);
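A caller honoring these preconditions, such as a blocking-AST handler, might look like this purely illustrative sketch (the caller is assumed to hold a reference on the lock):

static void blocking_ast_sketch(const struct lu_env *env, struct cl_lock *lock)
{
	cl_lock_mutex_get(env, lock);
	cl_lock_delete(env, lock);	/* deferred via CLF_DOOMED if held */
	cl_lock_mutex_put(env, lock);
}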
-
-/**
- * Marks the lock as irrecoverably failed, and marks it for destruction. This
- * happens when, e.g., the server fails to grant a lock, or a network timeout
- * occurs.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- *
- * \see clo_lock_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_error == 0 && error != 0) {
- cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
- lock->cll_error = error;
- cl_lock_signal(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
-}
-EXPORT_SYMBOL(cl_lock_error);
-
-/**
- * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
- * cancelled, then destroys the lock. If there are holds on the lock,
- * cancellation is postponed until all holds are released.
- *
- * Cancellation notification is delivered to layers at most once.
- *
- * \see cl_lock_operations::clo_cancel()
- * \see cl_lock::cll_holds
- */
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_cancel0(env, lock);
- else
- lock->cll_flags |= CLF_CANCELPEND;
-}
-EXPORT_SYMBOL(cl_lock_cancel);
-
-/**
- * Finds an existing lock covering the given index, optionally different from
- * the given \a except lock.
- */
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except,
- int pending, int canceld)
-{
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct cl_lock *lock;
- struct cl_lock_descr *need;
-
- head = cl_object_header(obj);
- need = &cl_env_info(env)->clt_descr;
- lock = NULL;
-
- need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
- * not PHANTOM
- */
- need->cld_start = need->cld_end = index;
- need->cld_enq_flags = 0;
-
- spin_lock(&head->coh_lock_guard);
-	/* It is fine to match any group lock since there can be only one
-	 * with a unique gid, and it conflicts with all other lock modes too.
-	 */
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != except &&
- (scan->cll_descr.cld_mode == CLM_GROUP ||
- cl_lock_ext_match(&scan->cll_descr, need)) &&
- scan->cll_state >= CLS_HELD &&
- scan->cll_state < CLS_FREEING &&
- /*
-		     * This check is racy, as the lock can be cancelled right
-		     * after it is done, but this is fine because the page
-		     * already exists.
- */
- (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
- (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
- /* Don't increase cs_hit here since this
- * is just a helper function.
- */
- cl_lock_get_trust(scan);
- lock = scan;
- break;
}
- }
- spin_unlock(&head->coh_lock_guard);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_at_pgoff);
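A hedged example of calling this helper to find a lock covering a single page index, matching neither pending-cancel nor cancelled locks:

static struct cl_lock *covering_lock_sketch(const struct lu_env *env,
					    struct cl_object *obj,
					    pgoff_t index)
{
	/* returns a trusted reference, or NULL; caller must cl_lock_put() */
	return cl_lock_at_pgoff(env, obj, index, NULL, 0, 0);
}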
-
-/**
- * Calculates the page offset at the layer of @lock.
- * At the time of this writing, @page is the top page and @lock is a sub-lock.
- */
-static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
-{
- struct lu_device_type *dtype;
- const struct cl_page_slice *slice;
-
- dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
- slice = cl_page_at(page, dtype);
- return slice->cpl_page->cp_index;
+ return rc;
}
+EXPORT_SYMBOL(cl_lock_enqueue);
/**
- * Check if page @page is covered by an extra lock or discard it.
+ * Main high-level entry point of the cl_lock interface: finds an existing
+ * lock, or enqueues a new lock, matching the given description.
*/
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
+int cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock)
{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
- pgoff_t index = pgoff_at_lock(page, lock);
+ struct cl_sync_io *anchor = NULL;
+ __u32 enq_flags = lock->cll_descr.cld_enq_flags;
+ int rc;
- if (index >= info->clt_fn_index) {
- struct cl_lock *tmp;
+ rc = cl_lock_init(env, lock, io);
+ if (rc < 0)
+ return rc;
- /* refresh non-overlapped index */
- tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
- if (tmp) {
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, clt_fn_index). This
- * is safe because if tmp lock is canceled, it will
- * discard these pages.
- */
- info->clt_fn_index = tmp->cll_descr.cld_end + 1;
- if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
- info->clt_fn_index = CL_PAGE_EOF;
- cl_lock_put(env, tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
+ if ((enq_flags & CEF_ASYNC) && !(enq_flags & CEF_AGL)) {
+ anchor = &cl_env_info(env)->clt_anchor;
+ cl_sync_io_init(anchor, 1, cl_sync_io_end);
}
- info->clt_next_index = index + 1;
- return CLP_GANG_OKAY;
-}
+ rc = cl_lock_enqueue(env, io, lock, anchor);
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
+ if (anchor) {
+ int rc2;
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageWriteback(cl_page_vmpage(env, page))));
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(env, page))));
-
- info->clt_next_index = pgoff_at_lock(page, lock) + 1;
- if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
+ /* drop the reference count held at initialization time */
+ cl_sync_io_note(env, anchor, 0);
+ rc2 = cl_sync_io_wait(env, anchor, 0);
+ if (rc2 < 0 && rc == 0)
+ rc = rc2;
}
- return CLP_GANG_OKAY;
-}
+ if (rc < 0)
+ cl_lock_release(env, lock);
-/**
- * Discards pages protected by the given lock. This function traverses the
- * radix tree to find all covering pages and discards them. If a page is
- * covered by other locks, it remains in cache.
- *
- * If an error occurs at any step, the process continues anyway (the reasoning
- * being that lock cancellation cannot be delayed indefinitely).
- */
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_io *io = &info->clt_io;
- struct cl_lock_descr *descr = &lock->cll_descr;
- cl_page_gang_cb_t cb;
- int res;
- int result;
-
- LINVRNT(cl_lock_invariant(env, lock));
-
- io->ci_obj = cl_object_top(descr->cld_obj);
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- goto out;
-
- cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->clt_fn_index = info->clt_next_index = descr->cld_start;
- do {
- res = cl_page_gang_lookup(env, descr->cld_obj, io,
- info->clt_next_index, descr->cld_end,
- cb, (void *)lock);
- if (info->clt_next_index > descr->cld_end)
- break;
-
- if (res == CLP_GANG_RESCHED)
- cond_resched();
- } while (res != CLP_GANG_OKAY);
-out:
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_discard_pages);
-
-/**
- * Eliminate all locks for a given object.
- *
- * Caller has to guarantee that no lock is in active use.
- *
- * \param cancel when this is set, cl_locks_prune() cancels locks before
- * destroying them.
- */
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
-{
- struct cl_object_header *head;
- struct cl_lock *lock;
-
- head = cl_object_header(obj);
- /*
- * If locks are destroyed without cancellation, all pages must be
- * already destroyed (as otherwise they will be left unprotected).
- */
- LASSERT(ergo(!cancel,
- !head->coh_tree.rnode && head->coh_pages == 0));
-
- spin_lock(&head->coh_lock_guard);
- while (!list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- spin_unlock(&head->coh_lock_guard);
- lu_ref_add(&lock->cll_reference, "prune", current);
-
-again:
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_users <= 1);
- if (unlikely(lock->cll_users == 1)) {
- struct l_wait_info lwi = { 0 };
-
- cl_lock_mutex_put(env, lock);
- l_wait_event(lock->cll_wq,
- lock->cll_users == 0,
- &lwi);
- goto again;
- }
-
- if (cancel)
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, "prune", current);
- cl_lock_put(env, lock);
- spin_lock(&head->coh_lock_guard);
- }
- spin_unlock(&head->coh_lock_guard);
-}
-EXPORT_SYMBOL(cl_locks_prune);
-
-static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- while (1) {
- lock = cl_lock_find(env, io, need);
- if (IS_ERR(lock))
- break;
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING &&
- !(lock->cll_flags & CLF_CANCELLED)) {
- cl_lock_hold_mod(env, lock, 1);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
- break;
- }
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- }
- return lock;
-}
-
-/**
- * Returns a lock matching \a need description with a reference and a hold on
- * it.
- *
- * This is much like cl_lock_find(), except that cl_lock_hold() additionally
- * guarantees that lock is not in the CLS_FREEING state on return.
- */
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (!IS_ERR(lock))
- cl_lock_mutex_put(env, lock);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_hold);
-
-/**
- * Main high-level entry point of the cl_lock interface: finds an existing
- * lock, or enqueues a new lock, matching the given description.
- */
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
- int rc;
- __u32 enqflags = need->cld_enq_flags;
-
- do {
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (IS_ERR(lock))
- break;
-
- rc = cl_enqueue_locked(env, lock, io, enqflags);
- if (rc == 0) {
- if (cl_lock_fits_into(env, lock, need, io)) {
- if (!(enqflags & CEF_AGL)) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock,
- enqflags);
- break;
- }
- rc = 1;
- }
- cl_unuse_locked(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env,
- rc <= 0 ? "enqueue failed" : "agl succeed", lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
- if (rc > 0) {
- LASSERT(enqflags & CEF_AGL);
- lock = NULL;
- } else if (rc != 0) {
- lock = ERR_PTR(rc);
- }
- } while (rc == 0);
- return lock;
+ return rc;
}
EXPORT_SYMBOL(cl_lock_request);
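Under the new scheme the caller embeds a struct cl_lock, fills its descriptor, and brackets the I/O with cl_lock_request()/cl_lock_release(); the descriptor values below are illustrative assumptions:

static int lock_request_sketch(const struct lu_env *env, struct cl_io *io,
			       struct cl_object *obj,
			       pgoff_t start, pgoff_t end)
{
	struct cl_lock lock;
	int rc;

	memset(&lock, 0, sizeof(lock));
	lock.cll_descr.cld_obj = obj;
	lock.cll_descr.cld_start = start;
	lock.cll_descr.cld_end = end;
	lock.cll_descr.cld_mode = CLM_READ;

	rc = cl_lock_request(env, io, &lock);	/* initializes and enqueues */
	if (rc < 0)
		return rc;	/* cl_lock_request() already released on error */
	/* ... perform I/O under the lock ... */
	cl_lock_release(env, &lock);	/* cancels and finalizes */
	return 0;
}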
/**
- * Adds a hold to a known lock.
- */
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state != CLS_FREEING);
-
- cl_lock_hold_mod(env, lock, 1);
- cl_lock_get(lock);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
-}
-EXPORT_SYMBOL(cl_lock_hold_add);
-
-/**
- * Releases a hold and a reference on a lock, on which caller acquired a
- * mutex.
- */
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_invariant(env, lock));
- cl_lock_hold_release(env, lock, scope, source);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_unhold);
-
-/**
* Releases a hold and a reference on a lock, obtained by cl_lock_hold().
*/
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_invariant(env, lock));
cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
- cl_lock_mutex_get(env, lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
+ cl_lock_cancel(env, lock);
+ cl_lock_fini(env, lock);
}
EXPORT_SYMBOL(cl_lock_release);
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_used_mod(env, lock, 1);
-}
-EXPORT_SYMBOL(cl_lock_user_add);
-
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_users > 0);
-
- cl_lock_used_mod(env, lock, -1);
- if (lock->cll_users == 0)
- wake_up_all(&lock->cll_wq);
-}
-EXPORT_SYMBOL(cl_lock_user_del);
-
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
static const char *names[] = {
- [CLM_PHANTOM] = "P",
[CLM_READ] = "R",
[CLM_WRITE] = "W",
[CLM_GROUP] = "G"
@@ -2189,10 +261,8 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_lock *lock)
{
const struct cl_lock_slice *slice;
- (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, atomic_read(&lock->cll_ref),
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags);
+
+ (*printer)(env, cookie, "lock@%p", lock);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
(*printer)(env, cookie, " {\n");
@@ -2207,13 +277,3 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);
-
-int cl_lock_init(void)
-{
- return lu_kmem_init(cl_lock_caches);
-}
-
-void cl_lock_fini(void)
-{
- lu_kmem_fini(cl_lock_caches);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 43e299d4d..5940f3031 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -36,6 +36,7 @@
* Client Lustre Object.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
/*
@@ -43,8 +44,6 @@
*
* i_mutex
* PG_locked
- * ->coh_page_guard
- * ->coh_lock_guard
* ->coh_attr_guard
* ->ls_guard
*/
@@ -63,10 +62,6 @@
static struct kmem_cache *cl_env_kmem;
-/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
-/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
@@ -81,17 +76,9 @@ int cl_object_header_init(struct cl_object_header *h)
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_pages = 0;
- /* XXX hard coded GFP_* mask. */
- INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
- INIT_LIST_HEAD(&h->coh_locks);
- h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
+ h->coh_page_bufsize = 0;
}
return result;
}
@@ -145,7 +132,7 @@ EXPORT_SYMBOL(cl_object_get);
/**
* Returns the top-object for a given \a o.
*
- * \see cl_page_top(), cl_io_top()
+ * \see cl_io_top()
*/
struct cl_object *cl_object_top(struct cl_object *o)
{
@@ -315,6 +302,29 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
EXPORT_SYMBOL(cl_conf_set);
/**
+ * Prunes caches of pages and locks for this object.
+ */
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *top;
+ struct cl_object *o;
+ int result;
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
+ if (o->co_ops->coo_prune) {
+ result = o->co_ops->coo_prune(env, o);
+ if (result != 0)
+ break;
+ }
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(cl_object_prune);
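A layer participating in this top-to-bottom walk would supply a coo_prune() method; the cache-invalidation helper below is a stand-in assumption:

int my_layer_invalidate(const struct lu_env *env, struct cl_object *obj); /* hypothetical */

static int layer_prune_sketch(const struct lu_env *env, struct cl_object *obj)
{
	/* drop this layer's cached pages and locks; nonzero aborts the walk */
	return my_layer_invalidate(env, obj);
}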
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
@@ -323,34 +333,12 @@ EXPORT_SYMBOL(cl_conf_set);
*/
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(obj);
- LASSERT(!hdr->coh_tree.rnode);
- LASSERT(hdr->coh_pages == 0);
+ struct cl_object_header *hdr = cl_object_header(obj);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
- /*
- * Destroy all locks. Object destruction (including cl_inode_fini())
- * cannot cancel the locks, because in the case of a local client,
- * where client and server share the same thread running
- * prune_icache(), this can dead-lock with ldlm_cancel_handler()
- * waiting on __wait_on_freeing_inode().
- */
- cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);
-/**
- * Prunes caches of pages and locks for this object.
- */
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- cl_pages_prune(env, obj);
- cl_locks_prune(env, obj, 1);
-}
-EXPORT_SYMBOL(cl_object_prune);
-
void cache_stats_init(struct cache_stats *cs, const char *name)
{
int i;
@@ -383,6 +371,8 @@ static int cache_stats_print(const struct cache_stats *cs,
return 0;
}
+static void cl_env_percpu_refill(void);
+
/**
* Initialize client site.
*
@@ -397,11 +387,9 @@ int cl_site_init(struct cl_site *s, struct cl_device *d)
result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
if (result == 0) {
cache_stats_init(&s->cs_pages, "pages");
- cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
atomic_set(&s->cs_pages_state[0], 0);
- for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
+ cl_env_percpu_refill();
}
return result;
}
@@ -435,15 +423,6 @@ int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
[CPS_PAGEIN] = "r",
[CPS_FREEING] = "f"
};
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
/*
lookup hit total busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
@@ -457,12 +436,6 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
seq_printf(m, "%s: %u ", pstate[i],
atomic_read(&site->cs_pages_state[i]));
seq_printf(m, "]\n");
- cache_stats_print(&site->cs_locks, m, 0);
- seq_printf(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- seq_printf(m, "%s: %u ", lstate[i],
- atomic_read(&site->cs_locks_state[i]));
- seq_printf(m, "]\n");
cache_stats_print(&cl_env_stats, m, 0);
seq_printf(m, "\n");
return 0;
@@ -492,6 +465,13 @@ EXPORT_SYMBOL(cl_site_stats_print);
* bz20044, bz22683.
*/
+static LIST_HEAD(cl_envs);
+static unsigned int cl_envs_cached_nr;
+static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
+ * for now.
+ */
+static DEFINE_SPINLOCK(cl_envs_guard);
+
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
@@ -674,8 +654,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
lu_context_enter(&cle->ce_ses);
env->le_ses = &cle->ce_ses;
cl_env_init0(cle, debug);
- } else
+ } else {
lu_env_fini(env);
+ }
}
if (rc != 0) {
kmem_cache_free(cl_env_kmem, cle);
@@ -684,8 +665,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
CL_ENV_INC(create);
CL_ENV_INC(total);
}
- } else
+ } else {
env = ERR_PTR(-ENOMEM);
+ }
return env;
}
@@ -697,6 +679,39 @@ static void cl_env_fini(struct cl_env *cle)
kmem_cache_free(cl_env_kmem, cle);
}
+static struct lu_env *cl_env_obtain(void *debug)
+{
+ struct cl_env *cle;
+ struct lu_env *env;
+
+ spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ if (cl_envs_cached_nr > 0) {
+ int rc;
+
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ env = &cle->ce_lu;
+ rc = lu_env_refill(env);
+ if (rc == 0) {
+ cl_env_init0(cle, debug);
+ lu_context_enter(&env->le_ctx);
+ lu_context_enter(&cle->ce_ses);
+ } else {
+ cl_env_fini(cle);
+ env = ERR_PTR(rc);
+ }
+ } else {
+ spin_unlock(&cl_envs_guard);
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default, debug);
+ }
+ return env;
+}
+
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
return container_of(env, struct cl_env, ce_lu);
@@ -727,6 +742,8 @@ static struct lu_env *cl_env_peek(int *refcheck)
* Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned; otherwise, a new environment is allocated.
*
+ * Allocations are amortized through the global cache of environments.
+ *
* \param refcheck pointer to a counter used to detect environment leaks. In
* the usual case cl_env_get() and cl_env_put() are called in the same lexical
* scope and pointer to the same integer is passed as \a refcheck. This is
@@ -740,10 +757,7 @@ struct lu_env *cl_env_get(int *refcheck)
env = cl_env_peek(refcheck);
if (!env) {
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default,
- __builtin_return_address(0));
-
+ env = cl_env_obtain(__builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
@@ -787,6 +801,32 @@ static void cl_env_exit(struct cl_env *cle)
}
/**
+ * Finalizes and frees a given number of cached environments. This is done to
+ * (1) free some memory (not currently hooked into VM), or (2) release
+ * references to modules.
+ */
+unsigned int cl_env_cache_purge(unsigned int nr)
+{
+ struct cl_env *cle;
+
+ spin_lock(&cl_envs_guard);
+ for (; !list_empty(&cl_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs_cached_nr > 0);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ cl_env_fini(cle);
+ spin_lock(&cl_envs_guard);
+ }
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ spin_unlock(&cl_envs_guard);
+ return nr;
+}
+EXPORT_SYMBOL(cl_env_cache_purge);
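A caller wanting to drain the cache entirely can pass a count larger than cl_envs_cached_max; this usage is an assumption, not taken from this patch:

static void env_cache_drain_sketch(void)
{
	/* returns the number of requested purges that found no victim */
	cl_env_cache_purge(~0U);
}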
+
+/**
* Release an environment.
*
* Decrement \a env reference counter. When counter drops to 0, nothing in
@@ -808,7 +848,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
- cl_env_fini(cle);
+ /*
+ * Don't bother to take a lock here.
+ *
+ * Return environment to the cache only when it was allocated
+ * with the standard tags.
+ */
+ if (cl_envs_cached_nr < cl_envs_cached_max &&
+ (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
+ (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
+ spin_lock(&cl_envs_guard);
+ list_add(&cle->ce_linkage, &cl_envs);
+ cl_envs_cached_nr++;
+ spin_unlock(&cl_envs_guard);
+ } else {
+ cl_env_fini(cle);
+ }
}
}
EXPORT_SYMBOL(cl_env_put);
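The refcheck contract described for cl_env_get() leads to the usual bracketed pattern; the body between get and put is illustrative:

static int with_env_sketch(void)
{
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);
	/* ... use env ... */
	cl_env_put(env, &refcheck);	/* same counter catches leaks */
	return 0;
}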
@@ -914,6 +969,104 @@ void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
}
EXPORT_SYMBOL(cl_lvb2attr);
+static struct cl_env cl_env_percpu[NR_CPUS];
+
+static int cl_env_percpu_init(void)
+{
+ struct cl_env *cle;
+ int tags = LCT_REMEMBER | LCT_NOREF;
+ int i, j;
+ int rc = 0;
+
+ for_each_possible_cpu(i) {
+ struct lu_env *env;
+
+ cle = &cl_env_percpu[i];
+ env = &cle->ce_lu;
+
+ INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ rc = lu_env_init(env, LCT_CL_THREAD | tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ } else {
+ lu_env_fini(env);
+ }
+ }
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+ /* Indices 0 to i (excluding i) were correctly initialized,
+ * thus we must uninitialize up to i, the rest are undefined.
+ */
+ for (j = 0; j < i; j++) {
+			cle = &cl_env_percpu[j];
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+ }
+
+ return rc;
+}
+
+static void cl_env_percpu_fini(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cl_env *cle = &cl_env_percpu[i];
+
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+}
+
+static void cl_env_percpu_refill(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ lu_env_refill(&cl_env_percpu[i].ce_lu);
+}
+
+void cl_env_percpu_put(struct lu_env *env)
+{
+ struct cl_env *cle;
+ int cpu;
+
+ cpu = smp_processor_id();
+ cle = cl_env_container(env);
+ LASSERT(cle == &cl_env_percpu[cpu]);
+
+ cle->ce_ref--;
+ LASSERT(cle->ce_ref == 0);
+
+ CL_ENV_DEC(busy);
+ cl_env_detach(cle);
+ cle->ce_debug = NULL;
+
+ put_cpu();
+}
+EXPORT_SYMBOL(cl_env_percpu_put);
+
+struct lu_env *cl_env_percpu_get(void)
+{
+ struct cl_env *cle;
+
+ cle = &cl_env_percpu[get_cpu()];
+ cl_env_init0(cle, __builtin_return_address(0));
+
+ cl_env_attach(cle);
+ return &cle->ce_lu;
+}
+EXPORT_SYMBOL(cl_env_percpu_get);
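Because cl_env_percpu_get() indexes by get_cpu() and the matching put calls put_cpu(), the bracketed section presumably must not sleep; a minimal sketch:

static void percpu_env_sketch(void)
{
	struct lu_env *env = cl_env_percpu_get();	/* disables preemption */

	/* ... short, non-sleeping work ... */
	cl_env_percpu_put(env);
}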
+
/*****************************************************************************
*
* Temporary prototype thing: mirror obd-devices into cl devices.
@@ -944,8 +1097,9 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
CERROR("can't init device '%s', %d\n", typename, rc);
d = ERR_PTR(rc);
}
- } else
+ } else {
CERROR("Cannot allocate device: '%s'\n", typename);
+ }
return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
@@ -959,12 +1113,6 @@ void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
}
EXPORT_SYMBOL(cl_stack_fini);
-int cl_lock_init(void);
-void cl_lock_fini(void);
-
-int cl_page_init(void);
-void cl_page_fini(void);
-
static struct lu_context_key cl_key;
struct cl_thread_info *cl_env_info(const struct lu_env *env)
@@ -1059,17 +1207,13 @@ int cl_global_init(void)
if (result)
goto out_kmem;
- result = cl_lock_init();
+ result = cl_env_percpu_init();
if (result)
+ /* no cl_env_percpu_fini on error */
goto out_context;
- result = cl_page_init();
- if (result)
- goto out_lock;
-
return 0;
-out_lock:
- cl_lock_fini();
+
out_context:
lu_context_key_degister(&cl_key);
out_kmem:
@@ -1084,8 +1228,7 @@ out_store:
*/
void cl_global_fini(void)
{
- cl_lock_fini();
- cl_page_fini();
+ cl_env_percpu_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
cl_env_store_fini();
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 394580016..b754f516e 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -36,6 +36,7 @@
* Client Lustre Page.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -48,8 +49,7 @@
#include "../include/cl_object.h"
#include "cl_internal.h"
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix);
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
# define PASSERT(env, page, expr) \
do { \
@@ -63,24 +63,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
/**
- * Internal version of cl_page_top(); it should be called only if the page is
- * known not to be freed, say with the page referenced, the radix tree lock
- * held, or the page owned.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
-{
- while (page->cp_parent)
- page = page->cp_parent;
- return page;
-}
-
-/**
* Internal version of cl_page_get().
*
 * This function can be used to obtain an initial reference to a previously
 * unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by locking page radix-tree
- * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
+ * reclamation is somehow prevented, e.g., by keeping a lock on the VM page
* associated with \a page.
*
* Use with care! Not exported.
@@ -103,142 +90,12 @@ cl_page_at_trusted(const struct cl_page *page,
{
const struct cl_page_slice *slice;
- page = cl_page_top_trusted((struct cl_page *)page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- page = page->cp_child;
- } while (page);
- return NULL;
-}
-
-/**
- * Returns a page with given index in the given object, or NULL if no page is
- * found. Acquires a reference on \a page.
- *
- * Locking: called under cl_object_header::coh_page_guard spin-lock.
- */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
-{
- struct cl_page *page;
-
- assert_spin_locked(&hdr->coh_page_guard);
-
- page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page)
- cl_page_get_trust(page);
- return page;
-}
-EXPORT_SYMBOL(cl_page_lookup);
-
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * \param resched If not NULL, then we give up before hogging the CPU for
- * too long and set *resched = 1; in that case the caller should implement
- * retry logic.
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- *
- * Return at least one page in @queue unless there is no covered page.
- */
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata)
-{
- struct cl_object_header *hdr;
- struct cl_page *page;
- struct cl_page **pvec;
- const struct cl_page_slice *slice;
- const struct lu_device_type *dtype;
- pgoff_t idx;
- unsigned int nr;
- unsigned int i;
- unsigned int j;
- int res = CLP_GANG_OKAY;
- int tree_lock = 1;
-
- idx = start;
- hdr = cl_object_header(obj);
- pvec = cl_env_info(env)->clt_pvec;
- dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- spin_lock(&hdr->coh_page_guard);
- while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
- idx, CLT_PVEC_SIZE)) > 0) {
- int end_of_region = 0;
-
- idx = pvec[nr - 1]->cp_index + 1;
- for (i = 0, j = 0; i < nr; ++i) {
- page = pvec[i];
- pvec[i] = NULL;
-
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (page->cp_index > end) {
- end_of_region = 1;
- break;
- }
- if (page->cp_state == CPS_FREEING)
- continue;
-
- slice = cl_page_at_trusted(page, dtype);
- /*
-			 * Pages of an lsm-less file have no underlying
-			 * sub-page for osc, in case of ...
- */
- PASSERT(env, page, slice);
-
- page = slice->cpl_page;
- /*
- * Can safely call cl_page_get_trust() under
- * radix-tree spin-lock.
- *
-			 * XXX not true, because @page is from an object other
-			 * than @hdr and protected by a different tree lock.
- */
- cl_page_get_trust(page);
- lu_ref_add_atomic(&page->cp_reference,
- "gang_lookup", current);
- pvec[j++] = page;
- }
-
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
- spin_unlock(&hdr->coh_page_guard);
- tree_lock = 0;
-
- for (i = 0; i < j; ++i) {
- page = pvec[i];
- if (res == CLP_GANG_OKAY)
- res = (*cb)(env, io, page, cbdata);
- lu_ref_del(&page->cp_reference,
- "gang_lookup", current);
- cl_page_put(env, page);
- }
- if (nr < CLT_PVEC_SIZE || end_of_region)
- break;
-
- if (res == CLP_GANG_OKAY && need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
-
- spin_lock(&hdr->coh_page_guard);
- tree_lock = 1;
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
+ return slice;
}
- if (tree_lock)
- spin_unlock(&hdr->coh_page_guard);
- return res;
+ return NULL;
}
-EXPORT_SYMBOL(cl_page_gang_lookup);
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
@@ -247,17 +104,16 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, !page->cp_owner);
PASSERT(env, page, !page->cp_req);
- PASSERT(env, page, !page->cp_parent);
PASSERT(env, page, page->cp_state == CPS_FREEING);
- might_sleep();
while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
slice = list_entry(page->cp_layers.next,
struct cl_page_slice, cpl_linkage);
list_del_init(page->cp_layers.next);
- slice->cpl_ops->cpo_fini(env, slice);
+ if (unlikely(slice->cpl_ops->cpo_fini))
+ slice->cpl_ops->cpo_fini(env, slice);
}
lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
@@ -276,10 +132,10 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
*(enum cl_page_state *)&page->cp_state = state;
}
-static struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind,
- struct page *vmpage,
- enum cl_page_type type)
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page;
struct lu_object_header *head;
@@ -289,13 +145,11 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
int result = 0;
atomic_set(&page->cp_ref, 1);
- if (type == CPT_CACHEABLE) /* for radix tree */
- atomic_inc(&page->cp_ref);
page->cp_obj = o;
cl_object_get(o);
lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
page);
- page->cp_index = ind;
+ page->cp_vmpage = vmpage;
cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
INIT_LIST_HEAD(&page->cp_layers);
@@ -306,10 +160,10 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
if (o->co_ops->coo_page_init) {
- result = o->co_ops->coo_page_init(env, o,
- page, vmpage);
+ result = o->co_ops->coo_page_init(env, o, page,
+ ind);
if (result != 0) {
- cl_page_delete0(env, page, 0);
+ cl_page_delete0(env, page);
cl_page_free(env, page);
page = ERR_PTR(result);
break;
@@ -321,6 +175,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
}
return page;
}
+EXPORT_SYMBOL(cl_page_alloc);
/**
* Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -333,16 +188,13 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
*
* \see cl_object_find(), cl_lock_find()
*/
-static struct cl_page *cl_page_find0(const struct lu_env *env,
- struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type,
- struct cl_page *parent)
+struct cl_page *cl_page_find(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page = NULL;
- struct cl_page *ghost = NULL;
struct cl_object_header *hdr;
- int err;
LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
might_sleep();
@@ -368,120 +220,25 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
* reference on it.
*/
page = cl_vmpage_page(vmpage, o);
- PINVRNT(env, page,
- ergo(page,
- cl_page_vmpage(env, page) == vmpage &&
- (void *)radix_tree_lookup(&hdr->coh_tree,
- idx) == page));
- }
- if (page)
- return page;
+ if (page)
+ return page;
+ }
/* allocate and initialize cl_page */
page = cl_page_alloc(env, o, idx, vmpage, type);
- if (IS_ERR(page))
- return page;
-
- if (type == CPT_TRANSIENT) {
- if (parent) {
- LASSERT(!page->cp_parent);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- return page;
- }
-
- /*
- * XXX optimization: use radix_tree_preload() here, and change tree
- * gfp mask to GFP_KERNEL in cl_object_header_init().
- */
- spin_lock(&hdr->coh_page_guard);
- err = radix_tree_insert(&hdr->coh_tree, idx, page);
- if (err != 0) {
- ghost = page;
- /*
- * Noted by Jay: a lock on \a vmpage protects cl_page_find()
- * from this race, but
- *
- * 0. it's better to have cl_page interface "locally
- * consistent" so that its correctness can be reasoned
- * about without appealing to the (obscure world of) VM
- * locking.
- *
- * 1. handling this race allows ->coh_tree to remain
- * consistent even when VM locking is somehow busted,
- * which is very useful during diagnosing and debugging.
- */
- page = ERR_PTR(err);
- CL_PAGE_DEBUG(D_ERROR, env, ghost,
- "fail to insert into radix tree: %d\n", err);
- } else {
- if (parent) {
- LASSERT(!page->cp_parent);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- hdr->coh_pages++;
- }
- spin_unlock(&hdr->coh_page_guard);
-
- if (unlikely(ghost)) {
- cl_page_delete0(env, ghost, 0);
- cl_page_free(env, ghost);
- }
return page;
}
-
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
-{
- return cl_page_find0(env, o, idx, vmpage, type, NULL);
-}
EXPORT_SYMBOL(cl_page_find);
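With the radix tree gone, cl_page_find() reduces to the vmpage-based lookup plus allocation; a hedged caller sketch, with error handling abbreviated:

static int page_find_sketch(const struct lu_env *env, struct cl_object *obj,
			    struct page *vmpage)
{
	struct cl_page *page;

	/* vmpage must be locked, per the cl_vmpage_page() requirements */
	page = cl_page_find(env, obj, vmpage->index, vmpage, CPT_CACHEABLE);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... own and use the page ... */
	cl_page_put(env, page);
	return 0;
}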
-struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent)
-{
- return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
-}
-EXPORT_SYMBOL(cl_page_find_sub);
-
static inline int cl_page_invariant(const struct cl_page *pg)
{
- struct cl_object_header *header;
- struct cl_page *parent;
- struct cl_page *child;
- struct cl_io *owner;
-
/*
* Page invariant is protected by a VM lock.
*/
LINVRNT(cl_page_is_vmlocked(NULL, pg));
- header = cl_object_header(pg->cp_obj);
- parent = pg->cp_parent;
- child = pg->cp_child;
- owner = pg->cp_owner;
-
- return cl_page_in_use(pg) &&
- ergo(parent, parent->cp_child == pg) &&
- ergo(child, child->cp_parent == pg) &&
- ergo(child, pg->cp_obj != child->cp_obj) &&
- ergo(parent, pg->cp_obj != parent->cp_obj) &&
- ergo(owner && parent,
- parent->cp_owner == pg->cp_owner->ci_parent) &&
- ergo(owner && child, child->cp_owner->ci_parent == owner) &&
- /*
- * Either page is early in initialization (has neither child
- * nor parent yet), or it is in the object radix tree.
- */
- ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
- (void *)radix_tree_lookup(&header->coh_tree,
- pg->cp_index) == pg ||
- (!child && !parent));
+ return cl_page_in_use_noref(pg);
}
static void cl_page_state_set0(const struct lu_env *env,
@@ -534,13 +291,9 @@ static void cl_page_state_set0(const struct lu_env *env,
old = page->cp_state;
PASSERT(env, page, allowed_transitions[old][state]);
CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- for (; page; page = page->cp_child) {
- PASSERT(env, page, page->cp_state == old);
- PASSERT(env, page,
- equi(state == CPS_OWNED, page->cp_owner));
-
- cl_page_state_set_trust(page, state);
- }
+ PASSERT(env, page, page->cp_state == old);
+ PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
+ cl_page_state_set_trust(page, state);
}
static void cl_page_state_set(const struct lu_env *env,
@@ -574,8 +327,6 @@ EXPORT_SYMBOL(cl_page_get);
*/
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
-
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
atomic_read(&page->cp_ref));
@@ -595,34 +346,10 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
EXPORT_SYMBOL(cl_page_put);
/**
- * Returns a VM page associated with a given cl_page.
- */
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- /*
- * Find uppermost layer with ->cpo_vmpage() method, and return its
- * result.
- */
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_ops->cpo_vmpage)
- return slice->cpl_ops->cpo_vmpage(env, slice);
- }
- page = page->cp_child;
- } while (page);
- LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
 * Returns the cl_page associated with a VM page and a given cl_object.
*/
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
- struct cl_page *top;
struct cl_page *page;
KLASSERT(PageLocked(vmpage));
@@ -633,36 +360,15 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
* bottom-to-top pass.
*/
- /*
- * This loop assumes that ->private points to the top-most page. This
- * can be rectified easily.
- */
- top = (struct cl_page *)vmpage->private;
- if (!top)
- return NULL;
-
- for (page = top; page; page = page->cp_child) {
- if (cl_object_same(page->cp_obj, obj)) {
- cl_page_get_trust(page);
- break;
- }
+ page = (struct cl_page *)vmpage->private;
+ if (page) {
+ cl_page_get_trust(page);
+ LASSERT(page->cp_type == CPT_CACHEABLE);
}
- LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
return page;
}
EXPORT_SYMBOL(cl_vmpage_page);
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
- return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype)
{
@@ -682,26 +388,43 @@ EXPORT_SYMBOL(cl_page_at);
int (*__method)_proto; \
\
__result = 0; \
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) { \
- __result = (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- __page = __page->cp_child; \
- } while (__page && __result == 0); \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) { \
+ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
if (__result > 0) \
__result = 0; \
__result; \
})
+#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
+({ \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ int __result; \
+ ptrdiff_t __op = (_op); \
+ int (*__method)_proto; \
+ \
+ __result = 0; \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) { \
+ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
+ if (__result > 0) \
+ __result = 0; \
+ __result; \
+})
+
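These macros dispatch by byte offset: CL_PAGE_OP() is assumed to expand to the offset of the named method inside struct cl_page_operations, which the (char *)__scan->cpl_ops + __op arithmetic then loads per layer. cl_page_print() later in this file drives CL_PAGE_INVOKE() exactly this way:

	/* dispatch cpo_print across all layers, stopping on first error */
	CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
		       (const struct lu_env *,
			const struct cl_page_slice *,
			void *, lu_printer_t),
		       cookie, printer);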
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
const struct lu_env *__env = (_env); \
@@ -710,18 +433,11 @@ do { \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_child; \
- } while (__page); \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) \
+ (*__method)(__env, __scan, ## __VA_ARGS__); \
+ } \
} while (0)
#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
@@ -732,20 +448,11 @@ do { \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- /* get to the bottom page. */ \
- while (__page->cp_child) \
- __page = __page->cp_child; \
- do { \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_parent; \
- } while (__page); \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) \
+ (*__method)(__env, __scan, ## __VA_ARGS__); \
+ } \
} while (0)
static int cl_page_invoke(const struct lu_env *env,
@@ -771,20 +478,17 @@ static void cl_page_invoid(const struct lu_env *env,
static void cl_page_owner_clear(struct cl_page *page)
{
- for (page = cl_page_top(page); page; page = page->cp_child) {
- if (page->cp_owner) {
- LASSERT(page->cp_owner->ci_owned_nr > 0);
- page->cp_owner->ci_owned_nr--;
- page->cp_owner = NULL;
- page->cp_task = NULL;
- }
+ if (page->cp_owner) {
+ LASSERT(page->cp_owner->ci_owned_nr > 0);
+ page->cp_owner->ci_owned_nr--;
+ page->cp_owner = NULL;
+ page->cp_task = NULL;
}
}
static void cl_page_owner_set(struct cl_page *page)
{
- for (page = cl_page_top(page); page; page = page->cp_child)
- page->cp_owner->ci_owned_nr++;
+ page->cp_owner->ci_owned_nr++;
}
void cl_page_disown0(const struct lu_env *env,
@@ -794,7 +498,7 @@ void cl_page_disown0(const struct lu_env *env,
state = pg->cp_state;
PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
- PINVRNT(env, pg, cl_page_invariant(pg));
+ PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
cl_page_owner_clear(pg);
if (state == CPS_OWNED)
@@ -815,8 +519,9 @@ void cl_page_disown0(const struct lu_env *env,
*/
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
+ struct cl_io *top = cl_io_top((struct cl_io *)io);
LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- return pg->cp_state == CPS_OWNED && pg->cp_owner == io;
+ return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);
@@ -847,7 +552,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, pg, !cl_page_is_owned(pg, io));
- pg = cl_page_top(pg);
io = cl_io_top(io);
if (pg->cp_state == CPS_FREEING) {
@@ -861,7 +565,7 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
if (result == 0) {
PASSERT(env, pg, !pg->cp_owner);
PASSERT(env, pg, !pg->cp_req);
- pg->cp_owner = io;
+ pg->cp_owner = cl_io_top(io);
pg->cp_task = current;
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
@@ -914,12 +618,11 @@ void cl_page_assume(const struct lu_env *env,
{
PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
PASSERT(env, pg, !pg->cp_owner);
- pg->cp_owner = io;
+ pg->cp_owner = cl_io_top(io);
pg->cp_task = current;
cl_page_owner_set(pg);
cl_page_state_set(env, pg, CPS_OWNED);
@@ -943,7 +646,6 @@ void cl_page_unassume(const struct lu_env *env,
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_owner_clear(pg);
cl_page_state_set(env, pg, CPS_CACHED);
@@ -968,9 +670,9 @@ EXPORT_SYMBOL(cl_page_unassume);
void cl_page_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
+ pg->cp_state == CPS_FREEING);
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_disown0(env, io, pg);
}
@@ -1001,12 +703,8 @@ EXPORT_SYMBOL(cl_page_discard);
 * pages, e.g., in an error-handling cl_page_find()->cl_page_delete0()
* path. Doesn't check page invariant.
*/
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix)
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
- struct cl_page *tmp = pg;
-
- PASSERT(env, pg, pg == cl_page_top(pg));
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
/*
@@ -1014,41 +712,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
*/
cl_page_owner_clear(pg);
- /*
-	 * Unexport the page first, before freeing it, so that
-	 * the page content is considered invalid.
-	 * We have to do this because a CPS_FREEING cl_page may
-	 * NOT be under the protection of a cl_lock.
-	 * Afterwards, if this page is found by other threads,
-	 * it will be forced to be re-read.
- */
- cl_page_export(env, pg, 0);
cl_page_state_set0(env, pg, CPS_FREEING);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *, const struct cl_page_slice *));
-
- if (tmp->cp_type == CPT_CACHEABLE) {
- if (!radix)
- /* !radix means that @pg is not yet in the radix tree,
- * skip removing it.
- */
- tmp = pg->cp_child;
- for (; tmp; tmp = tmp->cp_child) {
- void *value;
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(tmp->cp_obj);
- spin_lock(&hdr->coh_page_guard);
- value = radix_tree_delete(&hdr->coh_tree,
- tmp->cp_index);
- PASSERT(env, tmp, value == tmp);
- PASSERT(env, tmp, hdr->coh_pages > 0);
- hdr->coh_pages--;
- spin_unlock(&hdr->coh_page_guard);
- cl_page_put(env, tmp);
- }
- }
+ CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
+ (const struct lu_env *,
+ const struct cl_page_slice *));
}
/**
@@ -1070,7 +738,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
* Once page reaches cl_page_state::CPS_FREEING, all remaining references will
* drain after some time, at which point page will be recycled.
*
- * \pre pg == cl_page_top(pg)
* \pre VM page is locked
* \post pg->cp_state == CPS_FREEING
*
@@ -1079,30 +746,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_delete0(env, pg, 1);
+ cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
/**
- * Unmaps page from user virtual memory.
- *
- * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to unmap page from user space
- * virtual memory.
- *
- * \see cl_page_operations::cpo_unmap()
- */
-int cl_page_unmap(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
-}
-EXPORT_SYMBOL(cl_page_unmap);
-
-/**
* Marks page up-to-date.
*
* Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
@@ -1129,7 +777,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
int result;
const struct cl_page_slice *slice;
- pg = cl_page_top_trusted((struct cl_page *)pg);
slice = container_of(pg->cp_layers.next,
const struct cl_page_slice, cpl_linkage);
PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
@@ -1241,7 +888,7 @@ void cl_page_completion(const struct lu_env *env,
cl_page_put(env, pg);
if (anchor)
- cl_sync_io_note(anchor, ioret);
+ cl_sync_io_note(env, anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
@@ -1276,44 +923,6 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
EXPORT_SYMBOL(cl_page_make_ready);
/**
- * Notify layers that high level io decided to place this page into a cache
- * for future transfer.
- *
- * The layer implementing transfer engine (osc) has to register this page in
- * its queues.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_cache_add()
- */
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
-{
- const struct cl_page_slice *scan;
- int result = 0;
-
- PINVRNT(env, pg, crt < CRT_NR);
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- if (crt >= CRT_NR)
- return -EINVAL;
-
- list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
- if (!scan->cpl_ops->io[crt].cpo_cache_add)
- continue;
-
- result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
- if (result != 0)
- break;
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_cache_add);
-
-/**
 * Called if a page is being written back at the kernel's initiative.
*
* \pre cl_page_is_owned(pg, io)
@@ -1344,68 +953,21 @@ EXPORT_SYMBOL(cl_page_flush);
* \see cl_page_operations::cpo_is_under_lock()
*/
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct cl_page *page, pgoff_t *max_index)
{
int rc;
PINVRNT(env, page, cl_page_invariant(page));
- rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- PASSERT(env, page, rc != 0);
+ rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
+ (const struct lu_env *,
+ const struct cl_page_slice *,
+ struct cl_io *, pgoff_t *),
+ io, max_index);
return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);
-static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- cl_page_own(env, io, page);
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- return CLP_GANG_OKAY;
-}
-
-/**
- * Purges all cached pages belonging to the object \a obj.
- */
-int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
-{
- struct cl_thread_info *info;
- struct cl_object *obj = cl_object_top(clobj);
- struct cl_io *io;
- int result;
-
- info = cl_env_info(env);
- io = &info->clt_io;
-
- /*
- * initialize the io. This is ugly since we never do IO in this
- * function, we just make cl_page_list functions happy. -jay
- */
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, obj);
- if (result != 0) {
- cl_io_fini(env, io);
- return io->ci_result;
- }
-
- do {
- result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
- page_prune_cb, NULL);
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
-
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_pages_prune);
-
/**
* Tells transfer engine that only part of a page is to be transmitted.
*
@@ -1431,9 +993,8 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
+ "page@%p[%d %p %d %d %d %p %p %#x]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_index, pg->cp_parent, pg->cp_child,
pg->cp_state, pg->cp_error, pg->cp_type,
pg->cp_owner, pg->cp_req, pg->cp_flags);
}
@@ -1445,11 +1006,7 @@ EXPORT_SYMBOL(cl_page_header_print);
void cl_page_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
- struct cl_page *scan;
-
- for (scan = cl_page_top((struct cl_page *)pg); scan;
- scan = scan->cp_child)
- cl_page_header_print(env, cookie, printer, scan);
+ cl_page_header_print(env, cookie, printer, pg);
CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
(const struct lu_env *env,
const struct cl_page_slice *slice,
@@ -1509,21 +1066,13 @@ EXPORT_SYMBOL(cl_page_size);
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
+ struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops)
{
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
+ slice->cpl_index = index;
slice->cpl_ops = ops;
slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
-
-int cl_page_init(void)
-{
- return 0;
-}
-
-void cl_page_fini(void)
-{
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index c2cf01596..f48816af8 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -60,6 +60,8 @@ unsigned int obd_dump_on_eviction;
EXPORT_SYMBOL(obd_dump_on_eviction);
unsigned int obd_max_dirty_pages = 256;
EXPORT_SYMBOL(obd_max_dirty_pages);
+atomic_t obd_unstable_pages;
+EXPORT_SYMBOL(obd_unstable_pages);
atomic_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
@@ -335,7 +337,6 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
err = 0;
goto out;
}
-
}
if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
@@ -461,7 +462,7 @@ static int obd_init_checks(void)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
+ if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
(__u64)PAGE_SIZE);
ret = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 43a7f7a79..e4edfb2c0 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -68,8 +68,8 @@ int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
LASSERT(addr);
- ne_off = le64_to_cpu (off);
- id = le64_to_cpu (id);
+ ne_off = le64_to_cpu(off);
+ id = le64_to_cpu(id);
if (memcmp(addr, (char *)&ne_off, LPDS)) {
CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n",
who, id, off, *(__u64 *)addr, ne_off);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf97b8f06..d95f11d62 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -604,7 +604,6 @@ int obd_init_caches(void)
out:
obd_cleanup_caches();
return -ENOMEM;
-
}
/* map connection to client */
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 8eddf206f..2cd452246 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -158,9 +158,7 @@ int obd_ioctl_popdata(void __user *arg, void *data, int len)
{
int err;
- err = copy_to_user(arg, data, len);
- if (err)
- err = -EFAULT;
+ err = copy_to_user(arg, data, len) ? -EFAULT : 0;
return err;
}
EXPORT_SYMBOL(obd_ioctl_popdata);
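The obd_ioctl_popdata() cleanup above leans on copy_to_user()'s return convention: it returns the number of bytes it failed to copy (zero on success), never a negative errno, so any nonzero result must be mapped to -EFAULT by the caller. A minimal sketch of the same idiom, with my_popdata() as a hypothetical stand-in:

#include <linux/uaccess.h>

/* Hypothetical helper showing the copy_to_user() idiom adopted above:
 * copy_to_user() returns the number of bytes left uncopied, so a
 * nonzero result is collapsed to -EFAULT. */
static int my_popdata(void __user *arg, const void *data, int len)
{
	return copy_to_user(arg, data, len) ? -EFAULT : 0;
}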
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 992573eae..79194d8cb 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -265,7 +265,6 @@ repeat:
for (rec = (struct llog_rec_hdr *)buf;
(char *)rec < buf + LLOG_CHUNK_SIZE;
rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
-
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index d93f42fee..5a1eae1de 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -49,7 +49,7 @@
static const char * const obd_connect_names[] = {
"read_only",
"lov_index",
- "unused",
+ "connect_from_mds",
"write_grant",
"server_lock",
"version",
@@ -122,6 +122,56 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
}
EXPORT_SYMBOL(obd_connect_flags2str);
+static void obd_connect_data_seqprint(struct seq_file *m,
+ struct obd_connect_data *ocd)
+{
+ int flags;
+
+ LASSERT(ocd);
+ flags = ocd->ocd_connect_flags;
+
+ seq_printf(m, " connect_data:\n"
+ " flags: %llx\n"
+ " instance: %u\n",
+ ocd->ocd_connect_flags,
+ ocd->ocd_instance);
+ if (flags & OBD_CONNECT_VERSION)
+ seq_printf(m, " target_version: %u.%u.%u.%u\n",
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version));
+ if (flags & OBD_CONNECT_MDS)
+ seq_printf(m, " mdt_index: %d\n", ocd->ocd_group);
+ if (flags & OBD_CONNECT_GRANT)
+ seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant);
+ if (flags & OBD_CONNECT_INDEX)
+ seq_printf(m, " target_index: %u\n", ocd->ocd_index);
+ if (flags & OBD_CONNECT_BRW_SIZE)
+ seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size);
+ if (flags & OBD_CONNECT_IBITS)
+ seq_printf(m, " ibits_known: %llx\n",
+ ocd->ocd_ibits_known);
+ if (flags & OBD_CONNECT_GRANT_PARAM)
+ seq_printf(m, " grant_block_size: %d\n"
+ " grant_inode_size: %d\n"
+ " grant_extent_overhead: %d\n",
+ ocd->ocd_blocksize,
+ ocd->ocd_inodespace,
+ ocd->ocd_grant_extent);
+ if (flags & OBD_CONNECT_TRANSNO)
+ seq_printf(m, " first_transno: %llx\n",
+ ocd->ocd_transno);
+ if (flags & OBD_CONNECT_CKSUM)
+ seq_printf(m, " cksum_types: %#x\n",
+ ocd->ocd_cksum_types);
+ if (flags & OBD_CONNECT_MAX_EASIZE)
+ seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize);
+ if (flags & OBD_CONNECT_MAXBYTES)
+ seq_printf(m, " max_object_bytes: %llx\n",
+ ocd->ocd_maxbytes);
+}
+
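To illustrate the new obd_connect_data_seqprint() output: for an import whose connect flags happen to include only OBD_CONNECT_VERSION and OBD_CONNECT_BRW_SIZE, the proc file would gain a block along these lines (flag and version values are placeholders, and field padding is approximate):

  connect_data:
     flags: 0x40000401
     instance: 1
     target_version: 2.7.58.0
     max_brw_size: 1048576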
int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
int mult)
{
@@ -624,6 +674,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
struct obd_device *obd = data;
struct obd_import *imp;
struct obd_import_conn *conn;
+ struct obd_connect_data *ocd;
int j;
int k;
int rw = 0;
@@ -635,9 +686,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
return rc;
imp = obd->u.cli.cl_import;
+ ocd = &imp->imp_connect_data;
- seq_printf(m,
- "import:\n"
+ seq_printf(m, "import:\n"
" name: %s\n"
" target: %s\n"
" state: %s\n"
@@ -649,9 +700,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
imp->imp_connect_data.ocd_instance);
obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
", ");
- seq_printf(m,
- " ]\n"
- " import_flags: [ ");
+ seq_printf(m, " ]\n");
+ obd_connect_data_seqprint(m, ocd);
+ seq_printf(m, " import_flags: [ ");
obd_import_flags2str(imp, m);
seq_printf(m,
@@ -694,8 +745,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
do_div(sum, ret.lc_count);
ret.lc_sum = sum;
- } else
+ } else {
ret.lc_sum = 0;
+ }
seq_printf(m,
" rpcs:\n"
" inflight: %u\n"
@@ -1471,10 +1523,10 @@ EXPORT_SYMBOL(lprocfs_oh_tally);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
{
- unsigned int val;
+ unsigned int val = 0;
- for (val = 0; ((1 << val) < value) && (val <= OBD_HIST_MAX); val++)
- ;
+ if (likely(value != 0))
+ val = min(fls(value - 1), OBD_HIST_MAX);
lprocfs_oh_tally(oh, val);
}
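The lprocfs_oh_tally_log2() rewrite computes the histogram bucket as ceil(log2(value)) directly: fls(value - 1) is the position of the highest set bit of value - 1, which equals the smallest val with (1 << val) >= value, and min() now caps the bucket at OBD_HIST_MAX (the old loop could step one past it for very large values). A standalone sketch checking the two forms agree below the cap, with a userspace fls() stand-in and an assumed OBD_HIST_MAX:

#include <assert.h>
#include <stdio.h>

#define OBD_HIST_MAX 32			/* assumed value for this sketch */

static int fls_u(unsigned int x)	/* userspace stand-in for kernel fls() */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int bucket_loop(unsigned int value)	/* the old loop */
{
	int val;

	for (val = 0; ((1u << val) < value) && (val <= OBD_HIST_MAX); val++)
		;
	return val;
}

static int bucket_fls(unsigned int value)	/* the new computation */
{
	int val = 0;

	if (value != 0)
		val = fls_u(value - 1);
	return val < OBD_HIST_MAX ? val : OBD_HIST_MAX;
}

int main(void)
{
	unsigned int v;

	for (v = 1; v < (1u << 20); v++)
		assert(bucket_loop(v) == bucket_fls(v));
	printf("buckets agree for v in [1, 2^20)\n");
	return 0;
}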
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 978568ada..e04385760 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -55,6 +55,7 @@
#include "../include/lustre_disk.h"
#include "../include/lustre_fid.h"
#include "../include/lu_object.h"
+#include "../include/cl_object.h"
#include "../include/lu_ref.h"
#include <linux/list.h>
@@ -103,7 +104,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
if (lu_object_is_dying(top)) {
-
/*
* somebody may be waiting for this, currently only
* used for cl_object, see cl_object_put_last().
@@ -357,7 +357,6 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
if (count > 0 && --count == 0)
break;
-
}
cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
cond_resched();
@@ -715,8 +714,9 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
obj = lu_object_locate(top->lo_header, dev->ld_type);
if (!obj)
lu_object_put(env, top);
- } else
+ } else {
obj = top;
+ }
return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
@@ -935,7 +935,7 @@ static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
* Initialize site \a s, with \a d as the top level device.
*/
#define LU_SITE_BITS_MIN 12
-#define LU_SITE_BITS_MAX 24
+#define LU_SITE_BITS_MAX 19
/**
* total 256 buckets, we don't want too many buckets because:
* - consume too much memory
@@ -1468,6 +1468,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
/*
* XXX layering violation.
*/
+ cl_env_cache_purge(~0);
key->lct_tags |= LCT_QUIESCENT;
/*
* XXX memory barrier has to go here.
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5f812460b..b1abe023b 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -163,8 +163,9 @@ int class_del_uuid(const char *uuid)
break;
}
}
- } else
+ } else {
list_splice_init(&g_uuid_list, &deathrow);
+ }
spin_unlock(&g_uuid_lock);
if (uuid && list_empty(&deathrow)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 5395e994d..cb1d65c3d 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -606,7 +606,7 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
return rc;
}
-LIST_HEAD(lustre_profile_list);
+static LIST_HEAD(lustre_profile_list);
struct lustre_profile *class_get_profile(const char *prof)
{
@@ -961,7 +961,6 @@ int class_process_config(struct lustre_cfg *lcfg)
default: {
err = obd_process_config(obd, sizeof(*lcfg), lcfg);
goto out;
-
}
}
out:
@@ -1001,7 +1000,13 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
for (i = 1; i < lcfg->lcfg_bufcount; i++) {
key = lustre_cfg_buf(lcfg, i);
/* Strip off prefix */
- class_match_param(key, prefix, &key);
+ if (class_match_param(key, prefix, &key)) {
+ /*
+ * If the prefix doesn't match, return error so we
+ * can pass it down the stack
+ */
+ return -ENOSYS;
+ }
sval = strchr(key, '=');
if (!sval || (*(sval + 1) == 0)) {
CERROR("Can't parse param %s (missing '=')\n", key);
@@ -1034,18 +1039,14 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
j++;
}
if (!matched) {
- /* If the prefix doesn't match, return error so we
- * can pass it down the stack
- */
- if (strnchr(key, keylen, '.'))
- return -ENOSYS;
- CERROR("%s: unknown param %s\n",
+ CERROR("%.*s: %s unknown param %s\n",
+ (int)strlen(prefix) - 1, prefix,
(char *)lustre_cfg_string(lcfg, 0), key);
/* rc = -EINVAL; continue parsing other params */
skip++;
} else if (rc < 0) {
- CERROR("writing proc entry %s err %d\n",
- var->name, rc);
+ CERROR("%s: error writing proc entry '%s': rc = %d\n",
+ prefix, var->name, rc);
rc = 0;
} else {
CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
@@ -1350,6 +1351,7 @@ static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf,
lustre_cfg_string(lcfg, i));
}
}
+ ptr += snprintf(ptr, end - ptr, "\n");
/* return consumed bytes */
rc = ptr - buf;
return rc;
@@ -1368,7 +1370,7 @@ int class_config_dump_handler(const struct lu_env *env,
if (rec->lrh_type == OBD_CFG_REC) {
class_config_parse_rec(rec, outstr, 256);
- LCONSOLE(D_WARNING, " %s\n", outstr);
+ LCONSOLE(D_WARNING, " %s", outstr);
} else {
LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index d3e28a389..e0c90adc7 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -102,7 +102,7 @@ int lustre_process_log(struct super_block *sb, char *logname,
LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
mgc->obd_name, logname, rc);
- if (rc)
+ else if (rc)
LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
mgc->obd_name, logname,
rc);
@@ -307,7 +307,8 @@ int lustre_start_mgc(struct super_block *sb)
while (class_parse_nid(ptr, &nid, &ptr) == 0) {
rc = do_lcfg(mgcname, nid,
LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
- i++;
+ if (!rc)
+ i++;
/* Stop at the first failover nid */
if (*ptr == ':')
break;
@@ -345,16 +346,18 @@ int lustre_start_mgc(struct super_block *sb)
sprintf(niduuid, "%s_%x", mgcname, i);
j = 0;
while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
- j++;
- rc = do_lcfg(mgcname, nid,
- LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
+ rc = do_lcfg(mgcname, nid, LCFG_ADD_UUID, niduuid,
+ NULL, NULL, NULL);
+ if (!rc)
+ ++j;
if (*ptr == ':')
break;
}
if (j > 0) {
rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
niduuid, NULL, NULL, NULL);
- i++;
+ if (!rc)
+ i++;
} else {
/* at ":/fsname" */
break;
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index e6436cb4a..748e33f01 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -185,8 +185,7 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid)
op_data->op_attr.ia_valid |= ATTR_BLOCKS;
}
if (valid & OBD_MD_FLFLAGS) {
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- oa->o_flags;
+ op_data->op_attr_flags = oa->o_flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
}
}
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 1e83669c2..91ef06f17 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -81,7 +81,6 @@ struct echo_object_conf {
struct echo_page {
struct cl_page_slice ep_cl;
struct mutex ep_lock;
- struct page *ep_vmpage;
};
struct echo_lock {
@@ -164,15 +163,13 @@ static int cl_echo_object_put(struct echo_object *eco);
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async);
-static struct echo_thread_info *echo_env_info(const struct lu_env *env);
-
struct echo_thread_info {
struct echo_object_conf eti_conf;
struct lustre_md eti_md;
struct cl_2queue eti_queue;
struct cl_io eti_io;
- struct cl_lock_descr eti_descr;
+ struct cl_lock eti_lock;
struct lu_fid eti_fid;
struct lu_fid eti_fid2;
};
@@ -219,12 +216,6 @@ static struct lu_kmem_descr echo_caches[] = {
*
* @{
*/
-static struct page *echo_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2echo_page(slice)->ep_vmpage;
-}
-
static int echo_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *io, int nonblock)
@@ -273,12 +264,10 @@ static void echo_page_completion(const struct lu_env *env,
static void echo_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct echo_page *ep = cl2echo_page(slice);
struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
- struct page *vmpage = ep->ep_vmpage;
atomic_dec(&eco->eo_npages);
- put_page(vmpage);
+ put_page(slice->cpl_page->cp_vmpage);
}
static int echo_page_prep(const struct lu_env *env,
@@ -295,7 +284,8 @@ static int echo_page_print(const struct lu_env *env,
struct echo_page *ep = cl2echo_page(slice);
(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ ep, mutex_is_locked(&ep->ep_lock),
+ slice->cpl_page->cp_vmpage);
return 0;
}
@@ -303,7 +293,6 @@ static const struct cl_page_operations echo_page_ops = {
.cpo_own = echo_page_own,
.cpo_disown = echo_page_disown,
.cpo_discard = echo_page_discard,
- .cpo_vmpage = echo_page_vmpage,
.cpo_fini = echo_page_fini,
.cpo_print = echo_page_print,
.cpo_is_vmlocked = echo_page_is_vmlocked,
@@ -336,26 +325,8 @@ static void echo_lock_fini(const struct lu_env *env,
kmem_cache_free(echo_lock_kmem, ecl);
}
-static void echo_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
-}
-
-static int echo_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *unused)
-{
- return 1;
-}
-
static struct cl_lock_operations echo_lock_ops = {
.clo_fini = echo_lock_fini,
- .clo_delete = echo_lock_delete,
- .clo_fits_into = echo_lock_fits_into
};
/** @} echo_lock */
@@ -367,15 +338,14 @@ static struct cl_lock_operations echo_lock_ops = {
* @{
*/
static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
- ep->ep_vmpage = vmpage;
- get_page(vmpage);
+ get_page(page->cp_vmpage);
mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
atomic_inc(&eco->eo_npages);
return 0;
}
@@ -568,6 +538,8 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
obj = &echo_obj2cl(eco)->co_lu;
cl_object_header_init(hdr);
+ hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
lu_object_init(obj, &hdr->coh_lu, dev);
lu_object_add_top(&hdr->coh_lu, obj);
@@ -694,8 +666,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
struct obd_device *obd = NULL; /* to keep compiler happy */
struct obd_device *tgt;
const char *tgt_type_name;
- int rc;
- int cleanup = 0;
+ int rc, err;
ed = kzalloc(sizeof(*ed), GFP_NOFS);
if (!ed) {
@@ -703,16 +674,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out;
}
- cleanup = 1;
cd = &ed->ed_cl;
rc = cl_device_init(cd, t);
if (rc)
- goto out;
+ goto out_free;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
cd->cd_ops = &echo_device_cl_ops;
- cleanup = 2;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
LASSERT(obd);
LASSERT(env);
@@ -722,28 +691,25 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
CERROR("Can not find tgt device %s\n",
lustre_cfg_string(cfg, 1));
rc = -ENODEV;
- goto out;
+ goto out_device_fini;
}
next = tgt->obd_lu_dev;
if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
CERROR("echo MDT client must be run on server\n");
rc = -EOPNOTSUPP;
- goto out;
+ goto out_device_fini;
}
rc = echo_site_init(env, ed);
if (rc)
- goto out;
-
- cleanup = 3;
+ goto out_device_fini;
rc = echo_client_setup(env, obd, cfg);
if (rc)
- goto out;
+ goto out_site_fini;
ed->ed_ec = &obd->u.echo_client;
- cleanup = 4;
/* if echo client is to be stacked upon ost device, the next is
* NULL since ost is not a clio device so far
@@ -755,7 +721,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
if (next) {
if (next->ld_site) {
rc = -EBUSY;
- goto out;
+ goto out_cleanup;
}
next->ld_site = &ed->ed_site->cs_lu;
@@ -763,7 +729,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
next->ld_type->ldt_name,
NULL);
if (rc)
- goto out;
+ goto out_cleanup;
} else {
LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
@@ -771,27 +737,19 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
ed->ed_next = next;
return &cd->cd_lu_dev;
-out:
- switch (cleanup) {
- case 4: {
- int rc2;
-
- rc2 = echo_client_cleanup(obd);
- if (rc2)
- CERROR("Cleanup obd device %s error(%d)\n",
- obd->obd_name, rc2);
- }
- case 3:
- echo_site_fini(env, ed);
- case 2:
- cl_device_fini(&ed->ed_cl);
- case 1:
- kfree(ed);
- case 0:
- default:
- break;
- }
+out_cleanup:
+ err = echo_client_cleanup(obd);
+ if (err)
+ CERROR("Cleanup obd device %s error(%d)\n",
+ obd->obd_name, err);
+out_site_fini:
+ echo_site_fini(env, ed);
+out_device_fini:
+ cl_device_fini(&ed->ed_cl);
+out_free:
+ kfree(ed);
+out:
return ERR_PTR(rc);
}
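The echo_device_alloc() error path above replaces the counter-driven switch (the removed cleanup variable) with the kernel's conventional goto-unwind ladder: each setup step gets a label, and a failure jumps to the label that tears down exactly what was initialized so far. A minimal standalone sketch of the shape, with hypothetical resource_*() stand-ins:

#include <stdio.h>

/* Hypothetical init/fini pairs standing in for cl_device_init(),
 * echo_site_init(), echo_client_setup() and their cleanup calls. */
static int resource_a_init(void) { return 0; }
static void resource_a_fini(void) { }
static int resource_b_init(void) { return 0; }
static void resource_b_fini(void) { }
static int resource_c_init(void) { return -1; }	/* force the unwind */

static int setup_device(void)
{
	int rc;

	rc = resource_a_init();
	if (rc)
		goto out;		/* nothing to undo yet */
	rc = resource_b_init();
	if (rc)
		goto out_a;		/* undo A only */
	rc = resource_c_init();
	if (rc)
		goto out_b;		/* undo B, then A */
	return 0;

out_b:
	resource_b_fini();
out_a:
	resource_a_fini();
out:
	return rc;
}

int main(void)
{
	printf("setup_device() = %d\n", setup_device());
	return 0;
}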
@@ -819,16 +777,7 @@ static void echo_lock_release(const struct lu_env *env,
{
struct cl_lock *clk = echo_lock2cl(ecl);
- cl_lock_get(clk);
- cl_unuse(env, clk);
- cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
- if (!still_used) {
- cl_lock_mutex_get(env, clk);
- cl_lock_cancel(env, clk);
- cl_lock_delete(env, clk);
- cl_lock_mutex_put(env, clk);
- }
- cl_lock_put(env, clk);
+ cl_lock_release(env, clk);
}
static struct lu_device *echo_device_free(const struct lu_env *env,
@@ -1022,9 +971,11 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
info = echo_env_info(env);
io = &info->eti_io;
- descr = &info->eti_descr;
+ lck = &info->eti_lock;
obj = echo_obj2cl(eco);
+ memset(lck, 0, sizeof(*lck));
+ descr = &lck->cll_descr;
descr->cld_obj = obj;
descr->cld_start = cl_index(obj, start);
descr->cld_end = cl_index(obj, end);
@@ -1032,25 +983,20 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
descr->cld_enq_flags = enqflags;
io->ci_obj = obj;
- lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
- if (lck) {
+ rc = cl_lock_request(env, io, lck);
+ if (rc == 0) {
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
struct echo_lock *el;
- rc = cl_wait(env, lck);
- if (rc == 0) {
- el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
- } else {
- cl_lock_release(env, lck, "ec enqueue", current);
+ el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
+ spin_lock(&ec->ec_lock);
+ if (list_empty(&el->el_chain)) {
+ list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
}
+ atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
}
return rc;
}
@@ -1085,22 +1031,17 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
return 0;
}
-static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type unused, struct cl_2queue *queue)
+static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
{
- struct cl_page *clp;
- struct cl_page *temp;
- int result = 0;
+ struct echo_thread_info *info;
+ struct cl_2queue *queue;
- cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
- int rc;
+ info = echo_env_info(env);
+ LASSERT(io == &info->eti_io);
- rc = cl_page_cache_add(env, io, clp, CRT_WRITE);
- if (rc == 0)
- continue;
- result = result ?: rc;
- }
- return result;
+ queue = &info->eti_queue;
+ cl_page_list_add(&queue->c2_qout, page);
}
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
@@ -1119,7 +1060,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
int rc;
int i;
- LASSERT((offset & ~CFS_PAGE_MASK) == 0);
+ LASSERT((offset & ~PAGE_MASK) == 0);
LASSERT(ed->ed_next);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -1179,7 +1120,9 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
async = async && (typ == CRT_WRITE);
if (async)
- rc = cl_echo_async_brw(env, io, typ, queue);
+ rc = cl_io_commit_async(env, io, &queue->c2_qin,
+ 0, PAGE_SIZE,
+ echo_commit_callback);
else
rc = cl_io_submit_sync(env, io, typ, queue, 0);
CDEBUG(D_INFO, "echo_client %s write returns %d\n",
@@ -1387,7 +1330,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
if (count <= 0 ||
- (count & (~CFS_PAGE_MASK)) != 0)
+ (count & (~PAGE_MASK)) != 0)
return -EINVAL;
/* XXX think again with misaligned I/O */
@@ -1409,7 +1352,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
for (i = 0, pgp = pga, off = offset;
i < npages;
i++, pgp++, off += PAGE_SIZE) {
-
LASSERT(!pgp->pg); /* for cleanup */
rc = -ENOMEM;
@@ -1470,7 +1412,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
- if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
+ if (count <= 0 || (count & (~PAGE_MASK)) != 0)
return -EINVAL;
npages = batch >> PAGE_SHIFT;
@@ -1886,7 +1828,6 @@ static int __init obdecho_init(void)
static void /*__exit*/ obdecho_exit(void)
{
echo_client_exit();
-
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index a3358c39b..33a113213 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -121,9 +121,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
atomic_add(added, &osc_pool_req_count);
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
@@ -139,9 +139,9 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj,
long val;
int mult;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
val = cli->cl_dirty_max;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
mult = 1 << 20;
return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
@@ -169,10 +169,10 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
@@ -222,8 +222,16 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
return -ERANGE;
rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
- if (rc > 0)
- (void)osc_lru_shrink(cli, rc);
+ if (rc > 0) {
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ (void)osc_lru_shrink(env, cli, rc, true);
+ cl_env_put(env, &refcheck);
+ }
+ }
return count;
}
@@ -239,9 +247,9 @@ static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_dirty);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -256,9 +264,9 @@ static ssize_t cur_grant_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_avail_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -279,12 +287,12 @@ static ssize_t cur_grant_bytes_store(struct kobject *kobj,
return rc;
/* this is only for shrinking grant */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (val >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return -EINVAL;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
rc = osc_shrink_grant_to_target(cli, val);
@@ -303,9 +311,9 @@ static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -577,14 +585,31 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
return -ERANGE;
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_pages_per_rpc = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
LUSTRE_RW_ATTR(max_pages_per_rpc);
+static ssize_t unstable_stats_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ struct client_obd *cli = &dev->u.cli;
+ int pages, mb;
+
+ pages = atomic_read(&cli->cl_unstable_count);
+ mb = (pages * PAGE_SIZE) >> 20;
+
+ return sprintf(buf, "unstable_pages: %8d\n"
+ "unstable_mb: %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
+
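For a rough sense of the new unstable_stats numbers: with 4 KiB pages, cl_unstable_count = 2560 gives 2560 * 4096 = 10485760 bytes, and 10485760 >> 20 = 10, so the file would read (padding approximate):

unstable_pages:     2560
unstable_mb:          10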
LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
@@ -623,7 +648,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
ktime_get_real_ts64(&now);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
(s64)now.tv_sec, (unsigned long)now.tv_nsec);
@@ -707,7 +732,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
break;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
@@ -794,6 +819,7 @@ static struct attribute *osc_attrs[] = {
&lustre_attr_max_pages_per_rpc.attr,
&lustre_attr_max_rpcs_in_flight.attr,
&lustre_attr_resend_count.attr,
+ &lustre_attr_unstable_stats.attr,
NULL,
};
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5f25bf83d..5a14bea96 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -76,6 +76,8 @@ static inline char *ext_flags(struct osc_extent *ext, char *flags)
*buf++ = ext->oe_rw ? 'r' : 'w';
if (ext->oe_intree)
*buf++ = 'i';
+ if (ext->oe_sync)
+ *buf++ = 'S';
if (ext->oe_srvlock)
*buf++ = 's';
if (ext->oe_hp)
@@ -121,9 +123,13 @@ static const char *oes_strings[] = {
__ext->oe_grants, __ext->oe_nr_pages, \
list_empty_marker(&__ext->oe_pages), \
waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
+ __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
+ if (lvl == D_ERROR && __ext->oe_dlmlock) \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ else \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
} while (0)
#undef EASSERTF
@@ -240,20 +246,25 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
goto out;
}
- if (!ext->oe_osclock && ext->oe_grants > 0) {
+ if (ext->oe_sync && ext->oe_grants > 0) {
rc = 90;
goto out;
}
- if (ext->oe_osclock) {
- struct cl_lock_descr *descr;
+ if (ext->oe_dlmlock) {
+ struct ldlm_extent *extent;
- descr = &ext->oe_osclock->cll_descr;
- if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end)) {
+ extent = &ext->oe_dlmlock->l_policy_data.l_extent;
+ if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
+ extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end))) {
rc = 100;
goto out;
}
+
+ if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))) {
+ rc = 102;
+ goto out;
+ }
}
if (ext->oe_nr_pages > ext->oe_mppr) {
@@ -276,7 +287,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
page_count = 0;
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = oap2cl_page(oap)->cp_index;
+ pgoff_t index = osc_index(oap2osc(oap));
++page_count;
if (index > ext->oe_end || index < ext->oe_start) {
rc = 110;
@@ -359,7 +370,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
ext->oe_state = OES_INV;
INIT_LIST_HEAD(&ext->oe_pages);
init_waitqueue_head(&ext->oe_waitq);
- ext->oe_osclock = NULL;
+ ext->oe_dlmlock = NULL;
return ext;
}
@@ -385,9 +396,11 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
LASSERT(ext->oe_state == OES_INV);
LASSERT(!ext->oe_intree);
- if (ext->oe_osclock) {
- cl_lock_put(env, ext->oe_osclock);
- ext->oe_osclock = NULL;
+ if (ext->oe_dlmlock) {
+ lu_ref_add(&ext->oe_dlmlock->l_reference,
+ "osc_extent", ext);
+ LDLM_LOCK_PUT(ext->oe_dlmlock);
+ ext->oe_dlmlock = NULL;
}
osc_extent_free(ext);
}
@@ -543,7 +556,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
if (cur->oe_max_end != victim->oe_max_end)
return -ERANGE;
- LASSERT(cur->oe_osclock == victim->oe_osclock);
+ LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
@@ -624,10 +637,10 @@ static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
int *grants)
-
{
struct client_obd *cli = osc_cli(obj);
- struct cl_lock *lock;
+ struct osc_lock *olck;
+ struct cl_lock_descr *descr;
struct osc_extent *cur;
struct osc_extent *ext;
struct osc_extent *conflict = NULL;
@@ -644,8 +657,12 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
if (!cur)
return ERR_PTR(-ENOMEM);
- lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
+ olck = osc_env_io(env)->oi_write_osclock;
+ LASSERTF(olck, "page %lu is not covered by lock\n", index);
+ LASSERT(olck->ols_state == OLS_GRANTED);
+
+ descr = &olck->ols_cl.cls_lock->cll_descr;
+ LASSERT(descr->cld_mode >= CLM_WRITE);
LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
@@ -657,19 +674,23 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
max_pages = cli->cl_max_pages_per_rpc;
LASSERT((max_pages & ~chunk_mask) == 0);
max_end = index - (index % max_pages) + max_pages - 1;
- max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
+ max_end = min_t(pgoff_t, max_end, descr->cld_end);
/* initialize new extent by parameters so far */
cur->oe_max_end = max_end;
cur->oe_start = index & chunk_mask;
cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
- if (cur->oe_start < lock->cll_descr.cld_start)
- cur->oe_start = lock->cll_descr.cld_start;
+ if (cur->oe_start < descr->cld_start)
+ cur->oe_start = descr->cld_start;
if (cur->oe_end > max_end)
cur->oe_end = max_end;
- cur->oe_osclock = lock;
cur->oe_grants = 0;
cur->oe_mppr = max_pages;
+ if (olck->ols_dlmlock) {
+ LASSERT(olck->ols_hold);
+ cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
+ lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
+ }
/* grants has been allocated by caller */
LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
@@ -691,7 +712,7 @@ restart:
break;
/* if covering by different locks, no chance to match */
- if (lock != ext->oe_osclock) {
+ if (olck->ols_dlmlock != ext->oe_dlmlock) {
EASSERTF(!overlapped(ext, cur), ext,
EXTSTR"\n", EXTPARA(cur));
@@ -795,7 +816,7 @@ restart:
if (found) {
LASSERT(!conflict);
if (!IS_ERR(found)) {
- LASSERT(found->oe_osclock == cur->oe_osclock);
+ LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
OSC_EXTENT_DUMP(D_CACHE, found,
"found caching ext for %lu.\n", index);
}
@@ -810,7 +831,7 @@ restart:
found = osc_extent_hold(cur);
osc_extent_insert(obj, cur);
OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
- index, lock->cll_descr.cld_end);
+ index, descr->cld_end);
}
osc_object_unlock(obj);
@@ -856,6 +877,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
ext->oe_rc = rc ?: ext->oe_nr_pages;
EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
+
+ osc_lru_add_batch(cli, &ext->oe_pages);
list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
list_del_init(&oap->oap_rpc_item);
list_del_init(&oap->oap_pending_item);
@@ -877,10 +900,9 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check.
*/
- int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
- int count = oap->oap_count + (offset & (blocksize - 1));
- int end = (offset + oap->oap_count) & (blocksize - 1);
-
+ int offset = last_off & ~PAGE_MASK;
+ int count = last_count + (offset & (blocksize - 1));
+ int end = (offset + last_count) & (blocksize - 1);
if (end)
count += blocksize - end;
@@ -943,7 +965,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
"%s: wait ext to %d timedout, recovery in progress?\n",
osc_export(obj)->exp_obd->obd_name, state);
- lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
&lwi);
}
@@ -990,19 +1012,19 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
/* discard all pages with index greater then trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
- struct cl_page *sub = oap2cl_page(oap);
- struct cl_page *page = cl_page_top(sub);
+ pgoff_t index = osc_index(oap2osc(oap));
+ struct cl_page *page = oap2cl_page(oap);
LASSERT(list_empty(&oap->oap_rpc_item));
/* only discard the pages with their index greater than
* trunc_index, and ...
*/
- if (sub->cp_index < trunc_index ||
- (sub->cp_index == trunc_index && partial)) {
+ if (index < trunc_index ||
+ (index == trunc_index && partial)) {
/* accounting how many pages remaining in the chunk
* so that we can calculate grants correctly. */
- if (sub->cp_index >> ppc_bits == trunc_chunk)
+ if (index >> ppc_bits == trunc_chunk)
++pages_in_chunk;
continue;
}
@@ -1013,7 +1035,6 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
lu_ref_add(&page->cp_reference, "truncate", current);
if (cl_page_own(env, io, page) == 0) {
- cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
} else {
@@ -1126,7 +1147,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
+ spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&last->oap_lock);
}
/* for the rest of pages, we don't need to call osf_refresh_count()
@@ -1135,7 +1158,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count = PAGE_SIZE - oap->oap_page_off;
+ spin_lock(&last->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&last->oap_lock);
}
}
@@ -1256,7 +1281,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
int cmd)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct cl_page *page = oap2cl_page(oap);
int result;
LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
@@ -1271,7 +1296,7 @@ static int osc_refresh_count(const struct lu_env *env,
struct osc_async_page *oap, int cmd)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
+ pgoff_t index = osc_index(oap2osc(oap));
struct cl_object *obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
@@ -1288,10 +1313,10 @@ static int osc_refresh_count(const struct lu_env *env,
if (result < 0)
return result;
kms = attr->cat_kms;
- if (cl_offset(obj, page->cp_index) >= kms)
+ if (cl_offset(obj, index) >= kms)
/* catch race with truncate */
return 0;
- else if (cl_offset(obj, page->cp_index + 1) > kms)
+ else if (cl_offset(obj, index + 1) > kms)
/* catch sub-page write at end of file */
return kms % PAGE_SIZE;
else
@@ -1302,14 +1327,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
int cmd, int rc)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct cl_page *page = oap2cl_page(oap);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
cmd &= ~OBD_BRW_NOQUOTA;
- LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
- LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
+ LASSERTF(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ),
+ "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
+ LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE),
+ "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
LASSERT(opg->ops_transfer_pinned);
/*
@@ -1358,22 +1385,28 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
return 0;
}
-#define OSC_DUMP_GRANT(cli, fmt, args...) do { \
+#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \
- "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
+ "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
+ "reserved: %ld, flight: %d } lru {in list: %d, " \
+ "left: %d, waiters: %d }" fmt, \
__tmp->cl_import->imp_obd->obd_name, \
__tmp->cl_dirty, __tmp->cl_dirty_max, \
atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
+ atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
- __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
+ __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
+ atomic_read(&__tmp->cl_lru_in_list), \
+ atomic_read(&__tmp->cl_lru_busy), \
+ atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_SIZE;
@@ -1389,7 +1422,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
return;
}
@@ -1408,7 +1441,7 @@ static void osc_release_write_grant(struct client_obd *cli,
* To avoid sleeping with object lock held, it's good for us allocate enough
* grants before entering into critical section.
*
- * client_obd_list_lock held by caller
+ * spin_lock held by caller
*/
static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
{
@@ -1442,11 +1475,11 @@ static void __osc_unreserve_grant(struct client_obd *cli,
static void osc_unreserve_grant(struct client_obd *cli,
unsigned int reserved, unsigned int unused)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
__osc_unreserve_grant(cli, reserved, unused);
if (unused > 0)
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
@@ -1467,7 +1500,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
{
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
atomic_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty -= nr_pages << PAGE_SHIFT;
cli->cl_lost_grant += lost_grant;
@@ -1479,7 +1512,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
cli->cl_avail_grant += grant;
}
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
cli->cl_avail_grant, cli->cl_dirty);
@@ -1491,9 +1524,9 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
*/
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_release_write_grant(cli, &oap->oap_brw_page);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
@@ -1506,14 +1539,15 @@ static int osc_enter_cache_try(struct client_obd *cli,
{
int rc;
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
rc = osc_reserve_grant(cli, bytes);
if (rc < 0)
return 0;
if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
+ atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit += PAGE_SIZE;
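The modified osc_enter_cache_try() check now counts unstable pages against the same global budget as dirty pages. Worked example using the obd_max_dirty_pages default of 256 shown earlier in class_obd.c: if obd_dirty_pages = 200 and obd_unstable_pages = 56, a new page is refused because 56 + 1 + 200 = 257 > 256, even though the per-client cl_dirty limit may still have room.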
@@ -1532,9 +1566,9 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&ocw->ocw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
@@ -1551,12 +1585,13 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
+ LWI_ON_SIGNAL_NOOP, NULL);
int rc = -EDQUOT;
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream
@@ -1587,7 +1622,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug_async(env, cli, NULL);
@@ -1596,10 +1631,17 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
- /* l_wait_event is interrupted by signal */
+ /* l_wait_event was interrupted by a signal or timed out */
if (rc < 0) {
+ if (rc == -ETIMEDOUT) {
+ OSC_DUMP_GRANT(D_ERROR, cli,
+ "try to reserve %d.\n", bytes);
+ osc_extent_tree_dump(D_ERROR, osc);
+ rc = -EDQUOT;
+ }
+
list_del_init(&ocw.ocw_entry);
goto out;
}
@@ -1615,8 +1657,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
}
}
out:
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
+ spin_unlock(&cli->cl_loi_list_lock);
+ OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
return rc;
}
@@ -1633,8 +1675,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_dirty_pages) + 1 >
- obd_max_dirty_pages)) {
+ (atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
@@ -1776,9 +1818,9 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
int is_ready;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
is_ready = __osc_list_maint(cli, osc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return is_ready;
}
@@ -1799,13 +1841,101 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
ar->ar_force_sync = 1;
ar->ar_min_xid = ptlrpc_sample_next_xid();
return;
-
}
if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
ar->ar_force_sync = 0;
}
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ int page_count = desc->bd_iov_count;
+ int i;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache)
+ return;
+
+ LASSERT(page_count >= 0);
+
+ for (i = 0; i < page_count; i++)
+ dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+
+ atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+
+ atomic_sub(page_count, &cli->cl_unstable_count);
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+
+ atomic_sub(page_count, &obd_unstable_pages);
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+
+ spin_lock(&req->rq_lock);
+ req->rq_committed = 1;
+ req->rq_unstable = 0;
+ spin_unlock(&req->rq_lock);
+
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+}
+
+/* "unstable" page accounting. See: osc_dec_unstable_pages. */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ long page_count = desc->bd_iov_count;
+ int i;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache)
+ return;
+
+ LASSERT(page_count >= 0);
+
+ for (i = 0; i < page_count; i++)
+ inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+ atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+ atomic_add(page_count, &cli->cl_unstable_count);
+
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+ atomic_add(page_count, &obd_unstable_pages);
+
+ spin_lock(&req->rq_lock);
+
+ /*
+ * If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+ * increments we just performed because rq_commit_cb won't be
+ * called again. Otherwise, just set the commit callback so the
+ * unstable page accounting is properly updated when the request
+ * is committed.
+ */
+ if (req->rq_committed) {
+ /* Drop lock before calling osc_dec_unstable_pages */
+ spin_unlock(&req->rq_lock);
+ osc_dec_unstable_pages(req);
+ spin_lock(&req->rq_lock);
+ } else {
+ req->rq_unstable = 1;
+ req->rq_commit_cb = osc_dec_unstable_pages;
+ }
+
+ spin_unlock(&req->rq_lock);
+}
+
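osc_inc_unstable_pages() has to cope with a race: the RPC may commit between the accounting increments and the registration of the decrement callback. The code resolves it under rq_lock by either registering rq_commit_cb or, if rq_committed is already set, undoing the increments immediately. A reduced standalone sketch of that decision (toy types, locking elided; only the shape follows the patch):

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the commit-callback race handling above; struct req
 * and the counter are stand-ins, and the real code holds
 * req->rq_lock around the committed test. */
struct req {
	bool committed;				/* models rq_committed */
	void (*commit_cb)(struct req *);	/* models rq_commit_cb */
};

static int unstable_pages;	/* models the unstable-page counters */

static void dec_unstable(struct req *req)
{
	unstable_pages--;	/* balance the earlier increment */
}

static void inc_unstable(struct req *req)
{
	unstable_pages++;
	if (req->committed)
		dec_unstable(req);		/* commit already ran: undo now */
	else
		req->commit_cb = dec_unstable;	/* undo at commit time */
}

int main(void)
{
	struct req early = { .committed = true };
	struct req late = { .committed = false };

	inc_unstable(&early);		/* nets to zero immediately */
	inc_unstable(&late);		/* stays pending... */
	if (late.commit_cb)
		late.commit_cb(&late);	/* ...until commit fires */
	printf("unstable_pages = %d\n", unstable_pages);	/* 0 */
	return 0;
}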
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request
*/
@@ -1817,6 +1947,9 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
__u64 xid = 0;
if (oap->oap_request) {
+ if (!rc)
+ osc_inc_unstable_pages(oap->oap_request);
+
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
@@ -1829,10 +1962,10 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_process_ar(&cli->cl_ar, xid, rc);
osc_process_ar(&loi->loi_ar, xid, rc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
rc = osc_completion(env, oap, oap->oap_cmd, rc);
@@ -2133,9 +2266,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
}
cl_object_get(obj);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- lu_object_ref_add_at(&obj->co_lu, &link, "check",
- current);
+ spin_unlock(&cli->cl_loi_list_lock);
+ lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
/* attempt some read/write balancing by alternating between
* reads and writes in an object. The makes_rpc checks here
@@ -2178,11 +2310,10 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
osc_object_unlock(osc);
osc_list_maint(cli, osc);
- lu_object_ref_del_at(&obj->co_lu, &link, "check",
- current);
+ lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
cl_object_put(env, obj);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
}
}
@@ -2199,9 +2330,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
* potential stack overrun problem. LU-2859
*/
atomic_inc(&cli->cl_lru_shrinkers);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
@@ -2238,7 +2369,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_page = page;
oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
+ LASSERT(!(offset & ~PAGE_MASK));
if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
@@ -2306,16 +2437,23 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
return rc;
}
+ if (osc_over_unstable_soft_limit(cli))
+ brw_flags |= OBD_BRW_SOFT_SYNC;
+
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
oap->oap_count = ops->ops_to - ops->ops_from;
+ /*
+ * No need to hold a lock here,
+ * since this page is not in any list yet.
+ */
oap->oap_async_flags = 0;
oap->oap_brw_flags = brw_flags;
OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
- index = oap2cl_page(oap)->cp_index;
+ index = osc_index(oap2osc(oap));
/* Add this page into extent by the following steps:
* 1. if there exists an active extent for this IO, mostly this page
@@ -2334,9 +2472,9 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
grants = 0;
/* it doesn't need any grant to dirty this page */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = osc_enter_cache_try(cli, oap, grants, 0);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (rc == 0) { /* try failed */
grants = 0;
need_release = 1;
@@ -2427,21 +2565,21 @@ int osc_teardown_async_page(const struct lu_env *env,
LASSERT(oap->oap_magic == OAP_MAGIC);
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
- oap, ops, oap2cl_page(oap)->cp_index);
+ oap, ops, osc_index(oap2osc(oap)));
osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
- ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
+ ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details.
*/
if (ext && ext->oe_state != OES_TRUNC) {
OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
- oap2cl_page(oap)->cp_index);
+ osc_index(oap2osc(oap)));
rc = -EBUSY;
}
}
@@ -2464,7 +2602,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_extent *ext = NULL;
struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
struct cl_page *cp = ops->ops_cl.cpl_page;
- pgoff_t index = cp->cp_index;
+ pgoff_t index = osc_index(ops);
struct osc_async_page *oap = &ops->ops_oap;
bool unplug = false;
int rc = 0;
@@ -2479,8 +2617,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
switch (ext->oe_state) {
case OES_RPC:
case OES_LOCK_DONE:
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
- "flush an in-rpc page?\n");
+ CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
LASSERT(0);
break;
case OES_LOCKING:
@@ -2506,7 +2643,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
break;
}
- rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
+ rc = cl_page_prep(env, io, cp, CRT_WRITE);
if (rc)
goto out;
@@ -2550,7 +2687,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
struct osc_extent *ext;
struct osc_extent *found = NULL;
struct list_head *plist;
- pgoff_t index = oap2cl_page(oap)->cp_index;
+ pgoff_t index = osc_index(ops);
int rc = -EBUSY;
int cmd;
@@ -2613,12 +2750,12 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
pgoff_t end = 0;
list_for_each_entry(oap, list, oap_pending_item) {
- struct cl_page *cp = oap2cl_page(oap);
+ pgoff_t index = osc_index(oap2osc(oap));
- if (cp->cp_index > end)
- end = cp->cp_index;
- if (cp->cp_index < start)
- start = cp->cp_index;
+ if (index > end)
+ end = index;
+ if (index < start)
+ start = index;
++page_count;
mppr <<= (page_count > mppr);
}
@@ -2633,6 +2770,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
}
ext->oe_rw = !!(cmd & OBD_BRW_READ);
+ ext->oe_sync = 1;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = ext->oe_max_end = end;
@@ -2988,7 +3126,200 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
result = rc;
}
- OSC_IO_DEBUG(obj, "cache page out.\n");
+ OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
+ return result;
+}
+
+/**
+ * Invokes \a cb on every cached page within [start, end] of \a obj.
+ *
+ * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
+ * crucial in the face of [offset, EOF] locks.
+ *
+ * Stops and returns CLP_GANG_RESCHED before hogging the CPU for too long;
+ * in that case the caller should retry from where the lookup left off.
+ */
+int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata)
+{
+ struct osc_page *ops;
+ void **pvec;
+ pgoff_t idx;
+ unsigned int nr;
+ unsigned int i;
+ unsigned int j;
+ int res = CLP_GANG_OKAY;
+ bool tree_lock = true;
+
+ idx = start;
+ pvec = osc_env_info(env)->oti_pvec;
+ spin_lock(&osc->oo_tree_lock);
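+ /*
+ * Walk the radix tree in batches of up to OTI_PVEC_SIZE pages,
+ * resuming each pass at idx.
+ */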
+ while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
+ idx, OTI_PVEC_SIZE)) > 0) {
+ struct cl_page *page;
+ bool end_of_region = false;
+
+ for (i = 0, j = 0; i < nr; ++i) {
+ ops = pvec[i];
+ pvec[i] = NULL;
+
+ idx = osc_index(ops);
+ if (idx > end) {
+ end_of_region = true;
+ break;
+ }
+
+ page = ops->ops_cl.cpl_page;
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (page->cp_state == CPS_FREEING)
+ continue;
+
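+ /*
+ * Pin the page and tag the reference so it stays valid
+ * after the tree lock is dropped below.
+ */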
+ cl_page_get(page);
+ lu_ref_add_atomic(&page->cp_reference,
+ "gang_lookup", current);
+ pvec[j++] = ops;
+ }
+ ++idx;
+
+ /*
+ * Here a delicate locking dance is performed. Current thread
+ * holds a reference to a page, but has to own it before it
+ * can be placed into queue. Owning implies waiting, so
+ * radix-tree lock is to be released. After a wait one has to
+ * check that pages weren't truncated (cl_page_own() returns
+ * error in the latter case).
+ */
+ spin_unlock(&osc->oo_tree_lock);
+ tree_lock = false;
+
+ for (i = 0; i < j; ++i) {
+ ops = pvec[i];
+ if (res == CLP_GANG_OKAY)
+ res = (*cb)(env, io, ops, cbdata);
+
+ page = ops->ops_cl.cpl_page;
+ lu_ref_del(&page->cp_reference, "gang_lookup", current);
+ cl_page_put(env, page);
+ }
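+ /*
+ * A short batch means radix_tree_gang_lookup() exhausted the
+ * tree, so stop rather than issuing another lookup.
+ */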
+ if (nr < OTI_PVEC_SIZE || end_of_region)
+ break;
+
+ if (res == CLP_GANG_OKAY && need_resched())
+ res = CLP_GANG_RESCHED;
+ if (res != CLP_GANG_OKAY)
+ break;
+
+ spin_lock(&osc->oo_tree_lock);
+ tree_lock = true;
+ }
+ if (tree_lock)
+ spin_unlock(&osc->oo_tree_lock);
+ return res;
+}
+
+/**
+ * Check whether page @page is covered by an additional lock; if not,
+ * discard it.
+ */
+static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct osc_object *osc = cbdata;
+ pgoff_t index;
+
+ index = osc_index(ops);
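+ /*
+ * Pages below oti_fn_index are already known to be covered by a
+ * lock found earlier, so the DLM lookup is skipped for them.
+ */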
+ if (index >= info->oti_fn_index) {
+ struct ldlm_lock *tmp;
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ /* refresh non-overlapped index */
+ tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+ if (tmp) {
+ __u64 end = tmp->l_policy_data.l_extent.end;
+ /* Cache the first-non-overlapped index so as to skip
+ * all pages within [index, oti_fn_index). This is safe
+ * because if tmp lock is canceled, it will discard
+ * these pages.
+ */
+ info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
+ if (end == OBD_OBJECT_EOF)
+ info->oti_fn_index = CL_PAGE_EOF;
+ LDLM_LOCK_PUT(tmp);
+ } else if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+ }
+
+ info->oti_next_index = index + 1;
+ return CLP_GANG_OKAY;
+}
+
+static int discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ /* page is top page. */
+ info->oti_next_index = osc_index(ops) + 1;
+ if (cl_page_own(env, io, page) == 0) {
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(page))));
+
+ /* discard the page */
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+
+ return CLP_GANG_OKAY;
+}
+
+/**
+ * Discard pages protected by the given lock. This function traverses the
+ * radix tree to find all covered pages and discards them. If a page is
+ * covered by another lock, it stays in the cache.
+ *
+ * If an error happens at any step, the process continues anyway (the
+ * reasoning behind this being that lock cancellation cannot be delayed
+ * indefinitely).
+ */
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct cl_io *io = &info->oti_io;
+ osc_page_gang_cbt cb;
+ int res;
+ int result;
+
+ io->ci_obj = cl_object_top(osc2cl(osc));
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ goto out;
+
+ cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ info->oti_fn_index = info->oti_next_index = start;
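+ /*
+ * oti_next_index records how far the gang lookup got, so each
+ * retry resumes where the previous pass stopped.
+ */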
+ do {
+ res = osc_page_gang_lookup(env, io, osc,
+ info->oti_next_index, end, cb, osc);
+ if (info->oti_next_index > end)
+ break;
+
+ if (res == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (res != CLP_GANG_OKAY);
+out:
+ cl_io_fini(env, io);
return result;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index d55d04d04..ae19d396b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -51,7 +51,6 @@
#include "../include/obd.h"
/* osc_build_res_name() */
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "osc_internal.h"
/** \defgroup osc osc
@@ -68,6 +67,9 @@ struct osc_io {
struct cl_io_slice oi_cl;
/** true if this io is lockless. */
int oi_lockless;
+ /** how many LRU pages are reserved for this IO */
+ int oi_lru_reserved;
+
/** active extents, we know how many bytes is going to be written,
* so having an active extent will prevent it from being fragmented
*/
@@ -77,6 +79,8 @@ struct osc_io {
*/
struct osc_extent *oi_trunc;
+ /** write osc_lock for this IO, used by osc_extent_find(). */
+ struct osc_lock *oi_write_osclock;
struct obd_info oi_info;
struct obdo oi_oa;
struct osc_async_cbargs {
@@ -100,7 +104,7 @@ struct osc_session {
struct osc_io os_io;
};
-#define OTI_PVEC_SIZE 64
+#define OTI_PVEC_SIZE 256
struct osc_thread_info {
struct ldlm_res_id oti_resname;
ldlm_policy_data_t oti_policy;
@@ -109,7 +113,13 @@ struct osc_thread_info {
struct lustre_handle oti_handle;
struct cl_page_list oti_plist;
struct cl_io oti_io;
- struct cl_page *oti_pvec[OTI_PVEC_SIZE];
+ void *oti_pvec[OTI_PVEC_SIZE];
+ /**
+ * Fields used by osc_lock_discard_pages().
+ */
+ pgoff_t oti_next_index;
+ pgoff_t oti_fn_index; /* first non-overlapped index */
+ struct cl_sync_io oti_anchor;
};
struct osc_object {
@@ -125,7 +135,7 @@ struct osc_object {
*/
struct list_head oo_inflight[CRT_NR];
/**
- * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
+ * Lock, protecting osc_page::ops_inflight, because a seat-belt is
* locked during take-off and landing.
*/
spinlock_t oo_seatbelt;
@@ -159,6 +169,17 @@ struct osc_object {
* oo_{read|write}_pages soon.
*/
spinlock_t oo_lock;
+
+ /**
+ * Radix tree for caching pages
+ */
+ struct radix_tree_root oo_tree;
+ spinlock_t oo_tree_lock;
+ unsigned long oo_npages;
+
+ /* protects the list of osc_locks this osc_object owns */
+ spinlock_t oo_ol_spin;
+ struct list_head oo_ol_list;
};
static inline void osc_object_lock(struct osc_object *obj)
@@ -198,8 +219,6 @@ enum osc_lock_state {
OLS_ENQUEUED,
OLS_UPCALL_RECEIVED,
OLS_GRANTED,
- OLS_RELEASED,
- OLS_BLOCKED,
OLS_CANCELLED
};
@@ -208,10 +227,8 @@ enum osc_lock_state {
*
* Interaction with DLM.
*
- * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
- *
* Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
- * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_lock.
+ * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
*
* This pointer is protected through a reference, acquired by
* osc_lock_upcall0(). Also, an additional reference is acquired by
@@ -249,26 +266,27 @@ enum osc_lock_state {
*/
struct osc_lock {
struct cl_lock_slice ols_cl;
+ /** Internal lock to protect states, etc. */
+ spinlock_t ols_lock;
+ /** Owner sleeps on this channel for state change */
+ struct cl_sync_io *ols_owner;
+ /** waiting list for this lock to be cancelled */
+ struct list_head ols_waiting_list;
+ /** wait entry of ols_waiting_list */
+ struct list_head ols_wait_entry;
+ /** list entry for osc_object::oo_ol_list */
+ struct list_head ols_nextlock_oscobj;
+
/** underlying DLM lock */
- struct ldlm_lock *ols_lock;
- /** lock value block */
- struct ost_lvb ols_lvb;
+ struct ldlm_lock *ols_dlmlock;
/** DLM flags with which osc_lock::ols_lock was enqueued */
__u64 ols_flags;
/** osc_lock::ols_lock handle */
struct lustre_handle ols_handle;
struct ldlm_enqueue_info ols_einfo;
enum osc_lock_state ols_state;
-
- /**
- * How many pages are using this lock for io, currently only used by
- * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
- * during recovery to avoid deadlock. see bz16774.
- *
- * \see osc_page::ops_lock
- * \see osc_page_addref_lock(), osc_page_putref_lock()
- */
- atomic_t ols_pageref;
+ /** lock value block */
+ struct ost_lvb ols_lvb;
/**
* true, if ldlm_lock_addref() was called against
@@ -299,16 +317,6 @@ struct osc_lock {
*/
ols_locklessable:1,
/**
- * set by osc_lock_use() to wait until blocking AST enters into
- * osc_ldlm_blocking_ast0(), so that cl_lock mutex can be used for
- * further synchronization.
- */
- ols_ast_wait:1,
- /**
- * If the data of this lock has been flushed to server side.
- */
- ols_flush:1,
- /**
* if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
* the ENAVAIL error as tolerable; this lets the upper logic wait for
* glimpse locks to each OST to complete.
@@ -321,15 +329,6 @@ struct osc_lock {
* For async glimpse lock.
*/
ols_agl:1;
- /**
- * IO that owns this lock. This field is used for a dead-lock
- * avoidance by osc_lock_enqueue_wait().
- *
- * XXX: unfortunately, the owner of a osc_lock is not unique,
- * the lock may have multiple users, if the lock is granted and
- * then matched.
- */
- struct osc_io *ols_owner;
};
/**
@@ -369,18 +368,15 @@ struct osc_page {
* Set if the page must be transferred with OBD_BRW_SRVLOCK.
*/
ops_srvlock:1;
- union {
- /**
- * lru page list. ops_inflight and ops_lru are exclusive so
- * that they can share the same data.
- */
- struct list_head ops_lru;
- /**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- };
+ /**
+ * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
+ */
+ struct list_head ops_lru;
+ /**
+ * Linkage into a per-osc_object list of pages in flight. For
+ * debugging.
+ */
+ struct list_head ops_inflight;
/**
* Thread that submitted this page for transfer. For debugging.
*/
@@ -389,16 +385,6 @@ struct osc_page {
* Submit time - the time when the page is starting RPC. For debugging.
*/
unsigned long ops_submit_time;
-
- /**
- * A lock of which we hold a reference covers this page. Only used by
- * read-ahead: for a readahead page, we hold it's covering lock to
- * prevent it from being canceled during recovery.
- *
- * \see osc_lock::ols_pageref
- * \see osc_page_addref_lock(), osc_page_putref_lock().
- */
- struct cl_lock *ops_lock;
};
extern struct kmem_cache *osc_lock_kmem;
@@ -417,21 +403,22 @@ extern struct lu_context_key osc_session_key;
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
-int osc_io_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int osc_req_init (const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
+int osc_io_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+int osc_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
struct lu_object *osc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t ind);
-void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
-int osc_lvb_print (const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb);
+void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+ pgoff_t start, pgoff_t end);
+int osc_lvb_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct ost_lvb *lvb);
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags);
int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
@@ -441,6 +428,8 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
@@ -457,12 +446,13 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
pgoff_t start, pgoff_t end);
void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc);
+int lru_queue_work(const struct lu_env *env, void *data);
-void osc_object_set_contended (struct osc_object *obj);
+void osc_object_set_contended(struct osc_object *obj);
void osc_object_clear_contended(struct osc_object *obj);
-int osc_object_is_contended (struct osc_object *obj);
+int osc_object_is_contended(struct osc_object *obj);
-int osc_lock_is_lockless (const struct osc_lock *olck);
+int osc_lock_is_lockless(const struct osc_lock *olck);
/*****************************************************************************
*
@@ -558,6 +548,11 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
return container_of0(oap, struct osc_page, ops_oap);
}
+static inline pgoff_t osc_index(struct osc_page *opg)
+{
+ return opg->ops_cl.cpl_index;
+}
+
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
return oap2osc(oap)->ops_cl.cpl_page;
@@ -608,7 +603,7 @@ enum osc_extent_state {
*
* LOCKING ORDER
* =============
- * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
+ * page lock -> cl_loi_list_lock -> object lock(osc_object::oo_lock)
*/
struct osc_extent {
/** red-black tree node */
@@ -627,6 +622,8 @@ struct osc_extent {
unsigned int oe_intree:1,
/** 0 is write, 1 is read */
oe_rw:1,
+ /** sync extent, queued by osc_queue_sync_pages() */
+ oe_sync:1,
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
@@ -675,7 +672,7 @@ struct osc_extent {
*/
wait_queue_head_t oe_waitq;
/** lock covering this extent */
- struct cl_lock *oe_osclock;
+ struct ldlm_lock *oe_dlmlock;
/** terminator of this extent. Must be true if this extent is in IO. */
struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
@@ -690,6 +687,14 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int sent, int rc);
void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode);
+
+typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
+ struct osc_page *, void *);
+int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata);
/** @} osc */
#endif /* OSC_CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index ea695c209..7fad82781 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -83,6 +83,12 @@ struct osc_async_page {
#define oap_count oap_brw_page.count
#define oap_brw_flags oap_brw_page.flag
+static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
+{
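+ /* recover the enclosing osc_async_page from its embedded brw_page */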
+ return (struct osc_async_page *)container_of(pga, struct osc_async_page,
+ oap_brw_page);
+}
+
struct osc_cache_waiter {
struct list_head ocw_entry;
wait_queue_head_t ocw_waitq;
@@ -102,12 +108,14 @@ void osc_update_next_shrink(struct client_obd *cli);
extern struct ptlrpc_request_set *PTLRPCD_SET;
+typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
+ int rc);
+
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall,
+ osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
struct ptlrpc_request_set *rqset, int async, int agl);
int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
@@ -130,9 +138,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
-int osc_lru_shrink(struct client_obd *cli, int target);
+int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ int target, bool force);
+int osc_lru_reclaim(struct client_obd *cli);
-extern spinlock_t osc_ast_guard;
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
@@ -173,8 +183,6 @@ static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
}
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
-
extern struct kmem_cache *osc_quota_kmem;
struct osc_quota_info {
/** linkage for quota hash table */
@@ -192,5 +200,12 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl);
int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
+void osc_inc_unstable_pages(struct ptlrpc_request *req);
+void osc_dec_unstable_pages(struct ptlrpc_request *req);
+int osc_over_unstable_soft_limit(struct client_obd *cli);
+
+struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int pending, int canceling);
#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 6bd0a45d8..d534b0e0e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -68,11 +68,15 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
return oio;
}
-static struct osc_page *osc_cl_page_osc(struct cl_page *page)
+static struct osc_page *osc_cl_page_osc(struct cl_page *page,
+ struct osc_object *osc)
{
const struct cl_page_slice *slice;
- slice = cl_page_at(page, &osc_device_type);
+ if (osc)
+ slice = cl_object_page_slice(&osc->oo_cl, page);
+ else
+ slice = cl_page_at(page, &osc_device_type);
LASSERT(slice);
return cl2osc_page(slice);
@@ -137,7 +141,7 @@ static int osc_io_submit(const struct lu_env *env,
io = page->cp_owner;
LASSERT(io);
- opg = osc_cl_page_osc(page);
+ opg = osc_cl_page_osc(page, osc);
oap = &opg->ops_oap;
LASSERT(osc == oap->oap_obj);
@@ -164,8 +168,10 @@ static int osc_io_submit(const struct lu_env *env,
}
cl_page_list_move(qout, qin, page);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
osc_page_submit(env, opg, crt, brw_flags);
list_add_tail(&oap->oap_pending_item, &list);
@@ -185,6 +191,13 @@ static int osc_io_submit(const struct lu_env *env,
return qout->pl_nr > 0 ? 0 : result;
}
+/**
+ * This is called when a page is accessed within a file in a way that creates
+ * a new page, if one was missing (i.e., if there was a hole at that place in
+ * the file, or the accessed page is beyond the current file size).
+ *
+ * Expands the stripe KMS if necessary.
+ */
static void osc_page_touch_at(const struct lu_env *env,
struct cl_object *obj, pgoff_t idx, unsigned to)
{
@@ -208,7 +221,8 @@ static void osc_page_touch_at(const struct lu_env *env,
kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
loi->loi_lvb.lvb_size);
- valid = 0;
+ attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ valid = CAT_MTIME | CAT_CTIME;
if (kms > loi->loi_kms) {
attr->cat_kms = kms;
valid |= CAT_KMS;
@@ -221,91 +235,128 @@ static void osc_page_touch_at(const struct lu_env *env,
cl_object_attr_unlock(obj);
}
-/**
- * This is called when a page is accessed within file in a way that creates
- * new page, if one were missing (i.e., if there were a hole at that place in
- * the file, or accessed page is beyond the current file size). Examples:
- * ->commit_write() and ->nopage() methods.
- *
- * Expand stripe KMS if necessary.
- */
-static void osc_page_touch(const struct lu_env *env,
- struct osc_page *opage, unsigned to)
-{
- struct cl_page *page = opage->ops_cl.cpl_page;
- struct cl_object *obj = opage->ops_cl.cpl_obj;
-
- osc_page_touch_at(env, obj, page->cp_index, to);
-}
-
-/**
- * Implements cl_io_operations::cio_prepare_write() method for osc layer.
- *
- * \retval -EIO transfer initiated against this osc will most likely fail
- * \retval 0 transfer initiated against this osc will most likely succeed.
- *
- * The reason for this check is to immediately return an error to the caller
- * in the case of a deactivated import. Note, that import can be deactivated
- * later, while pages, dirtied by this IO, are still in the cache, but this is
- * irrelevant, because that would still return an error to the application (if
- * it does fsync), but many applications don't do fsync because of performance
- * issues, and we wanted to return an -EIO at write time to notify the
- * application.
- */
-static int osc_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
+static int osc_io_commit_async(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct cl_page_list *qin, int from, int to,
+ cl_commit_cbt cb)
{
- struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
- struct obd_import *imp = class_exp2cliimp(dev->od_exp);
+ struct cl_io *io = ios->cis_io;
struct osc_io *oio = cl2osc_io(env, ios);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct cl_page *page;
+ struct cl_page *last_page;
+ struct osc_page *opg;
int result = 0;
- /*
- * This implements OBD_BRW_CHECK logic from old client.
- */
+ LASSERT(qin->pl_nr > 0);
+
+ /* Handle partial page cases */
+ last_page = cl_page_list_last(qin);
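+ /*
+ * For lockless IO only the [from, to) bytes of the first and
+ * last pages are valid, so clip them before the write.
+ */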
+ if (oio->oi_lockless) {
+ page = cl_page_list_first(qin);
+ if (page == last_page) {
+ cl_page_clip(env, page, from, to);
+ } else {
+ if (from != 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE)
+ cl_page_clip(env, last_page, 0, to);
+ }
+ }
+
+ while (qin->pl_nr > 0) {
+ struct osc_async_page *oap;
+
+ page = cl_page_list_first(qin);
+ opg = osc_cl_page_osc(page, osc);
+ oap = &opg->ops_oap;
+
+ if (!list_empty(&oap->oap_rpc_item)) {
+ CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
+ oap, opg);
+ result = -EBUSY;
+ break;
+ }
+
+ /* The page may be already in dirty cache. */
+ if (list_empty(&oap->oap_pending_item)) {
+ result = osc_page_cache_add(env, &opg->ops_cl, io);
+ if (result != 0)
+ break;
+ }
+
+ osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
+ page == last_page ? to : PAGE_SIZE);
+
+ cl_page_list_del(env, qin, page);
- if (!imp || imp->imp_invalid)
- result = -EIO;
- if (result == 0 && oio->oi_lockless)
- /* this page contains `invalid' data, but who cares?
- * nobody can access the invalid data.
- * in osc_io_commit_write(), we're going to write exact
- * [from, to) bytes of this page to OST. -jay
+ (*cb)(env, io, page);
+ /* Can't access page any more. Page can be in transfer and
+ * complete at any time.
*/
- cl_page_export(env, slice->cpl_page, 1);
+ }
+ /* for sync write, the kernel will wait for this page to be flushed
+ * before osc_io_end() is called, so release it earlier.
+ * for mkwrite(), it's known there are no further pages.
+ */
+ if (cl_io_is_sync_write(io) && oio->oi_active) {
+ osc_extent_release(env, oio->oi_active);
+ oio->oi_active = NULL;
+ }
+
+ CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
return result;
}
-static int osc_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
+static int osc_io_rw_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
- struct osc_io *oio = cl2osc_io(env, ios);
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- struct osc_async_page *oap = &opg->ops_oap;
+ struct cl_io *io = ios->cis_io;
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct client_obd *cli = osc_cli(osc);
+ unsigned long c;
+ unsigned int npages;
+ unsigned int max_pages;
+
+ if (cl_io_is_append(io))
+ return 0;
+
+ npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
+ if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
+ ++npages;
+
+ max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+ if (npages > max_pages)
+ npages = max_pages;
+
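+ /*
+ * cl_lru_left is a shared budget of LRU slots; claim npages of
+ * it with cmpxchg so concurrent IOs cannot over-reserve.
+ */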
+ c = atomic_read(cli->cl_lru_left);
+ if (c < npages && osc_lru_reclaim(cli) > 0)
+ c = atomic_read(cli->cl_lru_left);
+ while (c >= npages) {
+ if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ oio->oi_lru_reserved = npages;
+ break;
+ }
+ c = atomic_read(cli->cl_lru_left);
+ }
- LASSERT(to > 0);
- /*
- * XXX instead of calling osc_page_touch() here and in
- * osc_io_fault_start() it might be more logical to introduce
- * cl_page_touch() method, that generic cl_io_commit_write() and page
- * fault code calls.
- */
- osc_page_touch(env, cl2osc_page(slice), to);
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+ return 0;
+}
- if (oio->oi_lockless)
- /* see osc_io_prepare_write() for lockless io handling. */
- cl_page_clip(env, slice->cpl_page, from, to);
+static void osc_io_rw_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct client_obd *cli = osc_cli(osc);
- return 0;
+ if (oio->oi_lru_reserved > 0) {
+ atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ oio->oi_lru_reserved = 0;
+ }
+ oio->oi_write_osclock = NULL;
}
static int osc_io_fault_start(const struct lu_env *env,
@@ -342,31 +393,21 @@ static int osc_async_upcall(void *a, int rc)
* Checks that there are no pages being written in the extent being truncated.
*/
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
+ struct osc_page *ops, void *cbdata)
{
- const struct cl_page_slice *slice;
- struct osc_page *ops;
+ struct cl_page *page = ops->ops_cl.cpl_page;
struct osc_async_page *oap;
__u64 start = *(__u64 *)cbdata;
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice);
- ops = cl2osc_page(slice);
oap = &ops->ops_oap;
-
if (oap->oap_cmd & OBD_BRW_WRITE &&
!list_empty(&oap->oap_pending_item))
CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
start, current->comm);
- {
- struct page *vmpage = cl_page_vmpage(env, page);
-
- if (PageLocked(vmpage))
- CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
- ops, page->cp_index,
- (oap->oap_cmd & OBD_BRW_RWMASK));
- }
+ if (PageLocked(page->cp_vmpage))
+ CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
+ ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
return CLP_GANG_OKAY;
}
@@ -385,8 +426,9 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
/*
* Complain if there are pages in the truncated region.
*/
- cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
- trunc_check_cb, (void *)&size);
+ osc_page_gang_lookup(env, io, cl2osc(clob),
+ start + partial, CL_PAGE_EOF,
+ trunc_check_cb, (void *)&size);
}
static int osc_io_setattr_start(const struct lu_env *env,
@@ -650,6 +692,8 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
},
[CIT_WRITE] = {
+ .cio_iter_init = osc_io_rw_iter_init,
+ .cio_iter_fini = osc_io_rw_iter_fini,
.cio_start = osc_io_write_start,
.cio_end = osc_io_end,
.cio_fini = osc_io_fini
@@ -672,16 +716,8 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = osc_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = osc_io_submit
- }
- },
- .cio_prepare_write = osc_io_prepare_write,
- .cio_commit_write = osc_io_commit_write
+ .cio_submit = osc_io_submit,
+ .cio_commit_async = osc_io_commit_async
};
/*****************************************************************************
@@ -718,8 +754,7 @@ static void osc_req_attr_set(const struct lu_env *env,
struct lov_oinfo *oinfo;
struct cl_req *clerq;
struct cl_page *apage; /* _some_ page in @clerq */
- struct cl_lock *lock; /* _some_ lock protecting @apage */
- struct osc_lock *olck;
+ struct ldlm_lock *lock; /* _some_ lock protecting @apage */
struct osc_page *opg;
struct obdo *oa;
struct ost_lvb *lvb;
@@ -753,31 +788,32 @@ static void osc_req_attr_set(const struct lu_env *env,
LASSERT(!list_empty(&clerq->crq_pages));
apage = container_of(clerq->crq_pages.next,
struct cl_page, cp_flight);
- opg = osc_cl_page_osc(apage);
- apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
- lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
- if (!lock) {
- struct cl_object_header *head;
- struct cl_lock *scan;
-
- head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks, cll_linkage)
- CL_LOCK_DEBUG(D_ERROR, env, scan,
- "no cover page!\n");
- CL_PAGE_DEBUG(D_ERROR, env, apage,
- "dump uncover page!\n");
+ opg = osc_cl_page_osc(apage, NULL);
+ lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+ 1, 1);
+ if (!lock && !opg->ops_srvlock) {
+ struct ldlm_resource *res;
+ struct ldlm_res_id *resname;
+
+ CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
+
+ resname = &osc_env_info(env)->oti_resname;
+ ostid_build_res_name(&oinfo->loi_oi, resname);
+ res = ldlm_resource_get(
+ osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+ NULL, resname, LDLM_EXTENT, 0);
+ ldlm_resource_dump(D_ERROR, res);
+
dump_stack();
LBUG();
}
- olck = osc_lock_at(lock);
- LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock));
/* check for lockless io. */
- if (olck->ols_lock) {
- oa->o_handle = olck->ols_lock->l_remote_handle;
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
oa->o_valid |= OBD_MD_FLHANDLE;
+ LDLM_LOCK_PUT(lock);
}
- cl_lock_put(env, lock);
}
}
@@ -807,8 +843,9 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev,
if (or) {
cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 013df9787..16f9cd9d3 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -36,6 +36,7 @@
* Implementation of cl_lock for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
@@ -50,8 +51,6 @@
* @{
*/
-#define _PAGEREF_MAGIC (-10000000)
-
/*****************************************************************************
*
* Type conversions.
@@ -62,7 +61,6 @@ static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
struct osc_lock *ols, int force);
-static int osc_lock_has_pages(struct osc_lock *olck);
int osc_lock_is_lockless(const struct osc_lock *olck)
{
@@ -90,11 +88,11 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
static int osc_lock_invariant(struct osc_lock *ols)
{
struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
+ struct ldlm_lock *olock = ols->ols_dlmlock;
int handle_used = lustre_handle_is_used(&ols->ols_handle);
if (ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && !ols->ols_lock))
+ ols->ols_locklessable && !ols->ols_dlmlock))
return 1;
/*
@@ -111,7 +109,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
ergo(!lock, !olock)))
return 0;
/*
- * Check that ->ols_handle and ->ols_lock are consistent, but
+ * Check that ->ols_handle and ->ols_dlmlock are consistent, but
* take into account that they are set at the different time.
*/
if (!ergo(ols->ols_state == OLS_CANCELLED,
@@ -122,7 +120,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
* ast.
*/
if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
- ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ !ldlm_is_destroyed(olock)))
return 0;
if (!ergo(ols->ols_state == OLS_GRANTED,
@@ -138,117 +136,13 @@ static int osc_lock_invariant(struct osc_lock *ols)
*
*/
-/**
- * Breaks a link between osc_lock and dlm_lock.
- */
-static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
-{
- struct ldlm_lock *dlmlock;
-
- spin_lock(&osc_ast_guard);
- dlmlock = olck->ols_lock;
- if (!dlmlock) {
- spin_unlock(&osc_ast_guard);
- return;
- }
-
- olck->ols_lock = NULL;
- /* wb(); --- for all who checks (ols->ols_lock != NULL) before
- * call to osc_lock_detach()
- */
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
-
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
- struct cl_object *obj = olck->ols_cl.cls_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms;
-
- cl_object_attr_lock(obj);
- /* Must get the value under the lock to avoid possible races. */
- old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
- /* Update the kms. Need to loop all granted locks.
- * Not a problem for the client
- */
- attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
-
- cl_object_attr_set(env, obj, attr, CAT_KMS);
- cl_object_attr_unlock(obj);
- }
- unlock_res_and_lock(dlmlock);
-
- /* release a reference taken in osc_lock_upcall0(). */
- LASSERT(olck->ols_has_ref);
- lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
- LDLM_LOCK_RELEASE(dlmlock);
- olck->ols_has_ref = 0;
-}
-
-static int osc_lock_unhold(struct osc_lock *ols)
-{
- int result = 0;
-
- if (ols->ols_hold) {
- ols->ols_hold = 0;
- result = osc_cancel_base(&ols->ols_handle,
- ols->ols_einfo.ei_mode);
- }
- return result;
-}
-
-static int osc_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(ols));
-
- switch (ols->ols_state) {
- case OLS_NEW:
- LASSERT(!ols->ols_hold);
- LASSERT(ols->ols_agl);
- return 0;
- case OLS_UPCALL_RECEIVED:
- osc_lock_unhold(ols);
- case OLS_ENQUEUED:
- LASSERT(!ols->ols_hold);
- osc_lock_detach(env, ols);
- ols->ols_state = OLS_NEW;
- return 0;
- case OLS_GRANTED:
- LASSERT(!ols->ols_glimpse);
- LASSERT(ols->ols_hold);
- /*
- * Move lock into OLS_RELEASED state before calling
- * osc_cancel_base() so that possible synchronous cancellation
- * sees that lock is released.
- */
- ols->ols_state = OLS_RELEASED;
- return osc_lock_unhold(ols);
- default:
- CERROR("Impossible state: %d\n", ols->ols_state);
- LBUG();
- }
-}
-
static void osc_lock_fini(const struct lu_env *env,
struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
LINVRNT(osc_lock_invariant(ols));
- /*
- * ->ols_hold can still be true at this point if, for example, a
- * thread that requested a lock was killed (and released a reference
- * to the lock), before reply from a server was received. In this case
- * lock is destroyed immediately after upcall.
- */
- osc_lock_unhold(ols);
- LASSERT(!ols->ols_lock);
- LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
- atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
+ LASSERT(!ols->ols_dlmlock);
kmem_cache_free(osc_lock_kmem, ols);
}
@@ -275,55 +169,12 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
result |= LDLM_FL_HAS_INTENT;
if (enqflags & CEF_DISCARD_DATA)
result |= LDLM_FL_AST_DISCARD_DATA;
+ if (enqflags & CEF_PEEK)
+ result |= LDLM_FL_TEST_LOCK;
return result;
}
/**
- * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
- * pointers. Initialized in osc_init().
- */
-spinlock_t osc_ast_guard;
-
-static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
-{
- struct osc_lock *olck;
-
- lock_res_and_lock(dlm_lock);
- spin_lock(&osc_ast_guard);
- olck = dlm_lock->l_ast_data;
- if (olck) {
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- /*
- * If osc_lock holds a reference on ldlm lock, return it even
- * when cl_lock is in CLS_FREEING state. This way
- *
- * osc_ast_data_get(dlmlock) == NULL
- *
- * guarantees that all osc references on dlmlock were
- * released. osc_dlm_blocking_ast0() relies on that.
- */
- if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
- cl_lock_get_trust(lock);
- lu_ref_add_atomic(&lock->cll_reference,
- "ast", current);
- } else
- olck = NULL;
- }
- spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(dlm_lock);
- return olck;
-}
-
-static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
-{
- struct cl_lock *lock;
-
- lock = olck->ols_cl.cls_lock;
- lu_ref_del(&lock->cll_reference, "ast", current);
- cl_lock_put(env, lock);
-}
-
-/**
* Updates object attributes from a lock value block (lvb) received together
* with the DLM lock reply from the server. Copy of osc_update_enqueue()
* logic.
@@ -333,35 +184,30 @@ static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
*
* Called under lock and resource spin-locks.
*/
-static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
- int rc)
+static void osc_lock_lvb_update(const struct lu_env *env,
+ struct osc_object *osc,
+ struct ldlm_lock *dlmlock,
+ struct ost_lvb *lvb)
{
- struct ost_lvb *lvb;
- struct cl_object *obj;
- struct lov_oinfo *oinfo;
- struct cl_attr *attr;
+ struct cl_object *obj = osc2cl(osc);
+ struct lov_oinfo *oinfo = osc->oo_oinfo;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned valid;
- if (!(olck->ols_flags & LDLM_FL_LVB_READY))
- return;
-
- lvb = &olck->ols_lvb;
- obj = olck->ols_cl.cls_obj;
- oinfo = cl2osc(obj)->oo_oinfo;
- attr = &osc_env_info(env)->oti_attr;
valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
+ if (!lvb)
+ lvb = dlmlock->l_lvb_data;
+
cl_lvb2attr(attr, lvb);
cl_object_attr_lock(obj);
- if (rc == 0) {
- struct ldlm_lock *dlmlock;
+ if (dlmlock) {
__u64 size;
- dlmlock = olck->ols_lock;
-
- /* re-grab LVB from a dlm lock under DLM spin-locks. */
- *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
+ check_res_locked(dlmlock->l_resource);
+ LASSERT(lvb == dlmlock->l_lvb_data);
size = lvb->lvb_size;
+
/* Extend KMS up to the end of this lock and no further
* A lock on [x,y] means a KMS of up to y + 1 bytes!
*/
@@ -378,102 +224,67 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
dlmlock->l_policy_data.l_extent.end);
}
ldlm_lock_allow_match_locked(dlmlock);
- } else if (rc == -ENAVAIL && olck->ols_glimpse) {
- CDEBUG(D_INODE, "glimpsed, setting rss=%llu; leaving kms=%llu\n",
- lvb->lvb_size, oinfo->loi_kms);
- } else
- valid = 0;
-
- if (valid != 0)
- cl_object_attr_set(env, obj, attr, valid);
+ }
+ cl_object_attr_set(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
-/**
- * Called when a lock is granted, from an upcall (when server returned a
- * granted lock), or from completion AST, when server returned a blocked lock.
- *
- * Called under lock and resource spin-locks, that are released temporarily
- * here.
- */
-static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
- struct ldlm_lock *dlmlock, int rc)
+static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
+ struct lustre_handle *lockh, bool lvb_update)
{
- struct ldlm_extent *ext;
- struct cl_lock *lock;
- struct cl_lock_descr *descr;
+ struct ldlm_lock *dlmlock;
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+ dlmlock = ldlm_handle2lock_long(lockh, 0);
+ LASSERT(dlmlock);
- if (olck->ols_state < OLS_GRANTED) {
- lock = olck->ols_cl.cls_lock;
- ext = &dlmlock->l_policy_data.l_extent;
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_obj = lock->cll_descr.cld_obj;
+ /* lock reference taken by ldlm_handle2lock_long() is
+ * owned by osc_lock and released in osc_lock_detach()
+ */
+ lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
+ oscl->ols_has_ref = 1;
- /* XXX check that ->l_granted_mode is valid. */
- descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
- descr->cld_gid = ext->gid;
- /*
- * tell upper layers the extent of the lock that was actually
- * granted
- */
- olck->ols_state = OLS_GRANTED;
- osc_lock_lvb_update(env, olck, rc);
-
- /* release DLM spin-locks to allow cl_lock_{modify,signal}()
- * to take a semaphore on a parent lock. This is safe, because
- * spin-locks are needed to protect consistency of
- * dlmlock->l_*_mode and LVB, and we have finished processing
- * them.
+ LASSERT(!oscl->ols_dlmlock);
+ oscl->ols_dlmlock = dlmlock;
+
+ /* This may be a matched lock for a glimpse request; do not hold
+ * a lock reference in that case.
+ */
+ if (!oscl->ols_glimpse) {
+ /* hold a reference for a non-glimpse lock; it will
+ * be released in osc_lock_cancel()
+ */
- unlock_res_and_lock(dlmlock);
- cl_lock_modify(env, lock, descr);
- cl_lock_signal(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- lock_res_and_lock(dlmlock);
+ lustre_handle_copy(&oscl->ols_handle, lockh);
+ ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
+ oscl->ols_hold = 1;
}
-}
-
-static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
-
-{
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
- LASSERT(dlmlock);
+ /* Lock must have been granted. */
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(!olck->ols_lock);
- olck->ols_lock = dlmlock;
- spin_unlock(&osc_ast_guard);
+ if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+ struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
+ struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
- /*
- * Lock might be not yet granted. In this case, completion ast
- * (osc_ldlm_completion_ast()) comes later and finishes lock
- * granting.
- */
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
- osc_lock_granted(env, olck, dlmlock, 0);
- unlock_res_and_lock(dlmlock);
+ /* extend the lock extent; otherwise it will cause problems
+ * when we decide whether to grant a lockless lock.
+ */
+ descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
+ descr->cld_start = cl_index(descr->cld_obj, ext->start);
+ descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_gid = ext->gid;
- /*
- * osc_enqueue_interpret() decrefs asynchronous locks, counter
- * this.
- */
- ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
- olck->ols_hold = 1;
+ /* no lvb update for matched lock */
+ if (lvb_update) {
+ LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
+ osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
+ dlmlock, NULL);
+ }
+ LINVRNT(osc_lock_invariant(oscl));
+ }
+ unlock_res_and_lock(dlmlock);
- /* lock reference taken by ldlm_handle2lock_long() is owned by
- * osc_lock and released in osc_lock_detach()
- */
- lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
- olck->ols_has_ref = 1;
+ LASSERT(oscl->ols_state != OLS_GRANTED);
+ oscl->ols_state = OLS_GRANTED;
}
/**
@@ -481,143 +292,124 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
* received from a server, or after osc_enqueue_base() matched a local DLM
* lock.
*/
-static int osc_lock_upcall(void *cookie, int errcode)
+static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
+ int errcode)
{
- struct osc_lock *olck = cookie;
- struct cl_lock_slice *slice = &olck->ols_cl;
- struct cl_lock *lock = slice->cls_lock;
+ struct osc_lock *oscl = cookie;
+ struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
struct cl_env_nest nest;
+ int rc;
env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- int rc;
+ /* should never happen, similar to osc_ldlm_blocking_ast(). */
+ LASSERT(!IS_ERR(env));
+
+ rc = ldlm_error2errno(errcode);
+ if (oscl->ols_state == OLS_ENQUEUED) {
+ oscl->ols_state = OLS_UPCALL_RECEIVED;
+ } else if (oscl->ols_state == OLS_CANCELLED) {
+ rc = -EIO;
+ } else {
+ CERROR("Impossible state: %d\n", oscl->ols_state);
+ LBUG();
+ }
- cl_lock_mutex_get(env, lock);
+ if (rc == 0)
+ osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
- LASSERT(lock->cll_state >= CLS_QUEUING);
- if (olck->ols_state == OLS_ENQUEUED) {
- olck->ols_state = OLS_UPCALL_RECEIVED;
- rc = ldlm_error2errno(errcode);
- } else if (olck->ols_state == OLS_CANCELLED) {
- rc = -EIO;
- } else {
- CERROR("Impossible state: %d\n", olck->ols_state);
- LBUG();
- }
- if (rc) {
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock(&olck->ols_handle);
- if (dlmlock) {
- lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(!olck->ols_lock);
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
- ldlm_lock_fail_match_locked(dlmlock);
- unlock_res_and_lock(dlmlock);
- LDLM_LOCK_PUT(dlmlock);
- }
- } else {
- if (olck->ols_glimpse)
- olck->ols_glimpse = 0;
- osc_lock_upcall0(env, olck);
- }
+ /* Error handling, some errors are tolerable. */
+ if (oscl->ols_locklessable && rc == -EUSERS) {
+ /* This is a tolerable error, turn this lock into
+ * lockless lock.
+ */
+ osc_object_set_contended(cl2osc(slice->cls_obj));
+ LASSERT(slice->cls_ops == &osc_lock_ops);
+
+ /* Change this lock to ldlmlock-less lock. */
+ osc_lock_to_lockless(env, oscl, 1);
+ oscl->ols_state = OLS_GRANTED;
+ rc = 0;
+ } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
+ LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
+ osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
+ NULL, &oscl->ols_lvb);
+ /* Hide the error. */
+ rc = 0;
+ }
- /* Error handling, some errors are tolerable. */
- if (olck->ols_locklessable && rc == -EUSERS) {
- /* This is a tolerable error, turn this lock into
- * lockless lock.
- */
- osc_object_set_contended(cl2osc(slice->cls_obj));
- LASSERT(slice->cls_ops == &osc_lock_ops);
+ if (oscl->ols_owner)
+ cl_sync_io_note(env, oscl->ols_owner, rc);
+ cl_env_nested_put(&nest, env);
- /* Change this lock to ldlmlock-less lock. */
- osc_lock_to_lockless(env, olck, 1);
- olck->ols_state = OLS_GRANTED;
- rc = 0;
- } else if (olck->ols_glimpse && rc == -ENAVAIL) {
- osc_lock_lvb_update(env, olck, rc);
- cl_lock_delete(env, lock);
- /* Hide the error. */
- rc = 0;
- }
-
- if (rc == 0) {
- /* For AGL case, the RPC sponsor may exits the cl_lock
- * processing without wait() called before related OSC
- * lock upcall(). So update the lock status according
- * to the enqueue result inside AGL upcall().
- */
- if (olck->ols_agl) {
- lock->cll_flags |= CLF_FROM_UPCALL;
- cl_wait_try(env, lock);
- lock->cll_flags &= ~CLF_FROM_UPCALL;
- if (!olck->ols_glimpse)
- olck->ols_agl = 0;
- }
- cl_lock_signal(env, lock);
- /* del user for lock upcall cookie */
- cl_unuse_try(env, lock);
- } else {
- /* del user for lock upcall cookie */
- cl_lock_user_del(env, lock);
- cl_lock_error(env, lock, rc);
- }
+ return rc;
+}
- /* release cookie reference, acquired by osc_lock_enqueue() */
- cl_lock_hold_release(env, lock, "upcall", lock);
- cl_lock_mutex_put(env, lock);
+static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
+ int errcode)
+{
+ struct osc_object *osc = cookie;
+ struct ldlm_lock *dlmlock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
- lu_ref_del(&lock->cll_reference, "upcall", lock);
- /* This maybe the last reference, so must be called after
- * cl_lock_mutex_put().
- */
- cl_lock_put(env, lock);
+ env = cl_env_nested_get(&nest);
+ LASSERT(!IS_ERR(env));
- cl_env_nested_put(&nest, env);
- } else {
- /* should never happen, similar to osc_ldlm_blocking_ast(). */
- LBUG();
+ if (errcode == ELDLM_LOCK_MATCHED) {
+ errcode = ELDLM_OK;
+ goto out;
}
- return errcode;
+
+ if (errcode != ELDLM_OK)
+ goto out;
+
+ dlmlock = ldlm_handle2lock(lockh);
+ LASSERT(dlmlock);
+
+ lock_res_and_lock(dlmlock);
+ LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+
+ /* there is no osc_lock associated with AGL lock */
+ osc_lock_lvb_update(env, osc, dlmlock, NULL);
+
+ unlock_res_and_lock(dlmlock);
+ LDLM_LOCK_PUT(dlmlock);
+
+out:
+ cl_object_put(env, osc2cl(osc));
+ cl_env_nested_put(&nest, env);
+ return ldlm_error2errno(errcode);
}
-/**
- * Core of osc_dlm_blocking_ast() logic.
- */
-static void osc_lock_blocking(const struct lu_env *env,
- struct ldlm_lock *dlmlock,
- struct osc_lock *olck, int blocking)
+static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
+ enum cl_lock_mode mode, int discard)
{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
+ int rc = 0;
+ int rc2 = 0;
- LASSERT(olck->ols_lock == dlmlock);
- CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
- LASSERT(!osc_lock_is_lockless(olck));
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ if (mode == CLM_WRITE) {
+ rc = osc_cache_writeback_range(env, obj, start, end, 1,
+ discard);
+ CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
+ obj, start, end, rc,
+ discard ? "discarded" : "written back");
+ if (rc > 0)
+ rc = 0;
+ }
- /*
- * Lock might be still addref-ed here, if e.g., blocking ast
- * is sent for a failed lock.
- */
- osc_lock_unhold(olck);
+ rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
+ if (rc == 0 && rc2 < 0)
+ rc = rc2;
- if (blocking && olck->ols_state < OLS_BLOCKED)
- /*
- * Move osc_lock into OLS_BLOCKED before canceling the lock,
- * because it recursively re-enters osc_lock_blocking(), with
- * the state set to OLS_CANCELLED.
- */
- olck->ols_state = OLS_BLOCKED;
- /*
- * cancel and destroy lock at least once no matter how blocking ast is
- * entered (see comment above osc_ldlm_blocking_ast() for use
- * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
- */
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
+ cl_env_nested_put(&nest, env);
+ return rc;
}
/**
@@ -628,65 +420,63 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
struct ldlm_lock *dlmlock,
void *data, int flag)
{
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int cancel;
-
- LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
-
- cancel = 0;
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- if (olck->ols_ast_wait) {
- /* wake up osc_lock_use() */
- cl_lock_signal(env, lock);
- olck->ols_ast_wait = 0;
- }
- /*
- * Lock might have been canceled while this thread was
- * sleeping for lock mutex, but olck is pinned in memory.
- */
- if (olck == dlmlock->l_ast_data) {
- /*
- * NOTE: DLM sends blocking AST's for failed locks
- * (that are still in pre-OLS_GRANTED state)
- * too, and they have to be canceled otherwise
- * DLM lock is never destroyed and stuck in
- * the memory.
- *
- * Alternatively, ldlm_cli_cancel() can be
- * called here directly for osc_locks with
- * ols_state < OLS_GRANTED to maintain an
- * invariant that ->clo_cancel() is only called
- * for locks that were granted.
- */
- LASSERT(data == olck);
- osc_lock_blocking(env, dlmlock,
- olck, flag == LDLM_CB_BLOCKING);
- } else
- cancel = 1;
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- } else
- /*
- * DLM lock exists, but there is no cl_lock attached to it.
- * This is a `normal' race. cl_object and its cl_lock's can be
- * removed by memory pressure, together with all pages.
+ struct cl_object *obj = NULL;
+ int result = 0;
+ int discard;
+ enum cl_lock_mode mode = CLM_READ;
+
+ LASSERT(flag == LDLM_CB_CANCELING);
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+ dlmlock->l_ast_data = NULL;
+ unlock_res_and_lock(dlmlock);
+ return 0;
+ }
+
+ discard = ldlm_is_discard_data(dlmlock);
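+ /*
+ * PW and group locks may cover dirty pages, which must be
+ * written back (unless discarded) before the lock goes away.
+ */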
+ if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
+ mode = CLM_WRITE;
+
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ dlmlock->l_ast_data = NULL;
+
+ cl_object_get(obj);
+ }
+
+ unlock_res_and_lock(dlmlock);
+
+ /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
+ * the object has been destroyed.
+ */
+ if (obj) {
+ struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ __u64 old_kms;
+
+ /* Destroy pages covered by the extent of the DLM lock */
+ result = osc_lock_flush(cl2osc(obj),
+ cl_index(obj, extent->start),
+ cl_index(obj, extent->end),
+ mode, discard);
+
+ /* losing a lock, update kms */
+ lock_res_and_lock(dlmlock);
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid race. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ /* Update the kms. Need to loop all granted locks.
+ * Not a problem for the client
*/
- cancel = (flag == LDLM_CB_BLOCKING);
+ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- if (cancel) {
- struct lustre_handle *lockh;
+ cl_object_attr_set(env, obj, attr, CAT_KMS);
+ cl_object_attr_unlock(obj);
+ unlock_res_and_lock(dlmlock);
- lockh = &osc_env_info(env)->oti_handle;
- ldlm_lock2handle(dlmlock, lockh);
- result = ldlm_cli_cancel(lockh, LCF_ASYNC);
- } else
- result = 0;
+ cl_object_put(env, obj);
+ }
return result;
}
@@ -736,107 +526,52 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
struct ldlm_lock_desc *new, void *data,
int flag)
{
- struct lu_env *env;
- struct cl_env_nest nest;
- int result;
+ int result = 0;
- /*
- * This can be called in the context of outer IO, e.g.,
- *
- * cl_enqueue()->...
- * ->osc_enqueue_base()->...
- * ->ldlm_prep_elc_req()->...
- * ->ldlm_cancel_callback()->...
- * ->osc_ldlm_blocking_ast()
- *
- * new environment has to be created to not corrupt outer context.
- */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_nested_put(&nest, env);
- } else {
- result = PTR_ERR(env);
- /*
- * XXX This should never happen, as cl_lock is
- * stuck. Pre-allocated environment a la vvp_inode_fini_env
- * should be used.
- */
- LBUG();
- }
- if (result != 0) {
+ switch (flag) {
+ case LDLM_CB_BLOCKING: {
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (result == -ENODATA)
result = 0;
- else
- CERROR("BAST failed: %d\n", result);
+ break;
}
- return result;
-}
+ case LDLM_CB_CANCELING: {
+ struct lu_env *env;
+ struct cl_env_nest nest;
-static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
- __u64 flags, void *data)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int dlmrc;
-
- /* first, do dlm part of the work */
- dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
- /* then, notify cl_lock */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- /*
- * ldlm_handle_cp_callback() copied LVB from request
- * to lock->l_lvb_data, store it in osc_lock.
- */
- LASSERT(dlmlock->l_lvb_data);
- lock_res_and_lock(dlmlock);
- olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- if (!olck->ols_lock) {
- /*
- * upcall (osc_lock_upcall()) hasn't yet been
- * called. Do nothing now, upcall will bind
- * olck to dlmlock and signal the waiters.
- *
- * This maintains an invariant that osc_lock
- * and ldlm_lock are always bound when
- * osc_lock is in OLS_GRANTED state.
- */
- } else if (dlmlock->l_granted_mode ==
- dlmlock->l_req_mode) {
- osc_lock_granted(env, olck, dlmlock, dlmrc);
- }
- unlock_res_and_lock(dlmlock);
+ /*
+ * This can be called in the context of outer IO, e.g.,
+ *
+ * osc_enqueue_base()->...
+ * ->ldlm_prep_elc_req()->...
+ * ->ldlm_cancel_callback()->...
+ * ->osc_ldlm_blocking_ast()
+ *
+ * A new environment has to be created so the outer context is
+ * not corrupted.
+ */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env)) {
+ result = PTR_ERR(env);
+ break;
+ }
- if (dlmrc != 0) {
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "dlmlock returned %d\n", dlmrc);
- cl_lock_error(env, lock, dlmrc);
- }
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- result = 0;
- } else
- result = -ELDLM_NO_LOCK_DATA;
+ result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- return dlmrc ?: result;
+ break;
+ }
+ default:
+ LBUG();
+ }
+ return result;
}
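
[Editor's note] The LDLM_CB_CANCELING branch spells out why a nested lu_env is needed: the AST can fire underneath an outer IO that already owns an environment. A rough sketch of the get/put pairing discipline, where do_cancel_work() is a hypothetical stand-in for osc_dlm_blocking_ast0():

static int run_in_nested_env(struct ldlm_lock *dlmlock, void *data, int flag)
{
        struct cl_env_nest nest;
        struct lu_env *env;
        int rc;

        env = cl_env_nested_get(&nest); /* fresh env; the outer one is untouched */
        if (IS_ERR(env))
                return PTR_ERR(env);
        rc = do_cancel_work(env, dlmlock, data, flag);  /* hypothetical helper */
        cl_env_nested_put(&nest, env);  /* always paired with _get() */
        return rc;
}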
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
struct ptlrpc_request *req = data;
- struct osc_lock *olck;
- struct cl_lock *lock;
- struct cl_object *obj;
struct cl_env_nest nest;
struct lu_env *env;
struct ost_lvb *lvb;
@@ -847,14 +582,16 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
- /* osc_ast_data_get() has to go after environment is
- * allocated, because osc_ast_data() acquires a
- * reference to a lock, and it can only be released in
- * environment.
- */
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
+ struct cl_object *obj = NULL;
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
+
+ if (obj) {
/* Do not grab the mutex of cl_lock for glimpse.
* See LU-1274 for details.
* BTW, it's okay for cl_lock to be cancelled during
@@ -869,7 +606,6 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
result = req_capsule_server_pack(cap);
if (result == 0) {
lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- obj = lock->cll_descr.cld_obj;
result = cl_object_glimpse(env, obj, lvb);
}
if (!exp_connect_lvb_type(req->rq_export))
@@ -877,7 +613,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
&RMF_DLM_LVB,
sizeof(struct ost_lvb_v1),
RCL_SERVER);
- osc_ast_data_put(env, olck);
+ cl_object_put(env, obj);
} else {
/*
* These errors are normal races, so we don't want to
@@ -888,44 +624,123 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
result = -ELDLM_NO_LOCK_DATA;
}
cl_env_nested_put(&nest, env);
- } else
+ } else {
result = PTR_ERR(env);
+ }
req->rq_status = result;
return result;
}
-static unsigned long osc_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static int weigh_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
{
- /*
- * don't need to grab coh_page_guard since we don't care the exact #
- * of pages..
- */
- return cl_object_header(slice->cls_obj)->coh_pages;
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ if (cl_page_is_vmlocked(env, page) ||
+     PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)) {
+ (*(unsigned long *)cbdata)++;
+ return CLP_GANG_ABORT;
+ }
+
+ return CLP_GANG_OKAY;
}
-static void osc_lock_build_einfo(const struct lu_env *env,
- const struct cl_lock *clock,
- struct osc_lock *lock,
- struct ldlm_enqueue_info *einfo)
+static unsigned long osc_lock_weight(const struct lu_env *env,
+ struct osc_object *oscobj,
+ struct ldlm_extent *extent)
+{
+ struct cl_io *io = &osc_env_info(env)->oti_io;
+ struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
+ unsigned long npages = 0;
+ int result;
+
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ return result;
+
+ do {
+ result = osc_page_gang_lookup(env, io, oscobj,
+ cl_index(obj, extent->start),
+ cl_index(obj, extent->end),
+ weigh_cb, (void *)&npages);
+ if (result == CLP_GANG_ABORT)
+ break;
+ if (result == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (result != CLP_GANG_OKAY);
+ cl_io_fini(env, io);
+
+ return npages;
+}
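
[Editor's note] osc_lock_weight() keeps re-calling the gang lookup because the scan may yield the CPU partway through the range (CLP_GANG_RESCHED) and must resume where it left off. A toy userspace model of that driver loop — the 8-page yield interval and the busy page index are invented:

#include <stdio.h>

enum { GANG_OKAY, GANG_RESCHED, GANG_ABORT };

/* Pretend scanner: visits pages in [*pos, end], yields every 8 pages,
 * mirroring how osc_page_gang_lookup() can stop mid-range.
 */
static int scan(unsigned long *pos, unsigned long end, unsigned long busy)
{
        while (*pos <= end) {
                if (*pos == busy)
                        return GANG_ABORT;      /* weigh_cb found a pinned page */
                if (++*pos % 8 == 0)
                        return GANG_RESCHED;    /* let others run, then resume */
        }
        return GANG_OKAY;
}

int main(void)
{
        unsigned long pos = 0;
        int rc;

        do {
                rc = scan(&pos, 100, 37);
        } while (rc == GANG_RESCHED);           /* same shape as osc_lock_weight() */
        printf("stopped at %lu, rc=%d\n", pos, rc);
        return 0;
}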
+
+/**
+ * Get the weight of a DLM lock for early cancellation.
+ */
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
- enum cl_lock_mode mode;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct osc_object *obj;
+ struct osc_lock *oscl;
+ unsigned long weight;
+ bool found = false;
+
+ might_sleep();
+ /*
+ * osc_ldlm_weigh_ast has a complex context since it might be called
+ * due to lock cancellation or from user input. We have to make a new
+ * environment for it. It is probably safe to reuse the upper context,
+ * because cl_lock_put() does not modify environment variables, but
+ * create one just in case.
+ */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ /* Mostly due to lack of memory; do not eliminate this lock */
+ return 1;
- mode = clock->cll_descr.cld_mode;
- if (mode == CLM_PHANTOM)
+ LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
+ obj = dlmlock->l_ast_data;
+ if (!obj) {
+ weight = 1;
+ goto out;
+ }
+
+ spin_lock(&obj->oo_ol_spin);
+ list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
+ if (oscl->ols_dlmlock && oscl->ols_dlmlock != dlmlock)
+ continue;
+ found = true;
+ }
+ spin_unlock(&obj->oo_ol_spin);
+ if (found) {
/*
- * For now, enqueue all glimpse locks in read mode. In the
- * future, client might choose to enqueue LCK_PW lock for
- * glimpse on a file opened for write.
+ * If the lock is being used by an IO, definitely do not cancel it.
*/
- mode = CLM_READ;
+ weight = 1;
+ goto out;
+ }
+
+ weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
+
+out:
+ cl_env_nested_put(&nest, env);
+ return weight;
+}
+static void osc_lock_build_einfo(const struct lu_env *env,
+ const struct cl_lock *lock,
+ struct osc_object *osc,
+ struct ldlm_enqueue_info *einfo)
+{
einfo->ei_type = LDLM_EXTENT;
- einfo->ei_mode = osc_cl_lock2ldlm(mode);
+ einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
einfo->ei_cb_bl = osc_ldlm_blocking_ast;
- einfo->ei_cb_cp = osc_ldlm_completion_ast;
+ einfo->ei_cb_cp = ldlm_completion_ast;
einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
+ einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
}
/**
@@ -981,113 +796,100 @@ static void osc_lock_to_lockless(const struct lu_env *env,
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
-static int osc_lock_compatible(const struct osc_lock *qing,
- const struct osc_lock *qed)
+static bool osc_lock_compatible(const struct osc_lock *qing,
+ const struct osc_lock *qed)
{
- enum cl_lock_mode qing_mode;
- enum cl_lock_mode qed_mode;
+ struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
+ struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
- qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
- if (qed->ols_glimpse &&
- (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
- return 1;
+ if (qed->ols_glimpse)
+ return true;
+
+ if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
+ return true;
+
+ if (qed->ols_state < OLS_GRANTED)
+ return true;
+
+ if (qed_descr->cld_mode >= qing_descr->cld_mode &&
+ qed_descr->cld_start <= qing_descr->cld_start &&
+ qed_descr->cld_end >= qing_descr->cld_end)
+ return true;
- qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
- return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+ return false;
}
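
[Editor's note] Concretely: a later PR request on pages [10, 20] is compatible with an already granted PW lock on [0, 99] (wider mode, covering extent), while a granted PR on [0, 49] cannot cover a PW request. A standalone model of the predicate above, with the glimpse shortcut omitted and CLM_* reduced to an ordered enum (CLM_READ < CLM_WRITE, as in cl_lock_mode):

#include <assert.h>
#include <stdbool.h>

enum mode { READ = 1, WRITE = 2 };
struct descr { enum mode mode; unsigned long start, end; };

static bool compatible(struct descr qing, struct descr qed, bool qed_granted)
{
        if (qing.mode == READ && qed.mode == READ)
                return true;                    /* two readers never conflict */
        if (!qed_granted)
                return true;                    /* not granted yet: server orders them */
        return qed.mode >= qing.mode &&         /* queued lock already covers the new one */
               qed.start <= qing.start && qed.end >= qing.end;
}

int main(void)
{
        struct descr pw_big = { WRITE, 0, 99 }, pr_small = { READ, 10, 20 };
        struct descr pr_half = { READ, 0, 49 }, pw_small = { WRITE, 10, 20 };

        assert(compatible(pr_small, pw_big, true));     /* granted PW covers PR */
        assert(!compatible(pw_small, pr_half, true));   /* PR cannot cover PW */
        return 0;
}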
-/**
- * Cancel all conflicting locks and wait for them to be destroyed.
- *
- * This function is used for two purposes:
- *
- * - early cancel all conflicting locks before starting IO, and
- *
- * - guarantee that pages added to the page cache by lockless IO are never
- * covered by locks other than lockless IO lock, and, hence, are not
- * visible to other threads.
- */
-static int osc_lock_enqueue_wait(const struct lu_env *env,
- const struct osc_lock *olck)
+static void osc_lock_wake_waiters(const struct lu_env *env,
+ struct osc_object *osc,
+ struct osc_lock *oscl)
{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- struct cl_lock_descr *descr = &lock->cll_descr;
- struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
- struct cl_lock *scan;
- struct cl_lock *conflict = NULL;
- int lockless = osc_lock_is_lockless(olck);
- int rc = 0;
+ spin_lock(&osc->oo_ol_spin);
+ list_del_init(&oscl->ols_nextlock_oscobj);
+ spin_unlock(&osc->oo_ol_spin);
- LASSERT(cl_lock_is_mutexed(lock));
+ spin_lock(&oscl->ols_lock);
+ while (!list_empty(&oscl->ols_waiting_list)) {
+ struct osc_lock *scan;
- /* make it enqueue anyway for glimpse lock, because we actually
- * don't need to cancel any conflicting locks.
- */
- if (olck->ols_glimpse)
- return 0;
+ scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
+ ols_wait_entry);
+ list_del_init(&scan->ols_wait_entry);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- struct cl_lock_descr *cld = &scan->cll_descr;
- const struct osc_lock *scan_ols;
+ cl_sync_io_note(env, scan->ols_owner, 0);
+ }
+ spin_unlock(&oscl->ols_lock);
+}
+
+static void osc_lock_enqueue_wait(const struct lu_env *env,
+ struct osc_object *obj,
+ struct osc_lock *oscl)
+{
+ struct osc_lock *tmp_oscl;
+ struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
+ struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
- if (scan == lock)
+ spin_lock(&obj->oo_ol_spin);
+ list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);
+
+restart:
+ list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
+ ols_nextlock_oscobj) {
+ struct cl_lock_descr *descr;
+
+ if (tmp_oscl == oscl)
break;
- if (scan->cll_state < CLS_QUEUING ||
- scan->cll_state == CLS_FREEING ||
- cld->cld_start > descr->cld_end ||
- cld->cld_end < descr->cld_start)
+ descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
+ if (descr->cld_start > need->cld_end ||
+ descr->cld_end < need->cld_start)
continue;
- /* overlapped and living locks. */
+ /* We're not supposed to give up a group lock */
+ if (descr->cld_mode == CLM_GROUP)
+ break;
- /* We're not supposed to give up group lock. */
- if (scan->cll_descr.cld_mode == CLM_GROUP) {
- LASSERT(descr->cld_mode != CLM_GROUP ||
- descr->cld_gid != scan->cll_descr.cld_gid);
+ if (!osc_lock_is_lockless(oscl) &&
+ osc_lock_compatible(oscl, tmp_oscl))
continue;
- }
- scan_ols = osc_lock_at(scan);
+ /* wait for conflicting lock to be canceled */
+ cl_sync_io_init(waiter, 1, cl_sync_io_end);
+ oscl->ols_owner = waiter;
- /* We need to cancel the compatible locks if we're enqueuing
- * a lockless lock, for example:
- * imagine that client has PR lock on [0, 1000], and thread T0
- * is doing lockless IO in [500, 1500] region. Concurrent
- * thread T1 can see lockless data in [500, 1000], which is
- * wrong, because these data are possibly stale.
- */
- if (!lockless && osc_lock_compatible(olck, scan_ols))
- continue;
+ spin_lock(&tmp_oscl->ols_lock);
+ /* add oscl into tmp's ols_waiting list */
+ list_add_tail(&oscl->ols_wait_entry,
+ &tmp_oscl->ols_waiting_list);
+ spin_unlock(&tmp_oscl->ols_lock);
- cl_lock_get_trust(scan);
- conflict = scan;
- break;
- }
- spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&obj->oo_ol_spin);
+ (void)cl_sync_io_wait(env, waiter, 0);
- if (conflict) {
- if (lock->cll_descr.cld_mode == CLM_GROUP) {
- /* we want a group lock but a previous lock request
- * conflicts, we do not wait but return 0 so the
- * request is send to the server
- */
- CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
- lock, conflict);
- cl_lock_put(env, conflict);
- rc = 0;
- } else {
- CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
- lock, conflict);
- LASSERT(!lock->cll_conflict);
- lu_ref_add(&conflict->cll_reference, "cancel-wait",
- lock);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
- }
+ spin_lock(&obj->oo_ol_spin);
+ oscl->ols_owner = NULL;
+ goto restart;
}
- return rc;
+ spin_unlock(&obj->oo_ol_spin);
}
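
[Editor's note] The enqueue/wake handshake above amounts to: register a one-shot waiter on the conflicting lock, drop the object spinlock, sleep, then rescan the list from the top because it may have changed while sleeping. A minimal pthread model of the sleep/notify half — cl_sync_io is approximated by a mutex/condvar pair, and all names are invented:

#include <pthread.h>
#include <stdio.h>

/* "waiter" plays osc_lock_enqueue_wait(), "holder" plays
 * osc_lock_wake_waiters().
 */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int canceled;

static void *holder(void *arg)
{
        pthread_mutex_lock(&m);
        canceled = 1;                   /* conflicting lock goes away */
        pthread_cond_signal(&c);        /* cl_sync_io_note() */
        pthread_mutex_unlock(&m);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, holder, NULL);
        pthread_mutex_lock(&m);
        while (!canceled)               /* cl_sync_io_wait(); then rescan the list */
                pthread_cond_wait(&c, &m);
        pthread_mutex_unlock(&m);
        pthread_join(t, NULL);
        puts("conflict gone, restart the scan");
        return 0;
}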
/**
@@ -1106,188 +908,122 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
*/
static int osc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
+ struct cl_io *unused, struct cl_sync_io *anchor)
{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = ols->ols_cl.cls_lock;
+ struct osc_thread_info *info = osc_env_info(env);
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(slice->cls_obj);
+ struct osc_lock *oscl = cl2osc_lock(slice);
+ struct cl_lock *lock = slice->cls_lock;
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ osc_enqueue_upcall_f upcall = osc_lock_upcall;
+ void *cookie = oscl;
+ bool async = false;
int result;
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERTF(ols->ols_state == OLS_NEW,
- "Impossible state: %d\n", ols->ols_state);
-
- LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, ols);
+ LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
+ "lock = %p, ols = %p\n", lock, oscl);
- result = osc_lock_enqueue_wait(env, ols);
- if (result == 0) {
- if (!osc_lock_is_lockless(ols)) {
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+ if (oscl->ols_state == OLS_GRANTED)
+ return 0;
- /* lock will be passed as upcall cookie,
- * hold ref to prevent to be released.
- */
- cl_lock_hold_add(env, lock, "upcall", lock);
- /* a user for lock also */
- cl_lock_user_add(env, lock);
- ols->ols_state = OLS_ENQUEUED;
+ if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
+ goto enqueue_base;
- /*
- * XXX: this is possible blocking point as
- * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
- * LDLM_CP_CALLBACK.
- */
- ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
- osc_lock_build_policy(env, lock, policy);
- result = osc_enqueue_base(osc_export(obj), resname,
- &ols->ols_flags, policy,
- &ols->ols_lvb,
- obj->oo_oinfo->loi_kms_valid,
- osc_lock_upcall,
- ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1, ols->ols_agl);
- if (result != 0) {
- cl_lock_user_del(env, lock);
- cl_lock_unhold(env, lock, "upcall", lock);
- if (unlikely(result == -ECANCELED)) {
- ols->ols_state = OLS_NEW;
- result = 0;
- }
- }
- } else {
- ols->ols_state = OLS_GRANTED;
- ols->ols_owner = osc_env_io(env);
- }
+ if (oscl->ols_glimpse) {
+ LASSERT(equi(oscl->ols_agl, !anchor));
+ async = true;
+ goto enqueue_base;
}
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
- return result;
-}
-static int osc_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
-
- LINVRNT(osc_lock_invariant(olck));
-
- if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
- if (olck->ols_flags & LDLM_FL_LVB_READY) {
- return 0;
- } else if (olck->ols_agl) {
- if (lock->cll_flags & CLF_FROM_UPCALL)
- /* It is from enqueue RPC reply upcall for
- * updating state. Do not re-enqueue.
- */
- return -ENAVAIL;
- olck->ols_state = OLS_NEW;
- } else {
- LASSERT(lock->cll_error);
- return lock->cll_error;
- }
+ osc_lock_enqueue_wait(env, osc, oscl);
+
+ /* We can grant a lockless lock right after all conflicting locks
+ * have been canceled.
+ */
+ if (osc_lock_is_lockless(oscl)) {
+ oscl->ols_state = OLS_GRANTED;
+ oio->oi_lockless = 1;
+ return 0;
}
- if (olck->ols_state == OLS_NEW) {
- int rc;
-
- LASSERT(olck->ols_agl);
- olck->ols_agl = 0;
- olck->ols_flags &= ~LDLM_FL_BLOCK_NOWAIT;
- rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
- if (rc != 0)
- return rc;
- else
- return CLO_REENQUEUED;
+enqueue_base:
+ oscl->ols_state = OLS_ENQUEUED;
+ if (anchor) {
+ atomic_inc(&anchor->csi_sync_nr);
+ oscl->ols_owner = anchor;
}
- LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
- lock->cll_error == 0, olck->ols_lock));
+ /*
+ * The DLM lock's AST data must be the osc_object; for a glimpse or
+ * AGL lock, the async argument of osc_enqueue_base() must be true,
+ * and the DLM enqueue callback is set to osc_lock_upcall() with the
+ * osc_lock as its cookie.
+ */
+ ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
+ osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo);
+ osc_lock_build_policy(env, lock, policy);
+ if (oscl->ols_agl) {
+ oscl->ols_einfo.ei_cbdata = NULL;
+ /* hold a reference for callback */
+ cl_object_get(osc2cl(osc));
+ upcall = osc_lock_upcall_agl;
+ cookie = osc;
+ }
+ result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
+ policy, &oscl->ols_lvb,
+ osc->oo_oinfo->loi_kms_valid,
+ upcall, cookie,
+ &oscl->ols_einfo, PTLRPCD_SET, async,
+ oscl->ols_agl);
+ if (result != 0) {
+ oscl->ols_state = OLS_CANCELLED;
+ osc_lock_wake_waiters(env, osc, oscl);
- return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
+ /* Hide the error for an AGL lock. */
+ if (oscl->ols_agl) {
+ cl_object_put(env, osc2cl(osc));
+ result = 0;
+ }
+ if (anchor)
+ cl_sync_io_note(env, anchor, result);
+ } else {
+ if (osc_lock_is_lockless(oscl)) {
+ oio->oi_lockless = 1;
+ } else if (!async) {
+ LASSERT(oscl->ols_state == OLS_GRANTED);
+ LASSERT(oscl->ols_hold);
+ LASSERT(oscl->ols_dlmlock);
+ }
+ }
+ return result;
}
/**
- * An implementation of cl_lock_operations::clo_use() method that pins cached
- * lock.
+ * Breaks a link between osc_lock and dlm_lock.
*/
-static int osc_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
- struct osc_lock *olck = cl2osc_lock(slice);
- int rc;
-
- LASSERT(!olck->ols_hold);
+ struct ldlm_lock *dlmlock;
- /*
- * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
- * flag is not set. This protects us from a concurrent blocking ast.
- */
- rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
- if (rc == 0) {
- olck->ols_hold = 1;
- olck->ols_state = OLS_GRANTED;
- } else {
- struct cl_lock *lock;
+ dlmlock = olck->ols_dlmlock;
+ if (!dlmlock)
+ return;
- /*
- * Lock is being cancelled somewhere within
- * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
- * set, but osc_ldlm_blocking_ast() hasn't yet acquired
- * cl_lock mutex.
- */
- lock = slice->cls_lock;
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(lock->cll_users > 0);
- /* set a flag for osc_dlm_blocking_ast0() to signal the
- * lock.
- */
- olck->ols_ast_wait = 1;
- rc = CLO_WAIT;
+ if (olck->ols_hold) {
+ olck->ols_hold = 0;
+ osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ olck->ols_handle.cookie = 0ULL;
}
- return rc;
-}
-static int osc_lock_flush(struct osc_lock *ols, int discard)
-{
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct cl_env_nest nest;
- struct lu_env *env;
- int result = 0;
-
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
- struct cl_lock_descr *descr = &lock->cll_descr;
- int rc = 0;
-
- if (descr->cld_mode >= CLM_WRITE) {
- result = osc_cache_writeback_range(env, obj,
- descr->cld_start,
- descr->cld_end,
- 1, discard);
- LDLM_DEBUG(ols->ols_lock,
- "lock %p: %d pages were %s.\n", lock, result,
- discard ? "discarded" : "written");
- if (result > 0)
- result = 0;
- }
+ olck->ols_dlmlock = NULL;
- rc = cl_lock_discard_pages(env, lock);
- if (result == 0 && rc < 0)
- result = rc;
-
- cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- if (result == 0) {
- ols->ols_flush = 1;
- LINVRNT(!osc_lock_has_pages(ols));
- }
- return result;
+ /* release a reference taken in osc_lock_upcall(). */
+ LASSERT(olck->ols_has_ref);
+ lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
+ LDLM_LOCK_RELEASE(dlmlock);
+ olck->ols_has_ref = 0;
}
/**
@@ -1307,96 +1043,16 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
static void osc_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct cl_lock *lock = slice->cls_lock;
- struct osc_lock *olck = cl2osc_lock(slice);
- struct ldlm_lock *dlmlock = olck->ols_lock;
- int result = 0;
- int discard;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LINVRNT(osc_lock_invariant(olck));
-
- if (dlmlock) {
- int do_cancel;
-
- discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
- if (olck->ols_state >= OLS_GRANTED)
- result = osc_lock_flush(olck, discard);
- osc_lock_unhold(olck);
-
- lock_res_and_lock(dlmlock);
- /* Now that we're the only user of dlm read/write reference,
- * mostly the ->l_readers + ->l_writers should be zero.
- * However, there is a corner case.
- * See bug 18829 for details.
- */
- do_cancel = (dlmlock->l_readers == 0 &&
- dlmlock->l_writers == 0);
- dlmlock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(dlmlock);
- if (do_cancel)
- result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
- if (result < 0)
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "lock %p cancel failure with error(%d)\n",
- lock, result);
- }
- olck->ols_state = OLS_CANCELLED;
- olck->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_detach(env, olck);
-}
-
-static int osc_lock_has_pages(struct osc_lock *olck)
-{
- return 0;
-}
-
-static void osc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck;
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_lock *oscl = cl2osc_lock(slice);
- olck = cl2osc_lock(slice);
- if (olck->ols_glimpse) {
- LASSERT(!olck->ols_hold);
- LASSERT(!olck->ols_lock);
- return;
- }
+ LINVRNT(osc_lock_invariant(oscl));
- LINVRNT(osc_lock_invariant(olck));
- LINVRNT(!osc_lock_has_pages(olck));
+ osc_lock_detach(env, oscl);
+ oscl->ols_state = OLS_CANCELLED;
+ oscl->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_unhold(olck);
- osc_lock_detach(env, olck);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for osc layer.
- *
- * Maintains osc_lock::ols_owner field.
- *
- * This assumes that lock always enters CLS_HELD (from some other state) in
- * the same IO context as one that requested the lock. This should not be a
- * problem, because context is by definition shared by all activity pertaining
- * to the same high-level IO.
- */
-static void osc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- /*
- * XXX multiple io contexts can use the lock at the same time.
- */
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
-
- LASSERT(!lock->ols_owner);
- lock->ols_owner = oio;
- } else if (state != CLS_HELD)
- lock->ols_owner = NULL;
+ osc_lock_wake_waiters(env, obj, oscl);
}
static int osc_lock_print(const struct lu_env *env, void *cookie,
@@ -1404,221 +1060,161 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
{
struct osc_lock *lock = cl2osc_lock(slice);
- /*
- * XXX print ldlm lock and einfo properly.
- */
(*p)(env, cookie, "%p %#16llx %#llx %d %p ",
- lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
+ lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
lock->ols_state, lock->ols_owner);
osc_lvb_print(env, cookie, p, &lock->ols_lvb);
return 0;
}
-static int osc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- if (need->cld_enq_flags & CEF_NEVER)
- return 0;
-
- if (ols->ols_state >= OLS_CANCELLED)
- return 0;
-
- if (need->cld_mode == CLM_PHANTOM) {
- if (ols->ols_agl)
- return !(ols->ols_state > OLS_RELEASED);
-
- /*
- * Note: the QUEUED lock can't be matched here, otherwise
- * it might cause the deadlocks.
- * In read_process,
- * P1: enqueued read lock, create sublock1
- * P2: enqueued write lock, create sublock2(conflicted
- * with sublock1).
- * P1: Grant read lock.
- * P1: enqueued glimpse lock(with holding sublock1_read),
- * matched with sublock2, waiting sublock2 to be granted.
- * But sublock2 can not be granted, because P1
- * will not release sublock1. Bang!
- */
- if (ols->ols_state < OLS_GRANTED ||
- ols->ols_state > OLS_RELEASED)
- return 0;
- } else if (need->cld_enq_flags & CEF_MUST) {
- /*
- * If the lock hasn't ever enqueued, it can't be matched
- * because enqueue process brings in many information
- * which can be used to determine things such as lockless,
- * CEF_MUST, etc.
- */
- if (ols->ols_state < OLS_UPCALL_RECEIVED &&
- ols->ols_locklessable)
- return 0;
- }
- return 1;
-}
-
static const struct cl_lock_operations osc_lock_ops = {
.clo_fini = osc_lock_fini,
.clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_wait,
- .clo_unuse = osc_lock_unuse,
- .clo_use = osc_lock_use,
- .clo_delete = osc_lock_delete,
- .clo_state = osc_lock_state,
.clo_cancel = osc_lock_cancel,
- .clo_weigh = osc_lock_weigh,
.clo_print = osc_lock_print,
- .clo_fits_into = osc_lock_fits_into,
};
-static int osc_lock_lockless_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = slice->cls_lock;
-
- LASSERT(ols->ols_state == OLS_GRANTED);
- LINVRNT(osc_lock_invariant(ols));
-
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- return 0;
-}
-
static void osc_lock_lockless_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
+ struct osc_object *osc = cl2osc(slice->cls_obj);
+ struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
int result;
- result = osc_lock_flush(ols, 0);
+ LASSERT(!ols->ols_dlmlock);
+ result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
+ descr->cld_mode, 0);
if (result)
CERROR("Pages for lockless lock %p were not purged(%d)\n",
ols, result);
- ols->ols_state = OLS_CANCELLED;
-}
-
-static int osc_lock_lockless_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- LINVRNT(osc_lock_invariant(olck));
- LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
-
- return lock->cll_error;
+ osc_lock_wake_waiters(env, osc, ols);
}
-static void osc_lock_lockless_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
+static const struct cl_lock_operations osc_lock_lockless_ops = {
+ .clo_fini = osc_lock_fini,
+ .clo_enqueue = osc_lock_enqueue,
+ .clo_cancel = osc_lock_lockless_cancel,
+ .clo_print = osc_lock_print
+};
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
+static void osc_lock_set_writer(const struct lu_env *env,
+ const struct cl_io *io,
+ struct cl_object *obj, struct osc_lock *oscl)
+{
+ struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
+ pgoff_t io_start;
+ pgoff_t io_end;
- LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
- lock->ols_owner = oio;
+ if (!cl_object_same(io->ci_obj, obj))
+ return;
- /* set the io to be lockless if this lock is for io's
- * host object
- */
- if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
- oio->oi_lockless = 1;
+ if (likely(io->ci_type == CIT_WRITE)) {
+ io_start = cl_index(obj, io->u.ci_rw.crw_pos);
+ io_end = cl_index(obj, io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_count - 1);
+ if (cl_io_is_append(io)) {
+ io_start = 0;
+ io_end = CL_PAGE_EOF;
+ }
+ } else {
+ LASSERT(cl_io_is_mkwrite(io));
+ io_start = io_end = io->u.ci_fault.ft_index;
}
-}
-static int osc_lock_lockless_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- if (!(need->cld_enq_flags & CEF_NEVER))
- return 0;
+ if (descr->cld_mode >= CLM_WRITE &&
+ descr->cld_start <= io_start && descr->cld_end >= io_end) {
+ struct osc_io *oio = osc_env_io(env);
- /* lockless lock should only be used by its owning io. b22147 */
- return (lock->ols_owner == osc_env_io(env));
+ /* There must be only one lock to match the write region */
+ LASSERT(!oio->oi_write_osclock);
+ oio->oi_write_osclock = oscl;
+ }
}
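
[Editor's note] The io_start/io_end computation is plain byte-to-page-index arithmetic: cl_index() maps a byte offset to the page that contains it. A worked example assuming 4 KiB pages, with cl_index() modeled as a right shift:

#include <stdio.h>

int main(void)
{
        unsigned long pos = 5000, count = 10000, shift = 12;
        unsigned long io_start = pos >> shift;
        unsigned long io_end = (pos + count - 1) >> shift;

        /* a write of bytes [5000, 14999] touches pages 1..3 */
        printf("pages %lu..%lu\n", io_start, io_end);
        return 0;
}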
-static const struct cl_lock_operations osc_lock_lockless_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_lockless_wait,
- .clo_unuse = osc_lock_lockless_unuse,
- .clo_state = osc_lock_lockless_state,
- .clo_fits_into = osc_lock_lockless_fits_into,
- .clo_cancel = osc_lock_lockless_cancel,
- .clo_print = osc_lock_print
-};
-
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
+ const struct cl_io *io)
{
- struct osc_lock *clk;
- int result;
-
- clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
- if (clk) {
- __u32 enqflags = lock->cll_descr.cld_enq_flags;
+ struct osc_lock *oscl;
+ __u32 enqflags = lock->cll_descr.cld_enq_flags;
+
+ oscl = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
+ if (!oscl)
+ return -ENOMEM;
+
+ oscl->ols_state = OLS_NEW;
+ spin_lock_init(&oscl->ols_lock);
+ INIT_LIST_HEAD(&oscl->ols_waiting_list);
+ INIT_LIST_HEAD(&oscl->ols_wait_entry);
+ INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
+
+ oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
+ oscl->ols_agl = !!(enqflags & CEF_AGL);
+ if (oscl->ols_agl)
+ oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
+ oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
+ oscl->ols_glimpse = 1;
+ }
- osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
- atomic_set(&clk->ols_pageref, 0);
- clk->ols_state = OLS_NEW;
+ cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
- clk->ols_flags = osc_enq2ldlm_flags(enqflags);
- clk->ols_agl = !!(enqflags & CEF_AGL);
- if (clk->ols_agl)
- clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
- if (clk->ols_flags & LDLM_FL_HAS_INTENT)
- clk->ols_glimpse = 1;
+ if (!(enqflags & CEF_MUST))
+ /* try to convert this lock to a lockless lock */
+ osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
+ if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
+ oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
+ if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
+ osc_lock_set_writer(env, io, obj, oscl);
- if (!(enqflags & CEF_MUST))
- /* try to convert this lock to a lockless lock */
- osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
- if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
- clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
- lock, clk, clk->ols_flags);
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
+ lock, oscl, oscl->ols_flags);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ return 0;
}
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+/**
+ * Finds an existing DLM lock covering the given page index. \a pending
+ * allows matching a lock with a pending blocking callback, and
+ * \a canceling allows matching a lock that is being canceled.
+ */
+struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int pending, int canceling)
{
- struct osc_lock *olock;
- int rc = 0;
-
- spin_lock(&osc_ast_guard);
- olock = dlm->l_ast_data;
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct lustre_handle lockh;
+ struct ldlm_lock *lock = NULL;
+ enum ldlm_mode mode;
+ __u64 flags;
+
+ ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
+ osc_index2policy(policy, osc2cl(obj), index, index);
+ policy->l_extent.gid = LDLM_GID_ANY;
+
+ flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
+ if (pending)
+ flags |= LDLM_FL_CBPENDING;
/*
- * there's a very rare race with osc_page_addref_lock(), but that
- * doesn't matter because in the worst case we don't cancel a lock
- * which we actually can, that's no harm.
+ * It is fine to match any group lock since there can be only one
+ * with a unique gid, and it conflicts with all other lock modes too.
*/
- if (olock &&
- atomic_add_return(_PAGEREF_MAGIC,
- &olock->ols_pageref) != _PAGEREF_MAGIC) {
- atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
- rc = 1;
+again:
+ mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
+ flags, resname, LDLM_EXTENT, policy,
+ LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
+ if (mode != 0) {
+ lock = ldlm_handle2lock(&lockh);
+ /* RACE: the lock is cancelled so let's try again */
+ if (unlikely(!lock))
+ goto again;
}
- spin_unlock(&osc_ast_guard);
- return rc;
+ return lock;
}
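
[Editor's note] The goto again loop covers the window between ldlm_lock_match() and ldlm_handle2lock(): the matched lock can be canceled in between, leaving a stale handle. A deterministic toy model of that retry shape — attempt counts invented, only the control flow mirrors the function above:

#include <stdio.h>

static int attempts;

static int match(void)     { return 1; }        /* "some lock matches" */
static void *resolve(void) { return ++attempts < 3 ? NULL : "lock"; }

int main(void)
{
        void *lock;

        do {
                if (!match())
                        return 1;       /* nothing to find any more */
                lock = resolve();       /* ldlm_handle2lock() */
        } while (!lock);                /* raced with cancel: try again */
        printf("resolved after %d attempts\n", attempts);
        return 0;
}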
/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 9d474fcdd..738ab10ab 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -36,6 +36,7 @@
* Implementation of cl_object for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
@@ -94,6 +95,9 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
atomic_set(&osc->oo_nr_reads, 0);
atomic_set(&osc->oo_nr_writes, 0);
spin_lock_init(&osc->oo_lock);
+ spin_lock_init(&osc->oo_tree_lock);
+ spin_lock_init(&osc->oo_ol_spin);
+ INIT_LIST_HEAD(&osc->oo_ol_list);
cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
@@ -120,6 +124,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
LASSERT(list_empty(&osc->oo_reading_exts));
LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
+ LASSERT(list_empty(&osc->oo_ol_list));
lu_object_fini(obj);
kmem_cache_free(osc_object_kmem, osc);
@@ -192,6 +197,32 @@ static int osc_object_glimpse(const struct lu_env *env,
return 0;
}
+static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
+{
+ LASSERT(lock->l_granted_mode == lock->l_req_mode);
+ if (lock->l_ast_data == data)
+ lock->l_ast_data = NULL;
+ return LDLM_ITER_CONTINUE;
+}
+
+static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct osc_object *osc = cl2osc(obj);
+ struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
+
+ LASSERTF(osc->oo_npages == 0,
+ DFID "still have %lu pages, obj: %p, osc: %p\n",
+ PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);
+
+ /* DLM locks don't hold a reference to the osc_object, so we have to
+ * clear it before the object is destroyed.
+ */
+ ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
+ ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
+ osc_object_ast_clear, osc);
+ return 0;
+}
+
void osc_object_set_contended(struct osc_object *obj)
{
obj->oo_contention_time = cfs_time_current();
@@ -236,12 +267,12 @@ static const struct cl_object_operations osc_ops = {
.coo_io_init = osc_io_init,
.coo_attr_get = osc_attr_get,
.coo_attr_set = osc_attr_set,
- .coo_glimpse = osc_object_glimpse
+ .coo_glimpse = osc_object_glimpse,
+ .coo_prune = osc_object_prune
};
static const struct lu_object_operations osc_lu_obj_ops = {
.loo_object_init = osc_object_init,
- .loo_object_delete = NULL,
.loo_object_release = NULL,
.loo_object_free = osc_object_free,
.loo_object_print = osc_object_print,
@@ -261,8 +292,9 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
lu_object_init(obj, NULL, dev);
osc->oo_cl.co_ops = &osc_ops;
obj->lo_ops = &osc_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ce9ddd515..c29c2eabe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -36,14 +36,15 @@
* Implementation of cl_page for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
#include "osc_cl_internal.h"
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg);
@@ -63,18 +64,9 @@ static int osc_page_protected(const struct lu_env *env,
* Page operations.
*
*/
-static void osc_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- CDEBUG(D_TRACE, "%p\n", opg);
- LASSERT(!opg->ops_lock);
-}
-
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
LASSERT(!opg->ops_transfer_pinned);
cl_page_get(page);
@@ -85,11 +77,11 @@ static void osc_page_transfer_get(struct osc_page *opg, const char *label)
static void osc_page_transfer_put(const struct lu_env *env,
struct osc_page *opg)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
if (opg->ops_transfer_pinned) {
- lu_ref_del(&page->cp_reference, "transfer", page);
opg->ops_transfer_pinned = 0;
+ lu_ref_del(&page->cp_reference, "transfer", page);
cl_page_put(env, page);
}
}
@@ -104,10 +96,7 @@ static void osc_page_transfer_add(const struct lu_env *env,
{
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- /* ops_lru and ops_inflight share the same field, so take it from LRU
- * first and then use it as inflight.
- */
- osc_lru_del(osc_cli(obj), opg, false);
+ osc_lru_use(osc_cli(obj), opg);
spin_lock(&obj->oo_seatbelt);
list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
@@ -115,11 +104,9 @@ static void osc_page_transfer_add(const struct lu_env *env,
spin_unlock(&obj->oo_seatbelt);
}
-static int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io)
{
- struct osc_io *oio = osc_env_io(env);
struct osc_page *opg = cl2osc_page(slice);
int result;
@@ -132,17 +119,6 @@ static int osc_page_cache_add(const struct lu_env *env,
else
osc_page_transfer_add(env, opg, CRT_WRITE);
- /* for sync write, kernel will wait for this page to be flushed before
- * osc_io_end() is called, so release it earlier.
- * for mkwrite(), it's known there is no further pages.
- */
- if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
- if (oio->oi_active) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
- }
-
return result;
}
@@ -154,102 +130,25 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_addref_lock(const struct lu_env *env,
- struct osc_page *opg,
- struct cl_lock *lock)
-{
- struct osc_lock *olock;
- int rc;
-
- LASSERT(!opg->ops_lock);
-
- olock = osc_lock_at(lock);
- if (atomic_inc_return(&olock->ols_pageref) <= 0) {
- atomic_dec(&olock->ols_pageref);
- rc = -ENODATA;
- } else {
- cl_lock_get(lock);
- opg->ops_lock = lock;
- rc = 0;
- }
- return rc;
-}
-
-static void osc_page_putref_lock(const struct lu_env *env,
- struct osc_page *opg)
-{
- struct cl_lock *lock = opg->ops_lock;
- struct osc_lock *olock;
-
- LASSERT(lock);
- olock = osc_lock_at(lock);
-
- atomic_dec(&olock->ols_pageref);
- opg->ops_lock = NULL;
-
- cl_lock_put(env, lock);
-}
-
static int osc_page_is_under_lock(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *unused)
+ struct cl_io *unused, pgoff_t *max_index)
{
- struct cl_lock *lock;
+ struct osc_page *opg = cl2osc_page(slice);
+ struct ldlm_lock *dlmlock;
int result = -ENODATA;
- lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
- NULL, 1, 0);
- if (lock) {
- if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
- result = -EBUSY;
- cl_lock_put(env, lock);
+ dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
+ osc_index(opg), 1, 0);
+ if (dlmlock) {
+ *max_index = cl_index(slice->cpl_obj,
+ dlmlock->l_policy_data.l_extent.end);
+ LDLM_LOCK_PUT(dlmlock);
+ result = 0;
}
return result;
}
-static void osc_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- if (unlikely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
-}
-
-static void osc_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
-
- if (likely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static void osc_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(slice->cpl_obj);
-
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static int osc_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
static const char *osc_list(struct list_head *head)
{
return list_empty(head) ? "-" : "+";
@@ -272,8 +171,8 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
- opg,
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
+ opg, osc_index(opg),
/* 1 */
oap->oap_magic, oap->oap_cmd,
oap->oap_interrupted,
@@ -321,7 +220,7 @@ static void osc_page_delete(const struct lu_env *env,
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
if (rc) {
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
+ CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
"Trying to teardown failed: %d\n", rc);
LASSERT(0);
}
@@ -334,7 +233,19 @@ static void osc_page_delete(const struct lu_env *env,
}
spin_unlock(&obj->oo_seatbelt);
- osc_lru_del(osc_cli(obj), opg, true);
+ osc_lru_del(osc_cli(obj), opg);
+
+ if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
+ void *value;
+
+ spin_lock(&obj->oo_tree_lock);
+ value = radix_tree_delete(&obj->oo_tree, osc_index(opg));
+ if (value)
+ --obj->oo_npages;
+ spin_unlock(&obj->oo_tree_lock);
+
+ LASSERT(ergo(value, value == opg));
+ }
}
static void osc_page_clip(const struct lu_env *env,
@@ -382,28 +293,16 @@ static int osc_page_flush(const struct lu_env *env,
}
static const struct cl_page_operations osc_page_ops = {
- .cpo_fini = osc_page_fini,
.cpo_print = osc_page_print,
.cpo_delete = osc_page_delete,
.cpo_is_under_lock = osc_page_is_under_lock,
- .cpo_disown = osc_page_disown,
- .io = {
- [CRT_READ] = {
- .cpo_cache_add = osc_page_fail,
- .cpo_completion = osc_page_completion_read
- },
- [CRT_WRITE] = {
- .cpo_cache_add = osc_page_cache_add,
- .cpo_completion = osc_page_completion_write
- }
- },
.cpo_clip = osc_page_clip,
.cpo_cancel = osc_page_cancel,
.cpo_flush = osc_page_flush
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
@@ -412,13 +311,14 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
opg->ops_from = 0;
opg->ops_to = PAGE_SIZE;
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
+ result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+ cl_offset(obj, index));
if (result == 0) {
struct osc_io *oio = osc_env_io(env);
opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
+ cl_page_slice_add(page, &opg->ops_cl, obj, index,
+ &osc_page_ops);
}
/*
* Cannot assert osc_page_protected() here as read-ahead
@@ -431,12 +331,47 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0)
+ if (page->cp_type == CPT_CACHEABLE && result == 0) {
result = osc_lru_reserve(env, osc, opg);
+ if (result == 0) {
+ spin_lock(&osc->oo_tree_lock);
+ result = radix_tree_insert(&osc->oo_tree, index, opg);
+ if (result == 0)
+ ++osc->oo_npages;
+ spin_unlock(&osc->oo_tree_lock);
+ LASSERT(result == 0);
+ }
+ }
return result;
}
+int osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long obd_upages, obd_dpages, osc_upages;
+
+ /* Without cli we can't check cl_unstable_count, hence no soft limit */
+ if (!cli)
+ return 0;
+
+ obd_upages = atomic_read(&obd_unstable_pages);
+ obd_dpages = atomic_read(&obd_dirty_pages);
+
+ osc_upages = atomic_read(&cli->cl_unstable_count);
+
+ /*
+ * obd_max_dirty_pages is the max number of (dirty + unstable)
+ * pages allowed at any given time. To simulate an unstable-page-
+ * only limit, we subtract the current number of dirty pages
+ * from this max. This difference is roughly the number of pages
+ * currently available for unstable pages. Thus, the soft limit
+ * is half of that difference. Check osc_upages to ensure we don't
+ * set SOFT_SYNC for OSCs without any outstanding unstable pages.
+ */
+ return osc_upages &&
+ obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
+}
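
[Editor's note] Plugging numbers into the comment above: with obd_max_dirty_pages = 1000 and 400 dirty pages, the headroom is 600 and the soft limit is 300, so any OSC with outstanding unstable pages trips it once the global unstable count reaches 300. A runnable check of that arithmetic — all counts invented:

#include <stdio.h>

int main(void)
{
        long max_dirty = 1000, dirty = 400;
        long unstable = 350, osc_unstable = 20;
        int over = osc_unstable && unstable >= (max_dirty - dirty) / 2;

        /* prints limit=300 over=1 */
        printf("limit=%ld over=%d\n", (max_dirty - dirty) / 2, over);
        return 0;
}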
+
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
@@ -460,6 +395,9 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
+ if (osc_over_unstable_soft_limit(oap->oap_cli))
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+
if (!client_is_remote(osc_export(obj)) &&
capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
@@ -483,13 +421,12 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
*/
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and..
*/
static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
+static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
@@ -500,65 +437,142 @@ static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list) >> 1;
+ int pages = atomic_read(&cli->cl_lru_in_list);
+ unsigned long budget;
- if (atomic_read(&osc_lru_waiters) > 0 &&
- atomic_read(cli->cl_lru_left) < lru_shrink_max)
- /* drop lru pages aggressively */
- return min(pages, lru_shrink_max);
+ budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
- unsigned long tmp;
+ if (pages >= budget)
+ return lru_shrink_max;
+ else if (pages >= budget / 2)
+ return lru_shrink_min;
+ } else if (pages >= budget * 2) {
+ return lru_shrink_min;
+ }
+ return 0;
+}
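
[Editor's note] A worked example of the thresholds: with ccc_lru_max = 4096 shared by 4 clients, the per-client budget is 1024. Under global pressure (fewer than 1/16 of slots left) a client at or above its budget sheds lru_shrink_max and one above half of it sheds lru_shrink_min; without pressure, only a client above twice the budget sheds the minimum. A standalone restatement with invented numbers:

#include <stdio.h>

/* shrink_min/shrink_max stand in for lru_shrink_min/lru_shrink_max */
static int too_much(int pages, long budget, long left, long lru_max,
                    int shrink_min, int shrink_max)
{
        if (left < lru_max >> 4) {              /* global slots nearly gone */
                if (pages >= budget)
                        return shrink_max;
                if (pages >= budget / 2)
                        return shrink_min;
        } else if (pages >= budget * 2) {       /* a hog even without pressure */
                return shrink_min;
        }
        return 0;
}

int main(void)
{
        long lru_max = 4096, budget = lru_max / 4;      /* 4 clients */

        printf("%d\n", too_much(1100, budget, 100, lru_max, 512, 2048));  /* 2048 */
        printf("%d\n", too_much(600, budget, 100, lru_max, 512, 2048));   /* 512 */
        printf("%d\n", too_much(2100, budget, 4000, lru_max, 512, 2048)); /* 512 */
        return 0;
}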
- tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
- if (pages > tmp)
- return min(pages, lru_shrink_max);
+int lru_queue_work(const struct lu_env *env, void *data)
+{
+ struct client_obd *cli = data;
- return pages > lru_shrink_min ? lru_shrink_min : 0;
- }
+ CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
+
+ if (osc_cache_too_much(cli))
+ osc_lru_shrink(env, cli, lru_shrink_max, true);
return 0;
}
-/* Return how many pages are not discarded in @pvec. */
-static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
+{
+ LIST_HEAD(lru);
+ struct osc_async_page *oap;
+ int npages = 0;
+
+ list_for_each_entry(oap, plist, oap_pending_item) {
+ struct osc_page *opg = oap2osc_page(oap);
+
+ if (!opg->ops_in_lru)
+ continue;
+
+ ++npages;
+ LASSERT(list_empty(&opg->ops_lru));
+ list_add(&opg->ops_lru, &lru);
+ }
+
+ if (npages > 0) {
+ spin_lock(&cli->cl_lru_list_lock);
+ list_splice_tail(&lru, &cli->cl_lru_list);
+ atomic_sub(npages, &cli->cl_lru_busy);
+ atomic_add(npages, &cli->cl_lru_in_list);
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ /* XXX: May set force to be true for better performance */
+ if (osc_cache_too_much(cli))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+}
+
+static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ list_del_init(&opg->ops_lru);
+ atomic_dec(&cli->cl_lru_in_list);
+}
+
+/**
+ * Page is being destroyed. The page may not be on the LRU list if the
+ * transfer never finished (an error occurred).
+ */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ if (opg->ops_in_lru) {
+ spin_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ } else {
+ LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
+ atomic_dec(&cli->cl_lru_busy);
+ }
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ atomic_inc(cli->cl_lru_left);
+ /* this is a great place to release more LRU pages if
+ * this osc occupies too many LRU pages and the kernel is
+ * stealing one of them.
+ */
+ if (!memory_pressure_get())
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ wake_up(&osc_lru_waitq);
+ } else {
+ LASSERT(list_empty(&opg->ops_lru));
+ }
+}
+
+/**
+ * Delete a page from the LRU list because it is being redirtied.
+ */
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
+{
+ /* If page is being transferred for the first time,
+ * ops_lru should be empty
+ */
+ if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+ spin_lock(&cli->cl_lru_list_lock);
+ __osc_lru_del(cli, opg);
+ spin_unlock(&cli->cl_lru_list_lock);
+ atomic_inc(&cli->cl_lru_busy);
+ }
+}
+
+static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
{
- int count;
int i;
- for (count = 0, i = 0; i < max_index; i++) {
+ for (i = 0; i < max_index; i++) {
struct cl_page *page = pvec[i];
- if (cl_page_own_try(env, io, page) == 0) {
- /* free LRU page only if nobody is using it.
- * This check is necessary to avoid freeing the pages
- * having already been removed from LRU and pinned
- * for IO.
- */
- if (!cl_page_in_use(page)) {
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- ++count;
- }
- cl_page_disown(env, io, page);
- }
+ LASSERT(cl_page_is_owned(page, io));
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
cl_page_put(env, page);
+
pvec[i] = NULL;
}
- return max_index - count;
}
/**
* Drop @target of pages from LRU at most.
*/
-int osc_lru_shrink(struct client_obd *cli, int target)
+int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ int target, bool force)
{
- struct cl_env_nest nest;
- struct lu_env *env;
struct cl_io *io;
struct cl_object *clobj = NULL;
struct cl_page **pvec;
@@ -573,23 +587,31 @@ int osc_lru_shrink(struct client_obd *cli, int target)
if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- return PTR_ERR(env);
+ if (!force) {
+ if (atomic_read(&cli->cl_lru_shrinkers) > 0)
+ return -EBUSY;
- pvec = osc_env_info(env)->oti_pvec;
+ if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+ atomic_dec(&cli->cl_lru_shrinkers);
+ return -EBUSY;
+ }
+ } else {
+ atomic_inc(&cli->cl_lru_shrinkers);
+ }
+
+ pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
- client_obd_list_lock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_shrinkers);
+ spin_lock(&cli->cl_lru_list_lock);
maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
struct cl_page *page;
+ bool will_free = false;
if (--maxscan < 0)
break;
- page = cl_page_top(opg->ops_cl.cpl_page);
+ page = opg->ops_cl.cpl_page;
if (cl_page_in_use_noref(page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
@@ -600,10 +622,10 @@ int osc_lru_shrink(struct client_obd *cli, int target)
struct cl_object *tmp = page->cp_obj;
cl_object_get(tmp);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj) {
- count -= discard_pagevec(env, io, pvec, index);
+ discard_pagevec(env, io, pvec, index);
index = 0;
cl_io_fini(env, io);
@@ -616,7 +638,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, clobj);
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
if (rc != 0)
break;
@@ -625,98 +647,54 @@ int osc_lru_shrink(struct client_obd *cli, int target)
continue;
}
- /* move this page to the end of list as it will be discarded
- * soon. The page will be finally removed from LRU list in
- * osc_page_delete().
- */
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ if (cl_page_own_try(env, io, page) == 0) {
+ if (!cl_page_in_use_noref(page)) {
+ /* remove it from the LRU list early to avoid
+ * lock contention
+ */
+ __osc_lru_del(cli, opg);
+ opg->ops_in_lru = 0; /* will be discarded */
+
+ cl_page_get(page);
+ will_free = true;
+ } else {
+ cl_page_disown(env, io, page);
+ }
+ }
- /* it's okay to grab a refcount here w/o holding lock because
- * it has to grab cl_lru_list_lock to delete the page.
- */
- cl_page_get(page);
- pvec[index++] = page;
- if (++count >= target)
- break;
+ if (!will_free) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+ /* Don't discard and free the page with cl_lru_list_lock held */
+ pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- count -= discard_pagevec(env, io, pvec, index);
+ spin_unlock(&cli->cl_lru_list_lock);
+ discard_pagevec(env, io, pvec, index);
index = 0;
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
}
+
+ if (++count >= target)
+ break;
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj) {
- count -= discard_pagevec(env, io, pvec, index);
+ discard_pagevec(env, io, pvec, index);
cl_io_fini(env, io);
cl_object_put(env, clobj);
}
- cl_env_nested_put(&nest, env);
atomic_dec(&cli->cl_lru_shrinkers);
- return count > 0 ? count : rc;
-}
-
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
-{
- bool wakeup = false;
-
- if (!opg->ops_in_lru)
- return;
-
- atomic_dec(&cli->cl_lru_busy);
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (list_empty(&opg->ops_lru)) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- atomic_inc_return(&cli->cl_lru_in_list);
- wakeup = atomic_read(&osc_lru_waiters) > 0;
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
-
- if (wakeup) {
- osc_lru_shrink(cli, osc_cache_too_much(cli));
+ if (count > 0) {
+ atomic_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
-}
-
-/* delete page from LRUlist. The page can be deleted from LRUlist for two
- * reasons: redirtied or deleted from page cache.
- */
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
-{
- if (opg->ops_in_lru) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (!list_empty(&opg->ops_lru)) {
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
- list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
- if (!del)
- atomic_inc(&cli->cl_lru_busy);
- } else if (del) {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- if (del) {
- atomic_inc(cli->cl_lru_left);
- /* this is a great place to release more LRU pages if
- * this osc occupies too many LRU pages and kernel is
- * stealing one of them.
- * cl_lru_shrinkers is to avoid recursive call in case
- * we're already in the context of osc_lru_shrink().
- */
- if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
- !memory_pressure_get())
- osc_lru_shrink(cli, osc_cache_too_much(cli));
- wake_up(&osc_lru_waitq);
- }
- } else {
- LASSERT(list_empty(&opg->ops_lru));
- }
+ return count > 0 ? count : rc;
}
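
osc_lru_shrink() above collects victim pages into the small oti_pvec array and drops cl_lru_list_lock every time the array fills, so the potentially blocking discard never runs under the spinlock. A hedged sketch of that batching shape; pick_victim_locked() and discard_batch() are hypothetical helpers.

/* Sketch only: batch work under a spinlock, flushing outside it. */
#include <linux/list.h>
#include <linux/spinlock.h>

#define PVEC_SIZE 16

void *pick_victim_locked(struct list_head *lru);	/* hypothetical */
void discard_batch(void **pvec, int count);		/* hypothetical */

static void drain_lru_batched(spinlock_t *lock, struct list_head *lru)
{
	void *pvec[PVEC_SIZE];
	int index = 0;

	spin_lock(lock);
	while (!list_empty(lru)) {
		pvec[index++] = pick_victim_locked(lru);
		if (index == PVEC_SIZE) {
			/* discarding may block: never do it under the lock */
			spin_unlock(lock);
			discard_batch(pvec, index);
			index = 0;
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
	if (index > 0)
		discard_batch(pvec, index);
}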
static inline int max_to_shrink(struct client_obd *cli)
@@ -724,19 +702,28 @@ static inline int max_to_shrink(struct client_obd *cli)
return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
-static int osc_lru_reclaim(struct client_obd *cli)
+int osc_lru_reclaim(struct client_obd *cli)
{
+ struct cl_env_nest nest;
+ struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
- int rc;
+ int rc = 0;
LASSERT(cache);
- rc = osc_lru_shrink(cli, lru_shrink_min);
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return 0;
+
+ rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
if (rc != 0) {
+ if (rc == -EBUSY)
+ rc = 0;
+
CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
- return rc;
+ goto out;
}
CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
@@ -764,10 +751,11 @@ static int osc_lru_reclaim(struct client_obd *cli)
atomic_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- if (atomic_read(&cli->cl_lru_in_list) > 0) {
+ if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
- rc = osc_lru_shrink(cli, max_to_shrink(cli));
+ rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
+ true);
spin_lock(&cache->ccc_lru_lock);
if (rc != 0)
break;
@@ -775,6 +763,8 @@ static int osc_lru_reclaim(struct client_obd *cli)
}
spin_unlock(&cache->ccc_lru_lock);
+out:
+ cl_env_nested_put(&nest, env);
CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
@@ -784,16 +774,20 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg)
{
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct osc_io *oio = osc_env_io(env);
struct client_obd *cli = osc_cli(obj);
int rc = 0;
if (!cli->cl_cache) /* shall not be in LRU */
return 0;
+ if (oio->oi_lru_reserved > 0) {
+ --oio->oi_lru_reserved;
+ goto out;
+ }
+
LASSERT(atomic_read(cli->cl_lru_left) >= 0);
while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
- int gen;
-
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
if (rc < 0)
@@ -803,23 +797,15 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
- /* slowest case, all of caching pages are busy, notifying
- * other OSCs that we're lack of LRU slots.
- */
- atomic_inc(&osc_lru_waiters);
-
- gen = atomic_read(&cli->cl_lru_in_list);
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0 ||
- (atomic_read(&cli->cl_lru_in_list) > 0 &&
- gen != atomic_read(&cli->cl_lru_in_list)),
+ atomic_read(cli->cl_lru_left) > 0,
&lwi);
- atomic_dec(&osc_lru_waiters);
if (rc < 0)
break;
}
+out:
if (rc >= 0) {
atomic_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
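
The loop above reserves an LRU slot with atomic_add_unless(), which only decrements while the counter is positive; on failure it reclaims and then sleeps on the wait queue until slots reappear. A minimal sketch of that reserve-or-wait shape, where reclaim_fn is a hypothetical stand-in for the reclaim step.

/* Sketch of the reserve-or-wait loop above. */
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int reserve_slot(atomic_t *slots_left, wait_queue_head_t *waitq,
			int (*reclaim_fn)(void *), void *arg)
{
	int rc = 0;

	/* atomic_add_unless(v, -1, 0): take a slot unless none are left */
	while (!atomic_add_unless(slots_left, -1, 0)) {
		rc = reclaim_fn(arg);		/* try to free some slots */
		if (rc < 0)
			break;
		cond_resched();
		rc = wait_event_interruptible(*waitq,
					      atomic_read(slots_left) > 0);
		if (rc < 0)			/* interrupted by a signal */
			break;
	}
	return rc;
}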
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 30526ebca..47417f88f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -92,12 +92,13 @@ struct osc_fsync_args {
struct osc_enqueue_args {
struct obd_export *oa_exp;
+ enum ldlm_type oa_type;
+ enum ldlm_mode oa_mode;
__u64 *oa_flags;
- obd_enqueue_update_f oa_upcall;
+ osc_enqueue_upcall_f oa_upcall;
void *oa_cookie;
struct ost_lvb *oa_lvb;
- struct lustre_handle *oa_lockh;
- struct ldlm_enqueue_info *oa_ei;
+ struct lustre_handle oa_lockh;
unsigned int oa_agl:1;
};
@@ -801,21 +802,24 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
LASSERT(!(oa->o_valid & bits));
oa->o_valid |= bits;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_dirty = cli->cl_dirty;
if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
cli->cl_dirty_max)) {
CERROR("dirty %lu - %lu > dirty_max %lu\n",
cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
- } else if (unlikely(atomic_read(&obd_dirty_pages) -
+ } else if (unlikely(atomic_read(&obd_unstable_pages) +
+ atomic_read(&obd_dirty_pages) -
atomic_read(&obd_dirty_transit_pages) >
(long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1).
*/
- CERROR("dirty %d - %d > system dirty_max %d\n",
+ CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
+ cli->cl_import->imp_obd->obd_name,
+ atomic_read(&obd_unstable_pages),
atomic_read(&obd_dirty_pages),
atomic_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
@@ -833,10 +837,9 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
oa->o_dropped = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
-
}
void osc_update_next_shrink(struct client_obd *cli)
@@ -849,9 +852,9 @@ void osc_update_next_shrink(struct client_obd *cli)
static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant += grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
@@ -889,10 +892,10 @@ out:
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_grant = cli->cl_avail_grant / 4;
cli->cl_avail_grant -= oa->o_grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
oa->o_valid |= OBD_MD_FLFLAGS;
oa->o_flags = 0;
@@ -911,10 +914,10 @@ static int osc_shrink_grant(struct client_obd *cli)
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
(cli->cl_max_pages_per_rpc << PAGE_SHIFT);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
}
@@ -924,7 +927,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
int rc = 0;
struct ost_body *body;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance.
@@ -933,10 +936,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
body = kzalloc(sizeof(*body), GFP_NOFS);
if (!body)
@@ -944,10 +947,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
osc_announce_cached(cli, &body->oa, 0);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
body->oa.o_grant = cli->cl_avail_grant - target_bytes;
cli->cl_avail_grant = target_bytes;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
body->oa.o_valid |= OBD_MD_FLFLAGS;
body->oa.o_flags = 0;
@@ -1035,7 +1038,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
* race is tolerable here: if we're evicted, but imp_state already
* left EVICTED state, then cl_dirty must be 0 already.
*/
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
cli->cl_avail_grant = ocd->ocd_grant;
else
@@ -1053,7 +1056,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
cli->cl_import->imp_obd->obd_name,
@@ -1082,7 +1085,7 @@ static void handle_short_read(int nob_read, u32 page_count,
if (pga[i]->count > nob_read) {
/* EOF inside this page */
ptr = kmap(pga[i]->pg) +
- (pga[i]->off & ~CFS_PAGE_MASK);
+ (pga[i]->off & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga[i]->count - nob_read);
kunmap(pga[i]->pg);
page_count--;
@@ -1097,7 +1100,7 @@ static void handle_short_read(int nob_read, u32 page_count,
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+ ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
memset(ptr, 0, pga[i]->count);
kunmap(pga[i]->pg);
i++;
@@ -1144,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
+ OBD_BRW_SYNC | OBD_BRW_ASYNC |
+ OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine
@@ -1188,32 +1192,29 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
unsigned char *ptr = kmap(pga[i]->pg);
- int off = pga[i]->off & ~CFS_PAGE_MASK;
+ int off = pga[i]->off & ~PAGE_MASK;
memcpy(ptr + off, "bad1", min(4, nob));
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
+ pga[i]->off & ~PAGE_MASK,
count);
CDEBUG(D_PAGE,
"page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
(long)pga[i]->pg->flags, page_count(pga[i]->pg),
page_private(pga[i]->pg),
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
+ (int)(pga[i]->off & ~PAGE_MASK));
nob -= pga[i]->count;
pg_count--;
i++;
}
- bufsize = 4;
+ bufsize = sizeof(cksum);
err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
-
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo
*/
@@ -1312,7 +1313,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
- int poff = pg->off & ~CFS_PAGE_MASK;
+ int poff = pg->off & ~PAGE_MASK;
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
@@ -1658,6 +1659,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
aa->aa_resends++;
new_req->rq_interpret_reply = request->rq_interpret_reply;
new_req->rq_async_args = request->rq_async_args;
+ new_req->rq_commit_cb = request->rq_commit_cb;
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply())
*/
@@ -1737,7 +1739,6 @@ static int brw_interpret(const struct lu_env *env,
struct osc_brw_async_args *aa = data;
struct osc_extent *ext;
struct osc_extent *tmp;
- struct cl_object *obj = NULL;
struct client_obd *cli = aa->aa_cli;
rc = osc_brw_fini_request(req, rc);
@@ -1766,24 +1767,17 @@ static int brw_interpret(const struct lu_env *env,
rc = -EIO;
}
- list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
- if (!obj && rc == 0) {
- obj = osc2cl(ext->oe_obj);
- cl_object_get(obj);
- }
-
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 1, rc);
- }
- LASSERT(list_empty(&aa->aa_exts));
- LASSERT(list_empty(&aa->aa_oaps));
-
- if (obj) {
+ if (rc == 0) {
struct obdo *oa = aa->aa_oa;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned long valid = 0;
+ struct cl_object *obj;
+ struct osc_async_page *last;
+
+ last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
+ obj = osc2cl(last->oap_obj);
- LASSERT(rc == 0);
+ cl_object_attr_lock(obj);
if (oa->o_valid & OBD_MD_FLBLOCKS) {
attr->cat_blocks = oa->o_blocks;
valid |= CAT_BLOCKS;
@@ -1800,21 +1794,45 @@ static int brw_interpret(const struct lu_env *env,
attr->cat_ctime = oa->o_ctime;
valid |= CAT_CTIME;
}
- if (valid != 0) {
- cl_object_attr_lock(obj);
- cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
+
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
+ struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
+ loff_t last_off = last->oap_count + last->oap_obj_off;
+
+ /* Change file size if this is an out of quota or
+ * direct IO write and it extends the file size
+ */
+ if (loi->loi_lvb.lvb_size < last_off) {
+ attr->cat_size = last_off;
+ valid |= CAT_SIZE;
+ }
+ /* Extend KMS if it's not a lockless write */
+ if (loi->loi_kms < last_off &&
+ oap2osc_page(last)->ops_srvlock == 0) {
+ attr->cat_kms = last_off;
+ valid |= CAT_KMS;
+ }
}
- cl_object_put(env, obj);
+
+ if (valid != 0)
+ cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
}
kmem_cache_free(obdo_cachep, aa->aa_oa);
+ list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
+ list_del_init(&ext->oe_link);
+ osc_extent_finish(env, ext, 1, rc);
+ }
+ LASSERT(list_empty(&aa->aa_exts));
+ LASSERT(list_empty(&aa->aa_oaps));
+
cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
req->rq_bulk->bd_nob_transferred);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
* is called so we know whether to go to sync BRWs or wait for more
* RPCs to complete
@@ -1824,12 +1842,31 @@ static int brw_interpret(const struct lu_env *env,
else
cli->cl_r_in_flight--;
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug(env, cli, NULL);
return rc;
}
+static void brw_commit(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ /*
+ * If osc_inc_unstable_pages (via osc_extent_finish) races with
+	 * this callback (invoked via rq_commit_cb), we need to ensure
+ * osc_dec_unstable_pages is still called. Otherwise unstable
+ * pages may be leaked.
+ */
+ if (req->rq_unstable) {
+ spin_unlock(&req->rq_lock);
+ osc_dec_unstable_pages(req);
+ spin_lock(&req->rq_lock);
+ } else {
+ req->rq_committed = 1;
+ }
+ spin_unlock(&req->rq_lock);
+}
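
brw_commit() above resolves a race between transaction commit and unstable-page accounting: both sides set a flag under rq_lock, and whichever side runs second performs the cleanup. A simplified sketch of the two-flag handshake; the struct and dec_unstable() are illustrative, not the driver's types.

/* Sketch of the commit/unstable handshake above. Both flags are only
 * written under the lock, so exactly one side observes the other and
 * performs the decrement. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct req_state {
	spinlock_t lock;
	bool unstable;		/* inc_unstable side already ran */
	bool committed;		/* commit side already ran */
};

void dec_unstable(struct req_state *r);	/* hypothetical, may sleep */

static void commit_cb(struct req_state *r)
{
	spin_lock(&r->lock);
	if (r->unstable) {
		/* drop the lock: the decrement path may sleep */
		spin_unlock(&r->lock);
		dec_unstable(r);
		spin_lock(&r->lock);
	} else {
		r->committed = true;	/* tell the inc side to clean up */
	}
	spin_unlock(&r->lock);
}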
+
/**
 * Build an RPC from the extent list @ext_list. The caller must ensure
* that the total pages in this list are NOT over max pages per RPC.
@@ -1920,7 +1957,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, page_index(oap->oap_page), oap,
+ pga[i]->pg, oap->oap_page->index, oap,
pga[i]->flag);
i++;
cl_req_page_add(env, clerq, page);
@@ -1949,6 +1986,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
goto out;
}
+ req->rq_commit_cb = brw_commit;
req->rq_interpret_reply = brw_interpret;
if (mem_tight != 0)
@@ -1992,7 +2030,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
if (tmp)
tmp->oap_request = ptlrpc_request_addref(req);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
@@ -2007,7 +2045,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
starting_offset + 1);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
page_count, aa, cli->cl_r_in_flight,
@@ -2055,14 +2093,12 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- spin_lock(&osc_ast_guard);
if (!lock->l_ast_data)
lock->l_ast_data = data;
if (lock->l_ast_data == data)
set = 1;
- spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
return set;
@@ -2104,36 +2140,38 @@ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
return rc;
}
-static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
- obd_enqueue_update_f upcall, void *cookie,
- __u64 *flags, int agl, int rc)
+static int osc_enqueue_fini(struct ptlrpc_request *req,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct lustre_handle *lockh, enum ldlm_mode mode,
+ __u64 *flags, int agl, int errcode)
{
- int intent = *flags & LDLM_FL_HAS_INTENT;
-
- if (intent) {
- /* The request was created before ldlm_cli_enqueue call. */
- if (rc == ELDLM_LOCK_ABORTED) {
- struct ldlm_reply *rep;
+ bool intent = *flags & LDLM_FL_HAS_INTENT;
+ int rc;
- rep = req_capsule_server_get(&req->rq_pill,
- &RMF_DLM_REP);
+ /* The request was created before ldlm_cli_enqueue call. */
+ if (intent && errcode == ELDLM_LOCK_ABORTED) {
+ struct ldlm_reply *rep;
- rep->lock_policy_res1 =
- ptlrpc_status_ntoh(rep->lock_policy_res1);
- if (rep->lock_policy_res1)
- rc = rep->lock_policy_res1;
- }
- }
+ rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
- (rc == 0)) {
+ rep->lock_policy_res1 =
+ ptlrpc_status_ntoh(rep->lock_policy_res1);
+ if (rep->lock_policy_res1)
+ errcode = rep->lock_policy_res1;
+ if (!agl)
+ *flags |= LDLM_FL_LVB_READY;
+ } else if (errcode == ELDLM_OK) {
*flags |= LDLM_FL_LVB_READY;
- CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
- lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
}
/* Call the update callback. */
- rc = (*upcall)(cookie, rc);
+ rc = (*upcall)(cookie, lockh, errcode);
+ /* release the reference taken in ldlm_cli_enqueue() */
+ if (errcode == ELDLM_LOCK_MATCHED)
+ errcode = ELDLM_OK;
+ if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
+ ldlm_lock_decref(lockh, mode);
+
return rc;
}
@@ -2142,62 +2180,50 @@ static int osc_enqueue_interpret(const struct lu_env *env,
struct osc_enqueue_args *aa, int rc)
{
struct ldlm_lock *lock;
- struct lustre_handle handle;
- __u32 mode;
- struct ost_lvb *lvb;
- __u32 lvb_len;
- __u64 *flags = aa->oa_flags;
-
- /* Make a local copy of a lock handle and a mode, because aa->oa_*
- * might be freed anytime after lock upcall has been called.
- */
- lustre_handle_copy(&handle, aa->oa_lockh);
- mode = aa->oa_ei->ei_mode;
+ struct lustre_handle *lockh = &aa->oa_lockh;
+ enum ldlm_mode mode = aa->oa_mode;
+ struct ost_lvb *lvb = aa->oa_lvb;
+ __u32 lvb_len = sizeof(*lvb);
+ __u64 flags = 0;
+
/* ldlm_cli_enqueue is holding a reference on the lock, so it must
* be valid.
*/
- lock = ldlm_handle2lock(&handle);
+ lock = ldlm_handle2lock(lockh);
+ LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n",
+ lockh->cookie, req, aa);
/* Take an additional reference so that a blocking AST that
* ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
* to arrive after an upcall has been executed by
* osc_enqueue_fini().
*/
- ldlm_lock_addref(&handle, mode);
+ ldlm_lock_addref(lockh, mode);
+
+ /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
/* Let CP AST to grant the lock first. */
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
- if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
- lvb = NULL;
- lvb_len = 0;
- } else {
- lvb = aa->oa_lvb;
- lvb_len = sizeof(*aa->oa_lvb);
+ if (aa->oa_agl) {
+ LASSERT(!aa->oa_lvb);
+ LASSERT(!aa->oa_flags);
+ aa->oa_flags = &flags;
}
/* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
- mode, flags, lvb, lvb_len, &handle, rc);
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
+ aa->oa_mode, aa->oa_flags, lvb, lvb_len,
+ lockh, rc);
/* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
- flags, aa->oa_agl, rc);
+ rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
+ aa->oa_flags, aa->oa_agl, rc);
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
- /* Release the lock for async request. */
- if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
- /*
- * Releases a reference taken by ldlm_cli_enqueue(), if it is
- * not already released by
- * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
- */
- ldlm_lock_decref(&handle, mode);
-
- LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
- aa->oa_lockh, req, aa);
- ldlm_lock_decref(&handle, mode);
+ ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(lock);
return rc;
}
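
The interpret-callback rewrite above leans on a subtle change to struct osc_enqueue_args: the lock handle is now embedded (struct lustre_handle oa_lockh) instead of pointed at, so it cannot be freed out from under the async callback. A schematic contrast, with illustrative type names.

/* Schematic only: why embedding beats pointing for async args. */
struct handle { unsigned long long cookie; };

struct args_unsafe {
	struct handle *lockh;	/* points at caller memory: may be gone
				 * by the time the callback runs */
};

struct args_safe {
	struct handle lockh;	/* embedded copy lives exactly as long
				 * as the request's async-args area */
};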
@@ -2209,29 +2235,29 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
* other synchronous requests, however keeping some locks and trying to obtain
* others may take a considerable amount of time in a case of ost failure; and
* when other sync requests do not get released lock from a client, the client
- * is excluded from the cluster -- such scenarious make the life difficult, so
+ * is evicted from the cluster -- such scenarios make life difficult, so
* release locks just after they are obtained.
*/
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall, void *cookie,
+ osc_enqueue_upcall_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
struct ptlrpc_request_set *rqset, int async, int agl)
{
struct obd_device *obd = exp->exp_obd;
+ struct lustre_handle lockh = { 0 };
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
+ __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
enum ldlm_mode mode;
int rc;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother.
*/
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/*
* kms is not valid when either object is completely fresh (so that no
@@ -2259,64 +2285,46 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
- einfo->ei_type, policy, mode, lockh, 0);
+ einfo->ei_type, policy, mode, &lockh, 0);
if (mode) {
- struct ldlm_lock *matched = ldlm_handle2lock(lockh);
+ struct ldlm_lock *matched;
- if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
- /* For AGL, if enqueue RPC is sent but the lock is not
- * granted, then skip to process this strpe.
- * Return -ECANCELED to tell the caller.
+ if (*flags & LDLM_FL_TEST_LOCK)
+ return ELDLM_OK;
+
+ matched = ldlm_handle2lock(&lockh);
+ if (agl) {
+			/* AGL enqueues DLM locks speculatively. Therefore if
+			 * a DLM lock already exists, it will just inform the
+			 * caller to cancel the AGL process for this stripe.
*/
- ldlm_lock_decref(lockh, mode);
+ ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
- }
-
- if (osc_set_lock_data_with_check(matched, einfo)) {
+ } else if (osc_set_lock_data_with_check(matched, einfo)) {
*flags |= LDLM_FL_LVB_READY;
- /* addref the lock only if not async requests and PW
- * lock is matched whereas we asked for PR.
- */
- if (!rqset && einfo->ei_mode != mode)
- ldlm_lock_addref(lockh, LCK_PR);
- if (intent) {
- /* I would like to be able to ASSERT here that
- * rss <= kms, but I can't, for reasons which
- * are explained in lov_enqueue()
- */
- }
-
- /* We already have a lock, and it's referenced.
- *
- * At this point, the cl_lock::cll_state is CLS_QUEUING,
- * AGL upcall may change it to CLS_HELD directly.
- */
- (*upcall)(cookie, ELDLM_OK);
+ /* We already have a lock, and it's referenced. */
+ (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
- if (einfo->ei_mode != mode)
- ldlm_lock_decref(lockh, LCK_PW);
- else if (rqset)
- /* For async requests, decref the lock. */
- ldlm_lock_decref(lockh, einfo->ei_mode);
+ ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return ELDLM_OK;
+ } else {
+ ldlm_lock_decref(&lockh, mode);
+ LDLM_LOCK_PUT(matched);
}
-
- ldlm_lock_decref(lockh, mode);
- LDLM_LOCK_PUT(matched);
}
- no_match:
+no_match:
+ if (*flags & LDLM_FL_TEST_LOCK)
+ return -ENOLCK;
if (intent) {
- LIST_HEAD(cancels);
-
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE_LVB);
if (!req)
return -ENOMEM;
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
return rc;
@@ -2331,21 +2339,31 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), LVB_T_OST, lockh, async);
- if (rqset) {
+ sizeof(*lvb), LVB_T_OST, &lockh, async);
+ if (async) {
if (!rc) {
struct osc_enqueue_args *aa;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- aa->oa_ei = einfo;
aa->oa_exp = exp;
- aa->oa_flags = flags;
+ aa->oa_mode = einfo->ei_mode;
+ aa->oa_type = einfo->ei_type;
+ lustre_handle_copy(&aa->oa_lockh, &lockh);
aa->oa_upcall = upcall;
aa->oa_cookie = cookie;
- aa->oa_lvb = lvb;
- aa->oa_lockh = lockh;
aa->oa_agl = !!agl;
+ if (!agl) {
+ aa->oa_flags = flags;
+ aa->oa_lvb = lvb;
+ } else {
+			/* AGL essentially enqueues a DLM lock
+			 * in advance, so we don't care about the
+			 * result of the AGL enqueue.
+ */
+ aa->oa_lvb = NULL;
+ aa->oa_flags = NULL;
+ }
req->rq_interpret_reply =
(ptlrpc_interpterer_t)osc_enqueue_interpret;
@@ -2359,7 +2377,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
return rc;
}
- rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
+ rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
+ flags, agl, rc);
if (intent)
ptlrpc_req_finished(req);
@@ -2381,8 +2400,8 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother
*/
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
@@ -2493,7 +2512,7 @@ static int osc_statfs_async(struct obd_export *exp,
}
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
@@ -2787,7 +2806,7 @@ out:
goto skip_locking;
policy.l_extent.start = fm_key->fiemap.fm_start &
- CFS_PAGE_MASK;
+ PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
fm_key->fiemap.fm_start + PAGE_SIZE - 1)
@@ -2795,7 +2814,7 @@ out:
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
- PAGE_SIZE - 1) & CFS_PAGE_MASK;
+ PAGE_SIZE - 1) & PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -2913,7 +2932,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
- nr = osc_lru_shrink(cli, min(nr, target));
+ nr = osc_lru_shrink(env, cli, min(nr, target), true);
*(int *)val -= nr;
return 0;
}
@@ -2992,12 +3011,12 @@ static int osc_reconnect(const struct lu_env *env,
if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
long lost_grant;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
2 * cli_brw_size(obd);
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
data->ocd_connect_flags,
@@ -3047,10 +3066,10 @@ static int osc_import_event(struct obd_device *obd,
switch (event) {
case IMP_EVENT_DISCON: {
cli = &obd->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant = 0;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
break;
}
case IMP_EVENT_INACTIVE: {
@@ -3073,8 +3092,9 @@ static int osc_import_event(struct obd_device *obd,
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
cl_env_put(env, &refcheck);
- } else
+ } else {
rc = PTR_ERR(env);
+ }
break;
}
case IMP_EVENT_ACTIVE: {
@@ -3116,20 +3136,14 @@ static int osc_import_event(struct obd_device *obd,
* \retval zero the lock can't be canceled
* \retval other ok to cancel
*/
-static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+static int osc_cancel_weight(struct ldlm_lock *lock)
{
- check_res_locked(lock->l_resource);
-
/*
- * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR.
- *
- * XXX as a future improvement, we can also cancel unused write lock
- * if it doesn't have dirty data and active mmaps.
+	 * Cancel all unused, granted extent locks.
*/
if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_granted_mode == LCK_PR ||
- lock->l_granted_mode == LCK_CR) &&
- (osc_dlm_lock_pageref(lock) == 0))
+ lock->l_granted_mode == lock->l_req_mode &&
+ osc_ldlm_weigh_ast(lock) == 0)
return 1;
return 0;
@@ -3170,6 +3184,14 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
cli->cl_writeback_work = handler;
+ handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
+ if (IS_ERR(handler)) {
+ rc = PTR_ERR(handler);
+ goto out_ptlrpcd_work;
+ }
+
+ cli->cl_lru_work = handler;
+
rc = osc_quota_setup(obd);
if (rc)
goto out_ptlrpcd_work;
@@ -3198,11 +3220,18 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
+ ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
return rc;
out_ptlrpcd_work:
- ptlrpcd_destroy_work(handler);
+ if (cli->cl_writeback_work) {
+ ptlrpcd_destroy_work(cli->cl_writeback_work);
+ cli->cl_writeback_work = NULL;
+ }
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
+ }
out_client_setup:
client_obd_cleanup(obd);
out_ptlrpcd:
@@ -3241,6 +3270,10 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
ptlrpcd_destroy_work(cli->cl_writeback_work);
cli->cl_writeback_work = NULL;
}
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
+ }
obd_cleanup_client_import(obd);
ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
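
Both the osc_setup() error path and osc_precleanup() above now free whichever ptlrpcd work items exist and NULL the pointers, keeping the two paths idempotent if both run. The idiom, as a small hedged helper; destroy_work() is a hypothetical stand-in for ptlrpcd_destroy_work().

/* Sketch of the free-and-NULL idiom above. */
void destroy_work(void *handler);	/* hypothetical */

static void put_work(void **slot)
{
	if (*slot) {
		destroy_work(*slot);
		*slot = NULL;	/* safe to call again later */
	}
}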
@@ -3330,7 +3363,6 @@ static struct obd_ops osc_obd_ops = {
};
extern struct lu_kmem_descr osc_caches[];
-extern spinlock_t osc_ast_guard;
extern struct lock_class_key osc_ast_guard_class;
static int __init osc_init(void)
@@ -3357,9 +3389,6 @@ static int __init osc_init(void)
if (rc)
goto out_kmem;
- spin_lock_init(&osc_ast_guard);
- lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
-
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index cf3ac8eee..4b7912a2c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -595,9 +595,9 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
struct obd_import *imp = request->rq_import;
int rc;
- if (unlikely(ctx))
+ if (unlikely(ctx)) {
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
- else {
+ } else {
rc = sptlrpc_req_get_ctx(request);
if (rc)
goto out_free;
@@ -1082,7 +1082,6 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req)
*/
if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
(opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
-
/* Suppress timed out reconnect requests */
if (req->rq_timedout)
return 0;
@@ -2087,7 +2086,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
set, timeout);
- if (timeout == 0 && !cfs_signal_pending())
+ if (timeout == 0 && !signal_pending(current))
/*
				 * No requests are in-flight (either timed out
* or delayed), so we can allow interrupts.
@@ -2114,7 +2113,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
* it being ignored forever
*/
if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
+ signal_pending(current)) {
sigset_t blocked_sigs =
cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
@@ -2124,7 +2123,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
* important signals since ptlrpc set is not easily
* reentrant from userspace again
*/
- if (cfs_signal_pending())
+ if (signal_pending(current))
ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 47be21ac9..fdcde9bbd 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -69,7 +69,6 @@ void request_out_callback(lnet_event_t *ev)
req->rq_req_unlink = 0;
if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
-
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently...
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index cd94fed0f..a4f7544f4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1001,6 +1001,7 @@ finish:
return 0;
}
} else {
+ static bool warned;
spin_lock(&imp->imp_lock);
list_del(&imp->imp_conn_current->oic_item);
@@ -1021,7 +1022,7 @@ finish:
goto out;
}
- if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
(ocd->ocd_version > LUSTRE_VERSION_CODE +
LUSTRE_VERSION_OFFSET_WARN ||
ocd->ocd_version < LUSTRE_VERSION_CODE -
@@ -1029,10 +1030,8 @@ finish:
/* Sigh, some compilers do not like #ifdef in the middle
* of macro arguments
*/
- const char *older = "older. Consider upgrading server or downgrading client"
- ;
- const char *newer = "newer than client version. Consider upgrading client"
- ;
+ const char *older = "older than client. Consider upgrading server";
+ const char *newer = "newer than client. Consider recompiling application";
LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
obd2cli_tgt(imp->imp_obd),
@@ -1042,6 +1041,7 @@ finish:
OBD_OCD_VERSION_FIX(ocd->ocd_version),
ocd->ocd_version > LUSTRE_VERSION_CODE ?
newer : older, LUSTRE_VERSION_STRING);
+ warned = true;
}
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
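
The static bool added above turns a per-reconnect console warning into a warn-once: the flag is set after the first emission and checked up front thereafter. A minimal sketch of the same guard with illustrative names; pr_warn_once() is the stock kernel shorthand when no extra state is needed.

/* Sketch of the warn-once guard above. */
#include <linux/printk.h>
#include <linux/types.h>

static void version_check(unsigned int server, unsigned int client)
{
	static bool warned;

	if (!warned && server != client) {
		pr_warn("version mismatch: server %u, client %u\n",
			server, client);
		warned = true;	/* suppress all later occurrences */
	}
}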
@@ -1370,7 +1370,6 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
if (rc)
goto out;
}
-
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
@@ -1453,7 +1452,6 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
!ptlrpc_import_in_recovery(imp), &lwi);
-
}
spin_lock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 5b06901e5..c0ecd1625 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -160,6 +160,16 @@ static const struct req_msg_field *fld_query_server[] = {
&RMF_FLD_MDFLD
};
+static const struct req_msg_field *fld_read_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FLD_MDFLD
+};
+
+static const struct req_msg_field *fld_read_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GENERIC_DATA
+};
+
static const struct req_msg_field *mds_getattr_name_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -566,7 +576,7 @@ static const struct req_msg_field *ost_get_info_generic_server[] = {
static const struct req_msg_field *ost_get_info_generic_client[] = {
&RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY
+ &RMF_GETINFO_KEY
};
static const struct req_msg_field *ost_get_last_id_server[] = {
@@ -574,6 +584,12 @@ static const struct req_msg_field *ost_get_last_id_server[] = {
&RMF_OBD_ID
};
+static const struct req_msg_field *ost_get_last_fid_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GETINFO_KEY,
+ &RMF_FID,
+};
+
static const struct req_msg_field *ost_get_last_fid_server[] = {
&RMF_PTLRPC_BODY,
&RMF_FID,
@@ -643,6 +659,7 @@ static struct req_format *req_formats[] = {
&RQF_MGS_CONFIG_READ,
&RQF_SEQ_QUERY,
&RQF_FLD_QUERY,
+ &RQF_FLD_READ,
&RQF_MDS_CONNECT,
&RQF_MDS_DISCONNECT,
&RQF_MDS_GET_INFO,
@@ -696,7 +713,7 @@ static struct req_format *req_formats[] = {
&RQF_OST_BRW_WRITE,
&RQF_OST_STATFS,
&RQF_OST_SET_GRANT_INFO,
- &RQF_OST_GET_INFO_GENERIC,
+ &RQF_OST_GET_INFO,
&RQF_OST_GET_INFO_LAST_ID,
&RQF_OST_GET_INFO_LAST_FID,
&RQF_OST_SET_INFO_LAST_FID,
@@ -1162,6 +1179,10 @@ struct req_format RQF_FLD_QUERY =
DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
EXPORT_SYMBOL(RQF_FLD_QUERY);
+struct req_format RQF_FLD_READ =
+ DEFINE_REQ_FMT0("FLD_READ", fld_read_client, fld_read_server);
+EXPORT_SYMBOL(RQF_FLD_READ);
+
struct req_format RQF_LOG_CANCEL =
DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
EXPORT_SYMBOL(RQF_LOG_CANCEL);
@@ -1519,10 +1540,10 @@ struct req_format RQF_OST_SET_GRANT_INFO =
ost_body_only);
EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
-struct req_format RQF_OST_GET_INFO_GENERIC =
+struct req_format RQF_OST_GET_INFO =
DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
ost_get_info_generic_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
+EXPORT_SYMBOL(RQF_OST_GET_INFO);
struct req_format RQF_OST_GET_INFO_LAST_ID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
@@ -1530,7 +1551,7 @@ struct req_format RQF_OST_GET_INFO_LAST_ID =
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
struct req_format RQF_OST_GET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
+ DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", ost_get_last_fid_client,
ost_get_last_fid_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index c95a91ce2..64c0f1e17 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -131,6 +131,7 @@ static struct ll_rpc_opcode {
{ SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
{ SEC_CTX_FINI, "sec_ctx_fini" },
{ FLD_QUERY, "fld_query" },
+ { FLD_READ, "fld_read" },
};
static struct ll_eopcode {
@@ -679,11 +680,11 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
/**
* The second token is either NULL, or an optional [reg|hp] string
*/
- if (strcmp(cmd, "reg") == 0)
+ if (strcmp(cmd, "reg") == 0) {
queue = PTLRPC_NRS_QUEUE_REG;
- else if (strcmp(cmd, "hp") == 0)
+ } else if (strcmp(cmd, "hp") == 0) {
queue = PTLRPC_NRS_QUEUE_HP;
- else {
+ } else {
rc = -EINVAL;
goto out;
}
@@ -693,8 +694,9 @@ default_queue:
if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
rc = -ENODEV;
goto out;
- } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
+ } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
queue = PTLRPC_NRS_QUEUE_REG;
+ }
/**
* Serialize NRS core lprocfs operations with policy registration/
@@ -1320,6 +1322,5 @@ int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
up_read(&obd->u.cli.cl_sem);
return count;
-
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 710fb806f..c444f5168 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -975,7 +975,11 @@ static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
+	/* scp_nrs_hp could be NULL due to a memory allocation failure. */
+	nrs = hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
+	/* check nrs_svcpt to see whether nrs is initialized. */
+ if (!nrs || !nrs->nrs_svcpt)
+ return;
nrs->nrs_stopping = 1;
list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
@@ -1038,7 +1042,6 @@ static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
-
if (!nrs_policy_compatible(svc, desc) ||
unlikely(svc->srv_is_stopping))
continue;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 492d63fad..811acf6fc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -1160,7 +1160,6 @@ __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
if (!pb) {
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
return 0;
-
}
return pb->pb_timeout;
}
@@ -1179,7 +1178,6 @@ __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
if (!pb) {
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
return 0;
-
}
return pb->pb_service_time;
}
@@ -1572,7 +1570,6 @@ static void lustre_swab_obdo(struct obdo *o)
CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
-
}
void lustre_swab_obd_statfs(struct obd_statfs *os)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index db003f5da..76a355a9d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -387,7 +387,8 @@ static int ptlrpcd(void *arg)
{
struct ptlrpcd_ctl *pc = arg;
struct ptlrpc_request_set *set;
- struct lu_env env = { .le_ses = NULL };
+ struct lu_context ses = { 0 };
+ struct lu_env env = { .le_ses = &ses };
int rc = 0;
int exit = 0;
@@ -416,6 +417,13 @@ static int ptlrpcd(void *arg)
*/
rc = lu_context_init(&env.le_ctx,
LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ if (rc == 0) {
+ rc = lu_context_init(env.le_ses,
+ LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
+ if (rc != 0)
+ lu_context_fini(&env.le_ctx);
+ }
+
if (rc != 0)
goto failed;
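
The hunk above initializes two lu_context instances back to back; if the second (session) init fails, the first must be unwound before the error is reported. A sketch of that pairing, using the lu_context calls as they appear in the hunk.

/* Sketch of the paired-init/rollback above; assumes the lu_context
 * API as used in the hunk. */
static int init_thread_env(struct lu_env *env)
{
	int rc;

	rc = lu_context_init(&env->le_ctx,
			     LCT_CL_THREAD | LCT_REMEMBER | LCT_NOREF);
	if (rc != 0)
		return rc;

	rc = lu_context_init(env->le_ses,
			     LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
	if (rc != 0)
		lu_context_fini(&env->le_ctx);	/* unwind the first init */
	return rc;
}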
@@ -436,9 +444,10 @@ static int ptlrpcd(void *arg)
ptlrpc_expired_set, set);
lu_context_enter(&env.le_ctx);
- l_wait_event(set->set_waitq,
- ptlrpcd_check(&env, pc), &lwi);
+ lu_context_enter(env.le_ses);
+ l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
lu_context_exit(&env.le_ctx);
+ lu_context_exit(env.le_ses);
/*
* Abort inflight rpcs for forced stop case.
@@ -461,6 +470,7 @@ static int ptlrpcd(void *arg)
if (!list_empty(&set->set_requests))
ptlrpc_set_wait(set);
lu_context_fini(&env.le_ctx);
+ lu_context_fini(env.le_ses);
complete(&pc->pc_finishing);
@@ -899,8 +909,11 @@ int ptlrpcd_addref(void)
int rc = 0;
mutex_lock(&ptlrpcd_mutex);
- if (++ptlrpcd_users == 1)
+ if (++ptlrpcd_users == 1) {
rc = ptlrpcd_init();
+ if (rc < 0)
+ ptlrpcd_users--;
+ }
mutex_unlock(&ptlrpcd_mutex);
return rc;
}
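
ptlrpcd_addref() above gains a rollback: if the first user's init fails, the user count is decremented again, so a later caller retries the init instead of inheriting a broken singleton. The pattern in isolation; init_fn and the names are hypothetical.

/* Sketch of the first-user-initializes pattern above. */
#include <linux/mutex.h>

static int users;
static DEFINE_MUTEX(users_mutex);

static int subsystem_addref(int (*init_fn)(void))
{
	int rc = 0;

	mutex_lock(&users_mutex);
	if (++users == 1) {
		rc = init_fn();
		if (rc < 0)
			users--;	/* failed init: allow a retry */
	}
	mutex_unlock(&users_mutex);
	return rc;
}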
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index d3872b8c9..02e6cda4c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -41,7 +41,6 @@
#define DEBUG_SUBSYSTEM S_SEC
#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
#include "../include/obd.h"
#include "../include/obd_cksum.h"
@@ -511,7 +510,6 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
{
struct cfs_crypto_hash_desc *hdesc;
int hashsize;
- char hashbuf[64];
unsigned int bufsize;
int i, err;
@@ -529,21 +527,23 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
for (i = 0; i < desc->bd_iov_count; i++) {
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
desc->bd_iov[i].kiov_len);
}
+
if (hashsize > buflen) {
+ unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
bufsize = sizeof(hashbuf);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
- &bufsize);
+ LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+ bufsize, hashsize);
+ err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
memcpy(buf, hashbuf, buflen);
} else {
bufsize = buflen;
err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
}
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
return err;
}
EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
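
The checksum fix above finalizes into a maximum-size stack buffer whenever the digest is larger than the caller's buffer, then copies only the prefix that fits; the removed fixed 64-byte hashbuf risked being too small. A hedged sketch of the bounce-buffer path; finalize() stands in for cfs_crypto_hash_final() and DIGEST_MAX for the library's maximum digest size.

/* Sketch of the bounce-buffer truncation above. */
#include <linux/string.h>

#define DIGEST_MAX 64	/* illustrative upper bound */

int finalize(void *hdesc, unsigned char *buf, unsigned int *buflen);

static int hash_final_fit(void *hdesc, unsigned char *buf,
			  unsigned int buflen, unsigned int hashsize)
{
	if (hashsize > buflen) {
		unsigned char tmp[DIGEST_MAX];
		unsigned int tmplen = sizeof(tmp);
		int err = finalize(hdesc, tmp, &tmplen);

		memcpy(buf, tmp, buflen);	/* caller gets the prefix */
		return err;
	}
	return finalize(hdesc, buf, &buflen);
}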
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 6276bf59c..37c9f4c45 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -162,7 +162,7 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
continue;
ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
kunmap(desc->bd_iov[i].kiov_page);
return;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 1bbd1d39c..17c7b9749 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -838,6 +838,11 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
{
ptlrpc_server_hpreq_fini(req);
+ if (req->rq_session.lc_thread) {
+ lu_context_exit(&req->rq_session);
+ lu_context_fini(&req->rq_session);
+ }
+
ptlrpc_server_drop_request(req);
}
@@ -1579,6 +1584,21 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
}
req->rq_svc_thread = thread;
+ if (thread) {
+		/* initialize the request session; it is needed for request
+		 * processing by the target
+ */
+ rc = lu_context_init(&req->rq_session,
+ LCT_SERVER_SESSION | LCT_NOREF);
+ if (rc) {
+ CERROR("%s: failure to initialize session: rc = %d\n",
+ thread->t_name, rc);
+ goto err_req;
+ }
+ req->rq_session.lc_thread = thread;
+ lu_context_enter(&req->rq_session);
+ req->rq_svc_thread->t_env->le_ses = &req->rq_session;
+ }
ptlrpc_at_add_timed(req);
@@ -1612,7 +1632,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
struct timespec64 arrived;
unsigned long timediff_usecs;
unsigned long arrived_usecs;
- int rc;
int fail_opc = 0;
request = ptlrpc_server_request_get(svcpt, false);
@@ -1649,21 +1668,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
at_get(&svcpt->scp_at_estimate));
}
- rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("Failure to initialize session: %d\n", rc);
- goto out_req;
- }
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x5;
- lu_context_enter(&request->rq_session);
-
- CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
-
- request->rq_svc_thread = thread;
- if (thread)
- request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
@@ -1695,14 +1699,21 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
- rc = svc->srv_ops.so_req_handler(request);
+ CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
+
+	/* re-assign the request and session thread to the current one */
+ request->rq_svc_thread = thread;
+ if (thread) {
+ LASSERT(request->rq_session.lc_thread);
+ request->rq_session.lc_thread = thread;
+ request->rq_session.lc_cookie = 0x55;
+ thread->t_env->le_ses = &request->rq_session;
+ }
+ svc->srv_ops.so_req_handler(request);
ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
put_conn:
- lu_context_exit(&request->rq_session);
- lu_context_fini(&request->rq_session);
-
if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request,
"Request took longer than estimated (%lld:%llds); "
@@ -1756,7 +1767,6 @@ put_conn:
request->rq_arrival_time.tv_sec);
}
-out_req:
ptlrpc_server_finish_active_request(svcpt, request);
return 1;
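
The service.c changes above move session setup from the handler path into request receive, pairing it with teardown in ptlrpc_server_finish_request(); rq_session.lc_thread doubles as the "init succeeded" marker. The lifecycle, reduced to its two halves using the lu_context calls from the hunks (function names are illustrative).

/* Sketch of the receive/finish session pairing above. */
static int request_in(struct ptlrpc_request *req,
		      struct ptlrpc_thread *thread)
{
	int rc;

	rc = lu_context_init(&req->rq_session,
			     LCT_SERVER_SESSION | LCT_NOREF);
	if (rc)
		return rc;

	req->rq_session.lc_thread = thread;	/* marks init as done */
	lu_context_enter(&req->rq_session);
	return 0;
}

static void request_finish(struct ptlrpc_request *req)
{
	if (req->rq_session.lc_thread) {	/* init succeeded earlier */
		lu_context_exit(&req->rq_session);
		lu_context_fini(&req->rq_session);
	}
}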
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 3ffd2d91f..aacc81083 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -276,7 +276,9 @@ void lustre_assert_wire_constants(void)
(long long)FLD_QUERY);
LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
(long long)FLD_FIRST_OPC);
- LASSERTF(FLD_LAST_OPC == 901, "found %lld\n",
+ LASSERTF(FLD_READ == 901, "found %lld\n",
+ (long long)FLD_READ);
+ LASSERTF(FLD_LAST_OPC == 902, "found %lld\n",
(long long)FLD_LAST_OPC);
LASSERTF(SEQ_QUERY == 700, "found %lld\n",
(long long)SEQ_QUERY);
@@ -1069,6 +1071,8 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_PINGLESS);
LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
"found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
+ LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL,
+ "found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
(unsigned)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
@@ -1639,6 +1643,12 @@ void lustre_assert_wire_constants(void)
OBD_BRW_ASYNC);
LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
OBD_BRW_MEMALLOC);
+ LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n",
+ OBD_BRW_OVER_USRQUOTA);
+ LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n",
+ OBD_BRW_OVER_GRPQUOTA);
+ LASSERTF(OBD_BRW_SOFT_SYNC == 0x4000, "found 0x%.8x\n",
+ OBD_BRW_SOFT_SYNC);
/* Checks for struct ost_body */
LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 0078b6a92..de7e9f52e 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -37,6 +37,8 @@ source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/timb/Kconfig"
+source "drivers/staging/media/tw686x-kh/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 91495882a..60a35b3a4 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_VIDEO_OMAP1) += omap1/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_DVB_MN88472) += mn88472/
obj-$(CONFIG_VIDEO_TIMBERDALE) += timb/
+obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index abf330f92..8dade197f 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -308,7 +308,7 @@ module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr,
"Minor number for radio device (-1 ==> auto assign)");
-static struct region_info region_configs[] = {
+static const struct region_info region_configs[] = {
/* USA */
{
.channel_spacing = 20,
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index be72a8e5f..ea3ddec75 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -154,7 +154,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
while ((entity = media_entity_graph_walk_next(&graph))) {
if (entity == &video->video_dev.entity)
continue;
- if (!is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_video_device(entity))
continue;
far_end = to_vpfe_video(media_entity_to_video_device(entity));
if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
diff --git a/drivers/staging/media/omap1/omap1_camera.c b/drivers/staging/media/omap1/omap1_camera.c
index bd721e354..54b8dd2d2 100644
--- a/drivers/staging/media/omap1/omap1_camera.c
+++ b/drivers/staging/media/omap1/omap1_camera.c
@@ -1569,27 +1569,21 @@ static int omap1_cam_probe(struct platform_device *pdev)
unsigned int irq;
int err = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || (int)irq <= 0) {
+ if ((int)irq <= 0) {
err = -ENODEV;
goto exit;
}
- clk = clk_get(&pdev->dev, "armper_ck");
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- goto exit;
- }
+ clk = devm_clk_get(&pdev->dev, "armper_ck");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
- if (!pcdev) {
- dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit_put_clk;
- }
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev) + resource_size(res),
+ GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
- pcdev->res = res;
pcdev->clk = clk;
pcdev->pdata = pdev->dev.platform_data;
@@ -1620,19 +1614,11 @@ static int omap1_cam_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
- /*
- * Request the region.
- */
- if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
- err = -EBUSY;
- goto exit_kfree;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- err = -ENOMEM;
- goto exit_release;
- }
pcdev->irq = irq;
pcdev->base = base;
@@ -1642,8 +1628,7 @@ static int omap1_cam_probe(struct platform_device *pdev)
dma_isr, (void *)pcdev, &pcdev->dma_ch);
if (err < 0) {
dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
- err = -EBUSY;
- goto exit_iounmap;
+ return -EBUSY;
}
dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
@@ -1655,7 +1640,8 @@ static int omap1_cam_probe(struct platform_device *pdev)
/* setup DMA autoinitialization */
omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
- err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, cam_isr, 0, DRIVER_NAME,
+ pcdev);
if (err) {
dev_err(&pdev->dev, "Camera interrupt register failed\n");
goto exit_free_dma;
@@ -1669,24 +1655,14 @@ static int omap1_cam_probe(struct platform_device *pdev)
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
- goto exit_free_irq;
+ return err;
dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
return 0;
-exit_free_irq:
- free_irq(pcdev->irq, pcdev);
exit_free_dma:
omap_free_dma(pcdev->dma_ch);
-exit_iounmap:
- iounmap(base);
-exit_release:
- release_mem_region(res->start, resource_size(res));
-exit_kfree:
- kfree(pcdev);
-exit_put_clk:
- clk_put(clk);
exit:
return err;
}
@@ -1696,23 +1672,11 @@ static int omap1_cam_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct omap1_cam_dev *pcdev = container_of(soc_host,
struct omap1_cam_dev, soc_host);
- struct resource *res;
-
- free_irq(pcdev->irq, pcdev);
omap_free_dma(pcdev->dma_ch);
soc_camera_host_unregister(soc_host);
- iounmap(pcdev->base);
-
- res = pcdev->res;
- release_mem_region(res->start, resource_size(res));
-
- clk_put(pcdev->clk);
-
- kfree(pcdev);
-
dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
return 0;
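
The conversion above replaces manual acquire/release pairs with device-managed helpers, so the error unwind labels disappear and remove() shrinks accordingly; note that platform_get_resource() has to run before resource_size(res) is used to size the pcdev allocation, which is why the fetch stays at the top of probe. A minimal sketch of the pattern (generic platform driver, illustrative only, not part of this patch):

    /* Hedged sketch of the devm conversion above: everything acquired with
     * devm_* is released automatically on probe failure or device unbind. */
    static int example_probe(struct platform_device *pdev)
    {
    	struct resource *res;
    	void __iomem *base;
    	struct clk *clk;

    	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    	clk = devm_clk_get(&pdev->dev, "armper_ck");
    	if (IS_ERR(clk))
    		return PTR_ERR(clk);

    	base = devm_ioremap_resource(&pdev->dev, res);	/* validates res */
    	if (IS_ERR(base))
    		return PTR_ERR(base);

    	return 0;	/* no unwind labels needed */
    }
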
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index c5a5138b3..6ceb4eb00 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1065,7 +1065,7 @@ static int iss_register_entities(struct iss_device *iss)
}
ret = media_create_pad_link(&sensor->entity, 0, input, pad,
- flags);
+ flags);
if (ret < 0)
goto done;
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index f54349bce..cf8da2355 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -223,7 +223,7 @@ iss_video_far_end(struct iss_video *video)
if (entity == &video->video.entity)
continue;
- if (!is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_video_device(entity))
continue;
far_end = to_iss_video(media_entity_to_video_device(entity));
diff --git a/drivers/staging/media/tw686x-kh/Kconfig b/drivers/staging/media/tw686x-kh/Kconfig
new file mode 100644
index 000000000..6264d30ed
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/Kconfig
@@ -0,0 +1,17 @@
+config VIDEO_TW686X_KH
+ tristate "Intersil/Techwell TW686x Video For Linux"
+ depends on VIDEO_DEV && PCI && VIDEO_V4L2
+ depends on !(VIDEO_TW686X=y || VIDEO_TW686X=m) || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ help
+ Support for Intersil/Techwell TW686x-based frame grabber cards.
+
+ Currently supported chips:
+ - TW6864 (4 video channels),
+ - TW6865 (4 video channels, not tested, second generation chip),
+	  - TW6868 (8 video channels; only the first 4 channels, which use
+	    the built-in video decoder, are supported; not tested),
+ - TW6869 (8 video channels, second generation chip).
+
+ To compile this driver as a module, choose M here: the module
+ will be named tw686x-kh.
diff --git a/drivers/staging/media/tw686x-kh/Makefile b/drivers/staging/media/tw686x-kh/Makefile
new file mode 100644
index 000000000..2a36a38cf
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/Makefile
@@ -0,0 +1,3 @@
+tw686x-kh-objs := tw686x-kh-core.o tw686x-kh-video.o
+
+obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh.o
diff --git a/drivers/staging/media/tw686x-kh/TODO b/drivers/staging/media/tw686x-kh/TODO
new file mode 100644
index 000000000..480a495b1
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/TODO
@@ -0,0 +1,6 @@
+TODO:
+
+- implement V4L2_FIELD_INTERLACED* mode(s).
+- add audio support.
+
+Please Cc: patches to Krzysztof Halasa <khalasa@piap.pl>.
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
new file mode 100644
index 000000000..03b3b62c5
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "tw686x-kh.h"
+#include "tw686x-kh-regs.h"
+
+static irqreturn_t tw686x_irq(int irq, void *dev_id)
+{
+ struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
+ u32 int_status = reg_read(dev, INT_STATUS); /* cleared on read */
+ unsigned long flags;
+ unsigned int handled = 0;
+
+ if (int_status) {
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ dev->dma_requests |= int_status;
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ if (int_status & 0xFF0000FF)
+ handled = tw686x_kh_video_irq(dev);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int tw686x_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct tw686x_dev *dev;
+ int err;
+
+ dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev) +
+ (pci_id->driver_data & TYPE_MAX_CHANNELS) *
+ sizeof(dev->video_channels[0]), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ sprintf(dev->name, "TW%04X", pci_dev->device);
+ dev->type = pci_id->driver_data;
+
+ pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
+ pci_name(pci_dev), pci_dev->irq,
+ (unsigned long)pci_resource_start(pci_dev, 0));
+
+ dev->pci_dev = pci_dev;
+ if (pcim_enable_device(pci_dev))
+ return -EIO;
+
+ pci_set_master(pci_dev);
+
+ if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ pr_err("%s: 32-bit PCI DMA not supported\n", dev->name);
+ return -EIO;
+ }
+
+ err = pci_request_regions(pci_dev, dev->name);
+ if (err < 0) {
+ pr_err("%s: Unable to get MMIO region\n", dev->name);
+ return err;
+ }
+
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+ if (!dev->mmio) {
+ pr_err("%s: Unable to remap MMIO region\n", dev->name);
+ return -EIO;
+ }
+
+ reg_write(dev, SYS_SOFT_RST, 0x0F); /* Reset all subsystems */
+ mdelay(1);
+
+ reg_write(dev, SRST[0], 0x3F);
+ if (max_channels(dev) > 4)
+ reg_write(dev, SRST[1], 0x3F);
+ reg_write(dev, DMA_CMD, 0);
+ reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+ reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x3EFF0FF0);
+ reg_write(dev, DMA_TIMER_INTERVAL, 0x38000);
+ reg_write(dev, DMA_CONFIG, 0xFFFFFF04);
+
+ spin_lock_init(&dev->irq_lock);
+
+ err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw686x_irq,
+ IRQF_SHARED, dev->name, dev);
+ if (err < 0) {
+ pr_err("%s: Unable to get IRQ\n", dev->name);
+ return err;
+ }
+
+ err = tw686x_kh_video_init(dev);
+ if (err)
+ return err;
+
+ pci_set_drvdata(pci_dev, dev);
+ return 0;
+}
+
+static void tw686x_remove(struct pci_dev *pci_dev)
+{
+ struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
+
+ tw686x_kh_video_free(dev);
+}
+
+/* driver_data is number of A/V channels */
+static const struct pci_device_id tw686x_pci_tbl[] = {
+ {PCI_DEVICE(0x1797, 0x6864), .driver_data = 4},
+ /* not tested */
+ {PCI_DEVICE(0x1797, 0x6865), .driver_data = 4 | TYPE_SECOND_GEN},
+ /* TW6868 supports 8 A/V channels with an external TW2865 chip -
+ not supported by the driver */
+ {PCI_DEVICE(0x1797, 0x6868), .driver_data = 4}, /* not tested */
+ {PCI_DEVICE(0x1797, 0x6869), .driver_data = 8 | TYPE_SECOND_GEN},
+ {}
+};
+
+static struct pci_driver tw686x_pci_driver = {
+ .name = "tw686x-kh",
+ .id_table = tw686x_pci_tbl,
+ .probe = tw686x_probe,
+ .remove = tw686x_remove,
+};
+
+MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
+module_pci_driver(tw686x_pci_driver);
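
The 0xFF0000FF test in tw686x_irq() above bundles two per-channel indications; judging from the 0x01000001 << ch mask that video_thread() in tw686x-kh-video.c applies later in this patch, bit ch reports a completed field for channel ch and bit 24 + ch a DMA error. A minimal sketch of that decoding, with example_* names that are illustrative rather than part of the driver:

    /* Hedged sketch: decode INT_STATUS for one channel, following the
     * masks used by tw686x_irq() and video_thread(). */
    static bool example_channel_pending(u32 int_status, unsigned int ch)
    {
    	return int_status & (0x01000001U << ch);	/* done or error bit */
    }

    static bool example_channel_error(u32 int_status, unsigned int ch)
    {
    	return int_status & (1U << (24 + ch));		/* error bit only */
    }
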
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
new file mode 100644
index 000000000..53e1889ba
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
@@ -0,0 +1,103 @@
+/* DMA controller registers */
+#define REG8_1(a0) ((const u16[8]) {a0, a0 + 1, a0 + 2, a0 + 3, \
+ a0 + 4, a0 + 5, a0 + 6, a0 + 7})
+#define REG8_2(a0) ((const u16[8]) {a0, a0 + 2, a0 + 4, a0 + 6, \
+ a0 + 8, a0 + 0xA, a0 + 0xC, a0 + 0xE})
+#define REG8_8(a0) ((const u16[8]) {a0, a0 + 8, a0 + 0x10, a0 + 0x18, \
+ a0 + 0x20, a0 + 0x28, a0 + 0x30, a0 + 0x38})
+#define INT_STATUS 0x00
+#define PB_STATUS 0x01
+#define DMA_CMD 0x02
+#define VIDEO_FIFO_STATUS 0x03
+#define VIDEO_CHANNEL_ID 0x04
+#define VIDEO_PARSER_STATUS 0x05
+#define SYS_SOFT_RST 0x06
+#define DMA_PAGE_TABLE0_ADDR ((const u16[8]) {0x08, 0xD0, 0xD2, 0xD4, \
+ 0xD6, 0xD8, 0xDA, 0xDC})
+#define DMA_PAGE_TABLE1_ADDR ((const u16[8]) {0x09, 0xD1, 0xD3, 0xD5, \
+ 0xD7, 0xD9, 0xDB, 0xDD})
+#define DMA_CHANNEL_ENABLE 0x0A
+#define DMA_CONFIG 0x0B
+#define DMA_TIMER_INTERVAL 0x0C
+#define DMA_CHANNEL_TIMEOUT 0x0D
+#define VDMA_CHANNEL_CONFIG REG8_1(0x10)
+#define ADMA_P_ADDR REG8_2(0x18)
+#define ADMA_B_ADDR REG8_2(0x19)
+#define DMA10_P_ADDR 0x28 /* ??? */
+#define DMA10_B_ADDR 0x29
+#define VIDEO_CONTROL1 0x2A
+#define VIDEO_CONTROL2 0x2B
+#define AUDIO_CONTROL1 0x2C
+#define AUDIO_CONTROL2 0x2D
+#define PHASE_REF 0x2E
+#define GPIO_REG 0x2F
+#define INTL_HBAR_CTRL REG8_1(0x30)
+#define AUDIO_CONTROL3 0x38
+#define VIDEO_FIELD_CTRL REG8_1(0x39)
+#define HSCALER_CTRL REG8_1(0x42)
+#define VIDEO_SIZE REG8_1(0x4A)
+#define VIDEO_SIZE_F2 REG8_1(0x52)
+#define MD_CONF REG8_1(0x60)
+#define MD_INIT REG8_1(0x68)
+#define MD_MAP0 REG8_1(0x70)
+#define VDMA_P_ADDR REG8_8(0x80) /* not used in DMA SG mode */
+#define VDMA_WHP REG8_8(0x81)
+#define VDMA_B_ADDR REG8_8(0x82)
+#define VDMA_F2_P_ADDR REG8_8(0x84)
+#define VDMA_F2_WHP REG8_8(0x85)
+#define VDMA_F2_B_ADDR REG8_8(0x86)
+#define EP_REG_ADDR 0xFE
+#define EP_REG_DATA 0xFF
+
+/* Video decoder registers */
+#define VDREG8(a0) ((const u16[8]) { \
+ a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030, \
+ a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
+#define VIDSTAT VDREG8(0x100)
+#define BRIGHT VDREG8(0x101)
+#define CONTRAST VDREG8(0x102)
+#define SHARPNESS VDREG8(0x103)
+#define SAT_U VDREG8(0x104)
+#define SAT_V VDREG8(0x105)
+#define HUE VDREG8(0x106)
+#define CROP_HI VDREG8(0x107)
+#define VDELAY_LO VDREG8(0x108)
+#define VACTIVE_LO VDREG8(0x109)
+#define HDELAY_LO VDREG8(0x10A)
+#define HACTIVE_LO VDREG8(0x10B)
+#define MVSN VDREG8(0x10C)
+#define STATUS2 VDREG8(0x10C)
+#define SDT VDREG8(0x10E)
+#define SDT_EN VDREG8(0x10F)
+
+#define VSCALE_LO VDREG8(0x144)
+#define SCALE_HI VDREG8(0x145)
+#define HSCALE_LO VDREG8(0x146)
+#define F2CROP_HI VDREG8(0x147)
+#define F2VDELAY_LO VDREG8(0x148)
+#define F2VACTIVE_LO VDREG8(0x149)
+#define F2HDELAY_LO VDREG8(0x14A)
+#define F2HACTIVE_LO VDREG8(0x14B)
+#define F2VSCALE_LO VDREG8(0x14C)
+#define F2SCALE_HI VDREG8(0x14D)
+#define F2HSCALE_LO VDREG8(0x14E)
+#define F2CNT VDREG8(0x14F)
+
+#define VDREG2(a0) ((const u16[2]) {a0, a0 + 0x100})
+#define SRST VDREG2(0x180)
+#define ACNTL VDREG2(0x181)
+#define ACNTL2 VDREG2(0x182)
+#define CNTRL1 VDREG2(0x183)
+#define CKHY VDREG2(0x184)
+#define SHCOR VDREG2(0x185)
+#define CORING VDREG2(0x186)
+#define CLMPG VDREG2(0x187)
+#define IAGC VDREG2(0x188)
+#define VCTRL1 VDREG2(0x18F)
+#define MISC1 VDREG2(0x194)
+#define LOOP VDREG2(0x195)
+#define MISC2 VDREG2(0x196)
+
+#define CLMD VDREG2(0x197)
+#define AIGAIN ((const u16[8]) {0x1D0, 0x1D1, 0x1D2, 0x1D3, \
+ 0x2D0, 0x2D1, 0x2D2, 0x2D3})
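
The REG8_*, VDREG8 and VDREG2 macros above expand to anonymous const u16 arrays, so a register name can be indexed directly by channel number (video-decoder registers for channels 4-7 sit 0x100 higher). A short usage sketch, mirroring the idiom tw686x_s_ctrl() uses later in this patch (struct tw686x_dev and reg_write() come from tw686x-kh.h below):

    /* Illustrative only: per-channel access through the compound-literal
     * register tables above. */
    static void example_set_brightness(struct tw686x_dev *dev,
    				   unsigned int ch, u8 val)
    {
    	reg_write(dev, BRIGHT[ch], val);	/* BRIGHT is a const u16[8] */
    }
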
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
new file mode 100644
index 000000000..6ecb504a7
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
@@ -0,0 +1,821 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include "tw686x-kh.h"
+#include "tw686x-kh-regs.h"
+
+#define MAX_SG_ENTRY_SIZE (/* 8192 - 128 */ 4096)
+#define MAX_SG_DESC_COUNT 256 /* PAL 704x576 needs up to 198 4-KB pages */
+
+static const struct tw686x_format formats[] = {
+ {
+ .name = "4:2:2 packed, UYVY", /* aka Y422 */
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mode = 0,
+ .depth = 16,
+ }, {
+#if 0
+ .name = "4:2:0 packed, YUV",
+ .mode = 1, /* non-standard */
+ .depth = 12,
+ }, {
+ .name = "4:1:1 packed, YUV",
+ .mode = 2, /* non-standard */
+ .depth = 12,
+ }, {
+#endif
+ .name = "4:1:1 packed, YUV",
+ .fourcc = V4L2_PIX_FMT_Y41P,
+ .mode = 3,
+ .depth = 12,
+ }, {
+ .name = "15 bpp RGB",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .mode = 4,
+ .depth = 16,
+ }, {
+ .name = "16 bpp RGB",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mode = 5,
+ .depth = 16,
+ }, {
+ .name = "4:2:2 packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mode = 6,
+ .depth = 16,
+ }
+ /* mode 7 is "reserved" */
+};
+
+static const v4l2_std_id video_standards[7] = {
+ V4L2_STD_NTSC,
+ V4L2_STD_PAL,
+ V4L2_STD_SECAM,
+ V4L2_STD_NTSC_443,
+ V4L2_STD_PAL_M,
+ V4L2_STD_PAL_N,
+ V4L2_STD_PAL_60,
+};
+
+static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
+ if (formats[cnt].fourcc == fourcc)
+ return &formats[cnt];
+ return NULL;
+}
+
+static void tw686x_get_format(struct tw686x_video_channel *vc,
+ struct v4l2_format *f)
+{
+ const struct tw686x_format *format;
+ unsigned int width, height, height_div = 1;
+
+ format = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!format) {
+ format = &formats[0];
+ f->fmt.pix.pixelformat = format->fourcc;
+ }
+
+ width = 704;
+ if (f->fmt.pix.width < width * 3 / 4 /* halfway */)
+ width /= 2;
+
+ height = (vc->video_standard & V4L2_STD_625_50) ? 576 : 480;
+ if (f->fmt.pix.height < height * 3 / 4 /* halfway */)
+ height_div = 2;
+
+ switch (f->fmt.pix.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ height_div = 2;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ if (height_div > 1)
+ f->fmt.pix.field = V4L2_FIELD_BOTTOM;
+ break;
+ default:
+ if (height_div > 1)
+ f->fmt.pix.field = V4L2_FIELD_TOP;
+ else
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ }
+ height /= height_div;
+
+ f->fmt.pix.width = width;
+ f->fmt.pix.height = height;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * format->depth / 8;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+}
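+
+/* Worked example (illustrative, not in the original source): a 320x240
+ * NTSC request is negotiated to 352x240 with V4L2_FIELD_TOP -- the width
+ * halves below 3/4 of 704 and height_div becomes 2 below 3/4 of 480. */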
+
+/* video queue operations */
+
+static int tw686x_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ void *alloc_ctxs[])
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ unsigned int size = vc->width * vc->height * vc->format->depth / 8;
+
+ alloc_ctxs[0] = vc->alloc_ctx;
+ if (*nbuffers < 2)
+ *nbuffers = 2;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ sizes[0] = size;
+ *nplanes = 1; /* packed formats only */
+ return 0;
+}
+
+static void tw686x_buf_queue(struct vb2_buffer *vb)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tw686x_vb2_buf *buf;
+
+ buf = container_of(vbuf, struct tw686x_vb2_buf, vb);
+
+ spin_lock(&vc->qlock);
+ list_add_tail(&buf->list, &vc->vidq_queued);
+ spin_unlock(&vc->qlock);
+}
+
+static void setup_descs(struct tw686x_video_channel *vc, unsigned int n)
+{
+loop:
+ while (!list_empty(&vc->vidq_queued)) {
+ struct vdma_desc *descs = vc->sg_descs[n];
+ struct tw686x_vb2_buf *buf;
+ struct sg_table *vbuf;
+ struct scatterlist *sg;
+ unsigned int buf_len, count = 0;
+ int i;
+
+ buf = list_first_entry(&vc->vidq_queued, struct tw686x_vb2_buf,
+ list);
+ list_del(&buf->list);
+
+ buf_len = vc->width * vc->height * vc->format->depth / 8;
+ if (vb2_plane_size(&buf->vb.vb2_buf, 0) < buf_len) {
+ pr_err("Video buffer size too small\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ goto loop; /* try another */
+ }
+
+ vbuf = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
+ for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
+ dma_addr_t phys = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ while (len && buf_len) {
+ unsigned int entry_len = min_t(unsigned int, len,
+ MAX_SG_ENTRY_SIZE);
+ entry_len = min(entry_len, buf_len);
+ if (count == MAX_SG_DESC_COUNT) {
+ pr_err("Video buffer size too fragmented\n");
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ goto loop;
+ }
+ descs[count].phys = cpu_to_le32(phys);
+ descs[count++].flags_length =
+ cpu_to_le32(0x40000000 /* available */ |
+ entry_len);
+ phys += entry_len;
+ len -= entry_len;
+ buf_len -= entry_len;
+ }
+ if (!buf_len)
+ break;
+ }
+
+ /* clear the remaining entries */
+ while (count < MAX_SG_DESC_COUNT) {
+ descs[count].phys = 0;
+ descs[count++].flags_length = 0; /* unavailable */
+ }
+
+ buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ vc->curr_bufs[n] = buf;
+ return;
+ }
+ vc->curr_bufs[n] = NULL;
+}
+
+/* On TW6864 and TW6868, all channels share a pair of video DMA SG tables,
+   with 10-bit start_idx and end_idx determining the start and end of the
+   frame buffer for a particular channel.
+   TW6868 with all its 8 channels would be problematic (only 127 SG entries
+   per channel), but we support only 4 channels on this chip anyway (the
+   first 4 channels are driven by the internal video decoder; the other 4
+   would require an external TW286x part).
+
+   On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
+   starting at 0. Both chips have complete sets of internal video decoders
+   (4- or 8-channel, respectively).
+
+   All chips have separate SG tables for the two video frames. */
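+
+/* Worked example (illustrative): on a TW6864, channel 2 thus occupies
+   entries 512..767 of the shared table (start_idx = 2 * MAX_SG_DESC_COUNT,
+   end_idx = start_idx + 255), while on a TW6869 every channel spans
+   entries 0..255 of its own table -- see setup_dma_cfg() below. */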
+
+static void setup_dma_cfg(struct tw686x_video_channel *vc)
+{
+ unsigned int field_width = 704;
+ unsigned int field_height = (vc->video_standard & V4L2_STD_625_50) ?
+ 288 : 240;
+ unsigned int start_idx = is_second_gen(vc->dev) ? 0 :
+ vc->ch * MAX_SG_DESC_COUNT;
+ unsigned int end_idx = start_idx + MAX_SG_DESC_COUNT - 1;
+ u32 dma_cfg = (0 << 30) /* input selection */ |
+ (1 << 29) /* field2 dropped (if any) */ |
+ ((vc->height < 300) << 28) /* field dropping */ |
+ (1 << 27) /* master */ |
+ (0 << 25) /* master channel (for slave only) */ |
+ (0 << 24) /* (no) vertical (line) decimation */ |
+ ((vc->width < 400) << 23) /* horizontal decimation */ |
+ (vc->format->mode << 20) /* output video format */ |
+ (end_idx << 10) /* DMA end index */ |
+ start_idx /* DMA start index */;
+ u32 reg;
+
+ reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], dma_cfg);
+ reg_write(vc->dev, VIDEO_SIZE[vc->ch], (1 << 31) | (field_height << 16)
+ | field_width);
+ reg = reg_read(vc->dev, VIDEO_CONTROL1);
+ if (vc->video_standard & V4L2_STD_625_50)
+ reg |= 1 << (vc->ch + 13);
+ else
+ reg &= ~(1 << (vc->ch + 13));
+ reg_write(vc->dev, VIDEO_CONTROL1, reg);
+}
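+
+/* Worked example (illustrative): channel 0 on a first-generation chip at
+ * 704x480 UYVY (mode 0) yields dma_cfg = (1 << 29) | (1 << 27) |
+ * (255 << 10) = 0x2803FC00 -- master, both fields kept, no decimation,
+ * SG entries 0..255. */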
+
+static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ struct tw686x_dev *dev = vc->dev;
+ u32 dma_ch_mask;
+ unsigned int n;
+
+ setup_dma_cfg(vc);
+
+ /* queue video buffers if available */
+ spin_lock(&vc->qlock);
+ for (n = 0; n < 2; n++)
+ setup_descs(vc, n);
+ spin_unlock(&vc->qlock);
+
+ dev->video_active |= 1 << vc->ch;
+ vc->seq = 0;
+ dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE) | (1 << vc->ch);
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
+ reg_write(dev, DMA_CMD, (1 << 31) | dma_ch_mask);
+ return 0;
+}
+
+static void tw686x_stop_streaming(struct vb2_queue *vq)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ struct tw686x_dev *dev = vc->dev;
+ u32 dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
+ u32 dma_cmd = reg_read(dev, DMA_CMD);
+ unsigned int n;
+
+ dma_ch_mask &= ~(1 << vc->ch);
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
+
+ dev->video_active &= ~(1 << vc->ch);
+
+ dma_cmd &= ~(1 << vc->ch);
+ reg_write(dev, DMA_CMD, dma_cmd);
+
+ if (!dev->video_active) {
+ reg_write(dev, DMA_CMD, 0);
+ reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+ }
+
+ spin_lock(&vc->qlock);
+ while (!list_empty(&vc->vidq_queued)) {
+ struct tw686x_vb2_buf *buf;
+
+ buf = list_entry(vc->vidq_queued.next, struct tw686x_vb2_buf,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ for (n = 0; n < 2; n++)
+ if (vc->curr_bufs[n])
+ vb2_buffer_done(&vc->curr_bufs[n]->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+
+ spin_unlock(&vc->qlock);
+}
+
+static struct vb2_ops tw686x_video_qops = {
+ .queue_setup = tw686x_queue_setup,
+ .buf_queue = tw686x_buf_queue,
+ .start_streaming = tw686x_start_streaming,
+ .stop_streaming = tw686x_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tw686x_video_channel *vc;
+ struct tw686x_dev *dev;
+ unsigned int ch;
+
+ vc = container_of(ctrl->handler, struct tw686x_video_channel,
+ ctrl_handler);
+ dev = vc->dev;
+ ch = vc->ch;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ reg_write(dev, BRIGHT[ch], ctrl->val & 0xFF);
+ return 0;
+
+ case V4L2_CID_CONTRAST:
+ reg_write(dev, CONTRAST[ch], ctrl->val);
+ return 0;
+
+ case V4L2_CID_SATURATION:
+ reg_write(dev, SAT_U[ch], ctrl->val);
+ reg_write(dev, SAT_V[ch], ctrl->val);
+ return 0;
+
+ case V4L2_CID_HUE:
+ reg_write(dev, HUE[ch], ctrl->val & 0xFF);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = tw686x_s_ctrl,
+};
+
+static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ f->fmt.pix.width = vc->width;
+ f->fmt.pix.height = vc->height;
+ f->fmt.pix.field = vc->field;
+ f->fmt.pix.pixelformat = vc->format->fourcc;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * vc->format->depth / 8;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ tw686x_get_format(video_drvdata(file), f);
+ return 0;
+}
+
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ tw686x_get_format(vc, f);
+ vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
+ vc->field = f->fmt.pix.field;
+ vc->width = f->fmt.pix.width;
+ vc->height = f->fmt.pix.height;
+ return 0;
+}
+
+static int tw686x_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
+
+ strcpy(cap->driver, "tw686x-kh");
+ strcpy(cap->card, dev->name);
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci_dev));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ unsigned int cnt;
+ u32 sdt = 0; /* default */
+
+ for (cnt = 0; cnt < ARRAY_SIZE(video_standards); cnt++)
+ if (id & video_standards[cnt]) {
+ sdt = cnt;
+ break;
+ }
+
+ reg_write(vc->dev, SDT[vc->ch], sdt);
+ vc->video_standard = video_standards[sdt];
+ return 0;
+}
+
+static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ *id = vc->video_standard;
+ return 0;
+}
+
+static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
+ f->pixelformat = formats[f->index].fourcc;
+ return 0;
+}
+
+static int tw686x_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ memset(&sp->parm.capture, 0, sizeof(sp->parm.capture));
+ sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ v4l2_video_std_frame_period(vc->video_standard,
+ &sp->parm.capture.timeperframe);
+
+ return 0;
+}
+
+static int tw686x_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+	/* The chip has an internal multiplexer; support can be added
+	   if the actual hw uses it. */
+ if (inp->index)
+ return -EINVAL;
+
+ snprintf(inp->name, sizeof(inp->name), "Composite");
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_ALL;
+ inp->capabilities = V4L2_IN_CAP_STD;
+ return 0;
+}
+
+static int tw686x_g_input(struct file *file, void *priv, unsigned int *v)
+{
+ *v = 0;
+ return 0;
+}
+
+static int tw686x_s_input(struct file *file, void *priv, unsigned int v)
+{
+ if (v)
+ return -EINVAL;
+ return 0;
+}
+
+static const struct v4l2_file_operations tw686x_video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .unlocked_ioctl = video_ioctl2,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
+ .vidioc_querycap = tw686x_querycap,
+ .vidioc_enum_fmt_vid_cap = tw686x_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = tw686x_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = tw686x_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = tw686x_try_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_g_std = tw686x_g_std,
+ .vidioc_s_std = tw686x_s_std,
+ .vidioc_g_parm = tw686x_g_parm,
+ .vidioc_enum_input = tw686x_enum_input,
+ .vidioc_g_input = tw686x_g_input,
+ .vidioc_s_input = tw686x_s_input,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int video_thread(void *arg)
+{
+ struct tw686x_dev *dev = arg;
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_freezable();
+ add_wait_queue(&dev->video_thread_wait, &wait);
+
+ while (1) {
+ long timeout = schedule_timeout_interruptible(HZ);
+ unsigned int ch;
+
+ if (timeout == -ERESTARTSYS || kthread_should_stop())
+ break;
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc;
+ unsigned long flags;
+ u32 request, n, stat = VB2_BUF_STATE_DONE;
+
+ vc = &dev->video_channels[ch];
+ if (!(dev->video_active & (1 << ch)))
+ continue;
+
+ spin_lock_irq(&dev->irq_lock);
+ request = dev->dma_requests & (0x01000001 << ch);
+ if (request)
+ dev->dma_requests &= ~request;
+ spin_unlock_irq(&dev->irq_lock);
+
+ if (!request)
+ continue;
+
+ request >>= ch;
+
+ /* handle channel events */
+ if ((request & 0x01000000) |
+ (reg_read(dev, VIDEO_FIFO_STATUS) & (0x01010001 << ch)) |
+ (reg_read(dev, VIDEO_PARSER_STATUS) & (0x00000101 << ch))) {
+ /* DMA Errors - reset channel */
+ u32 reg;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ reg = reg_read(dev, DMA_CMD);
+ /* Reset DMA channel */
+ reg_write(dev, DMA_CMD, reg & ~(1 << ch));
+ reg_write(dev, DMA_CMD, reg);
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ stat = VB2_BUF_STATE_ERROR;
+ }
+
+ /* handle video stream */
+ mutex_lock(&vc->vb_mutex);
+ spin_lock(&vc->qlock);
+ n = !!(reg_read(dev, PB_STATUS) & (1 << ch));
+ if (vc->curr_bufs[n]) {
+ struct vb2_v4l2_buffer *vb;
+
+ vb = &vc->curr_bufs[n]->vb;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb->field = vc->field;
+ if (V4L2_FIELD_HAS_BOTH(vc->field))
+ vb->sequence = vc->seq++;
+ else
+ vb->sequence = (vc->seq++) / 2;
+ vb2_set_plane_payload(&vb->vb2_buf, 0,
+ vc->width * vc->height * vc->format->depth / 8);
+ vb2_buffer_done(&vb->vb2_buf, stat);
+ }
+ setup_descs(vc, n);
+ spin_unlock(&vc->qlock);
+ mutex_unlock(&vc->vb_mutex);
+ }
+ try_to_freeze();
+ }
+
+ remove_wait_queue(&dev->video_thread_wait, &wait);
+ return 0;
+}
+
+int tw686x_kh_video_irq(struct tw686x_dev *dev)
+{
+ unsigned long flags, handled = 0;
+ u32 requests;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ requests = dev->dma_requests;
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ if (requests & dev->video_active) {
+ wake_up_interruptible_all(&dev->video_thread_wait);
+ handled = 1;
+ }
+ return handled;
+}
+
+void tw686x_kh_video_free(struct tw686x_dev *dev)
+{
+ unsigned int ch, n;
+
+ if (dev->video_thread)
+ kthread_stop(dev->video_thread);
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+ v4l2_ctrl_handler_free(&vc->ctrl_handler);
+ if (vc->device)
+ video_unregister_device(vc->device);
+ vb2_dma_sg_cleanup_ctx(vc->alloc_ctx);
+ for (n = 0; n < 2; n++) {
+ struct dma_desc *descs = &vc->sg_tables[n];
+
+ if (descs->virt)
+ pci_free_consistent(dev->pci_dev, descs->size,
+ descs->virt, descs->phys);
+ }
+ }
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+}
+
+#define SG_TABLE_SIZE (MAX_SG_DESC_COUNT * sizeof(struct vdma_desc))
+
+int tw686x_kh_video_init(struct tw686x_dev *dev)
+{
+ unsigned int ch, n;
+ int err;
+
+ init_waitqueue_head(&dev->video_thread_wait);
+
+ err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
+ if (err)
+ return err;
+
+ reg_write(dev, VIDEO_CONTROL1, 0); /* NTSC, disable scaler */
+ reg_write(dev, PHASE_REF, 0x00001518); /* Scatter-gather DMA mode */
+
+ /* setup required SG table sizes */
+ for (n = 0; n < 2; n++)
+ if (is_second_gen(dev)) {
+			/* TW6865, TW6869 - each channel needs a pair of
+ descriptor tables */
+ for (ch = 0; ch < max_channels(dev); ch++)
+ dev->video_channels[ch].sg_tables[n].size =
+ SG_TABLE_SIZE;
+
+ } else
+			/* TW6864, TW6868 - we need to allocate a pair of
+ descriptor tables, common for all channels.
+ Each table will be bigger than 4 KB. */
+ dev->video_channels[0].sg_tables[n].size =
+ max_channels(dev) * SG_TABLE_SIZE;
+
+ /* allocate SG tables and initialize video channels */
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+ struct video_device *vdev;
+
+ mutex_init(&vc->vb_mutex);
+ spin_lock_init(&vc->qlock);
+ INIT_LIST_HEAD(&vc->vidq_queued);
+
+ vc->dev = dev;
+ vc->ch = ch;
+
+ /* default settings: NTSC */
+ vc->format = &formats[0];
+ vc->video_standard = V4L2_STD_NTSC;
+ reg_write(vc->dev, SDT[vc->ch], 0);
+ vc->field = V4L2_FIELD_SEQ_BT;
+ vc->width = 704;
+ vc->height = 480;
+
+ for (n = 0; n < 2; n++) {
+ void *cpu;
+
+ if (vc->sg_tables[n].size) {
+ unsigned int reg = n ? DMA_PAGE_TABLE1_ADDR[ch] :
+ DMA_PAGE_TABLE0_ADDR[ch];
+
+ cpu = pci_alloc_consistent(dev->pci_dev,
+ vc->sg_tables[n].size,
+ &vc->sg_tables[n].phys);
+ if (!cpu) {
+ pr_err("Error allocating video DMA scatter-gather tables\n");
+ err = -ENOMEM;
+ goto error;
+ }
+ vc->sg_tables[n].virt = cpu;
+ reg_write(dev, reg, vc->sg_tables[n].phys);
+ } else
+ cpu = dev->video_channels[0].sg_tables[n].virt +
+ ch * SG_TABLE_SIZE;
+
+ vc->sg_descs[n] = cpu;
+ }
+
+ reg_write(dev, VCTRL1[0], 0x24);
+ reg_write(dev, LOOP[0], 0xA5);
+ if (max_channels(dev) > 4) {
+ reg_write(dev, VCTRL1[1], 0x24);
+ reg_write(dev, LOOP[1], 0xA5);
+ }
+ reg_write(dev, VIDEO_FIELD_CTRL[ch], 0);
+ reg_write(dev, VDELAY_LO[ch], 0x14);
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ pr_warn("Unable to allocate video device\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
+ vc->alloc_ctx = vb2_dma_sg_init_ctx(&dev->pci_dev->dev);
+ if (IS_ERR(vc->alloc_ctx)) {
+ pr_warn("Unable to initialize DMA scatter-gather context\n");
+ err = PTR_ERR(vc->alloc_ctx);
+ goto error;
+ }
+
+ vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ vc->vidq.drv_priv = vc;
+ vc->vidq.buf_struct_size = sizeof(struct tw686x_vb2_buf);
+ vc->vidq.ops = &tw686x_video_qops;
+ vc->vidq.mem_ops = &vb2_dma_sg_memops;
+ vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vc->vidq.min_buffers_needed = 2;
+ vc->vidq.lock = &vc->vb_mutex;
+ vc->vidq.gfp_flags = GFP_DMA32;
+
+ err = vb2_queue_init(&vc->vidq);
+ if (err)
+ goto error;
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
+ vdev->fops = &tw686x_video_fops;
+ vdev->ioctl_ops = &tw686x_video_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->v4l2_dev = &dev->v4l2_dev;
+ vdev->queue = &vc->vidq;
+ vdev->tvnorms = V4L2_STD_ALL;
+ vdev->minor = -1;
+ vdev->lock = &vc->vb_mutex;
+
+ dev->video_channels[ch].device = vdev;
+ video_set_drvdata(vdev, vc);
+ err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (err < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&vc->ctrl_handler,
+ 4 /* number of controls */);
+ vdev->ctrl_handler = &vc->ctrl_handler;
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 64);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops, V4L2_CID_HUE,
+ -124, 127, 1, 0);
+ err = vc->ctrl_handler.error;
+ if (err)
+ goto error;
+
+ v4l2_ctrl_handler_setup(&vc->ctrl_handler);
+ }
+
+ dev->video_thread = kthread_run(video_thread, dev, "tw686x_video");
+ if (IS_ERR(dev->video_thread)) {
+ err = PTR_ERR(dev->video_thread);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ tw686x_kh_video_free(dev);
+ return err;
+}
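
The capture path above keeps two armed descriptor sets per channel and completes them from a polling kthread: the PB_STATUS bit picks which of the pair the hardware just finished, that buffer is returned to vb2, and the slot is immediately re-armed from the queued list. A condensed, illustrative sketch of that ping-pong hand-off (locking, timestamps and sequence numbering omitted):

    /* Hedged sketch of the completion step inside video_thread() above. */
    static void example_handle_frame(struct tw686x_dev *dev,
    				 struct tw686x_video_channel *vc)
    {
    	unsigned int n = !!(reg_read(dev, PB_STATUS) & (1 << vc->ch));

    	if (vc->curr_bufs[n])
    		vb2_buffer_done(&vc->curr_bufs[n]->vb.vb2_buf,
    				VB2_BUF_STATE_DONE);
    	setup_descs(vc, n);	/* refill from vidq_queued, or park NULL */
    }
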
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh.h b/drivers/staging/media/tw686x-kh/tw686x-kh.h
new file mode 100644
index 000000000..dc257967d
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <media/videobuf2-dma-sg.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#define TYPE_MAX_CHANNELS 0x0F
+#define TYPE_SECOND_GEN 0x10
+
+struct tw686x_format {
+ char *name;
+ unsigned int fourcc;
+ unsigned int depth;
+ unsigned int mode;
+};
+
+struct dma_desc {
+ dma_addr_t phys;
+ void *virt;
+ unsigned int size;
+};
+
+struct vdma_desc {
+ __le32 flags_length; /* 3 MSBits for flags, 13 LSBits for length */
+ __le32 phys;
+};
+
+struct tw686x_vb2_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct tw686x_video_channel {
+ struct tw686x_dev *dev;
+
+ struct vb2_queue vidq;
+ struct list_head vidq_queued;
+ struct video_device *device;
+ struct dma_desc sg_tables[2];
+ struct tw686x_vb2_buf *curr_bufs[2];
+ void *alloc_ctx;
+ struct vdma_desc *sg_descs[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ const struct tw686x_format *format;
+ struct mutex vb_mutex;
+ spinlock_t qlock;
+ v4l2_std_id video_standard;
+ unsigned int width, height;
+ enum v4l2_field field; /* supported TOP, BOTTOM, SEQ_TB and SEQ_BT */
+ unsigned int seq; /* video field or frame counter */
+ unsigned int ch;
+};
+
+/* global device status */
+struct tw686x_dev {
+ spinlock_t irq_lock;
+
+ struct v4l2_device v4l2_dev;
+ struct snd_card *card; /* sound card */
+
+ unsigned int video_active; /* active video channel mask */
+
+ char name[32];
+ unsigned int type;
+ struct pci_dev *pci_dev;
+ __u32 __iomem *mmio;
+
+ struct task_struct *video_thread;
+ wait_queue_head_t video_thread_wait;
+ u32 dma_requests;
+
+ struct tw686x_video_channel video_channels[0];
+};
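+
+/* Note (illustrative): video_channels[] is a zero-length trailing array;
+ * tw686x_probe() in tw686x-kh-core.c sizes the allocation for 4 or 8
+ * channels using the TYPE_MAX_CHANNELS bits of pci_device_id driver_data. */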
+
+static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
+{
+ return readl(dev->mmio + reg);
+}
+
+static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
+ uint32_t value)
+{
+ writel(value, dev->mmio + reg);
+}
+
+static inline unsigned int max_channels(struct tw686x_dev *dev)
+{
+ return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
+}
+
+static inline unsigned int is_second_gen(struct tw686x_dev *dev)
+{
+ /* each channel has its own DMA SG table */
+ return dev->type & TYPE_SECOND_GEN;
+}
+
+int tw686x_kh_video_irq(struct tw686x_dev *dev);
+int tw686x_kh_video_init(struct tw686x_dev *dev);
+void tw686x_kh_video_free(struct tw686x_dev *dev);
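
Since mmio is declared as a 32-bit pointer, reg_read() and reg_write() above scale the register index by sizeof(u32); the values in tw686x-kh-regs.h (0x00, 0x01, 0x02, ...) are therefore word indexes, not byte offsets. An illustrative byte-offset equivalent, not part of the driver:

    /* Illustrative: register index 0x02 (DMA_CMD) is byte offset 0x08
     * from BAR 0. */
    static inline u32 example_reg_read(struct tw686x_dev *dev, unsigned int reg)
    {
    	return readl((u8 __iomem *)dev->mmio + 4 * reg);
    }
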
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
index 5a713df1d..66343ba42 100644
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ b/drivers/staging/most/hdm-dim2/dim2_errors.h
@@ -15,10 +15,6 @@
#ifndef _MOST_DIM_ERRORS_H
#define _MOST_DIM_ERRORS_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/**
* MOST DIM errors.
*/
@@ -58,8 +54,4 @@ enum dim_errors_t {
DIM_ERR_OVERFLOW,
};
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index fc73d4f97..1c924e869 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -18,10 +18,6 @@
#include <linux/types.h>
#include "dim2_reg.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
* The values below are specified in the hardware specification.
* So, they should not be changed until the hardware specification changes.
@@ -42,14 +38,12 @@ struct dim_ch_state_t {
u16 done_buffers; /* Number of completed buffers */
};
-typedef int atomic_counter_t;
-
struct int_ch_state {
/* changed only in interrupt context */
- volatile atomic_counter_t request_counter;
+ volatile int request_counter;
/* changed only in task context */
- volatile atomic_counter_t service_counter;
+ volatile int service_counter;
u8 idx1;
u8 idx2;
@@ -110,8 +104,4 @@ void dimcb_io_write(u32 __iomem *ptr32, u32 value);
void dimcb_on_error(u8 error_id, const char *error_message);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
index bcf6a79f6..e0837b6b9 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -17,10 +17,6 @@
#include <linux/types.h>
-#ifdef __cplusplus
-extern "C" {
-#endif
-
struct dim2_regs {
/* 0x00 */ u32 MLBC0;
/* 0x01 */ u32 rsvd0[1];
@@ -166,8 +162,4 @@ enum {
CAT_CL_MASK = DIM2_MASK(6)
};
-#ifdef __cplusplus
-}
-#endif
-
#endif /* DIM2_OS62420_H */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 163f21a12..e389009fc 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -42,23 +42,33 @@ static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
static int enable_hw_ecc;
static int enable_read_hw_ecc;
-static struct nand_ecclayout spinand_oob_64 = {
- .eccbytes = 24,
- .eccpos = {
- 1, 2, 3, 4, 5, 6,
- 17, 18, 19, 20, 21, 22,
- 33, 34, 35, 36, 37, 38,
- 49, 50, 51, 52, 53, 54, },
- .oobfree = {
- {.offset = 8,
- .length = 8},
- {.offset = 24,
- .length = 8},
- {.offset = 40,
- .length = 8},
- {.offset = 56,
- .length = 8},
- }
+static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 1;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_oob_64_ops = {
+ .ecc = spinand_ooblayout_64_ecc,
+ .free = spinand_ooblayout_64_free,
};
#endif
@@ -886,11 +896,11 @@ static int spinand_probe(struct spi_device *spi_nand)
chip->ecc.strength = 1;
chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- chip->ecc.layout = &spinand_oob_64;
chip->ecc.read_page = spinand_read_page_hwecc;
chip->ecc.write_page = spinand_write_page_hwecc;
#else
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
if (spinand_disable_ecc(spi_nand) < 0)
dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
__func__);
@@ -912,6 +922,9 @@ static int spinand_probe(struct spi_device *spi_nand)
mtd->dev.parent = &spi_nand->dev;
mtd->oobsize = 64;
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
+#endif
if (nand_scan(mtd, 1))
return -ENXIO;
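
The section callbacks above reproduce the retired static table exactly: ECC regions sit at offsets 16 * section + 1 with length 6 (the old eccpos sets 1-6, 17-22, 33-38, 49-54) and free regions at 16 * section + 8 with length 8 (the old oobfree entries at 8, 24, 40 and 56). A small illustrative walk over the converted layout, for inspection only:

    /* Illustrative only: enumerate the converted OOB layout. */
    static void example_dump_oob(struct mtd_info *mtd)
    {
    	struct mtd_oob_region region;
    	int section;

    	for (section = 0; section < 4; section++) {
    		spinand_ooblayout_64_ecc(mtd, section, &region);
    		pr_info("ecc  %d: off %u len %u\n", section,
    			region.offset, region.length);
    		spinand_ooblayout_64_free(mtd, section, &region);
    		pr_info("free %d: off %u len %u\n", section,
    			region.offset, region.length);
    	}
    }
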
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index aa1cdf602..99445d0fc 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -850,7 +850,7 @@ static int xlr_mii_probe(struct xlr_net_priv *priv)
/* Attach MAC to PHY */
phydev = phy_connect(priv->ndev, phydev_name(phydev),
- &xlr_gmac_link_adjust, priv->nd->phy_interface);
+ xlr_gmac_link_adjust, priv->nd->phy_interface);
if (IS_ERR(phydev)) {
pr_err("could not attach PHY\n");
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 9fda136b8..c1feccf8d 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
- if (msg == NULL)
+ if (!msg)
return -ENOMEM;
msg->data[0] = size;
@@ -620,7 +620,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else {
nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
/* Should not happen in a normal world */
- if (unlikely(nvec->rx == NULL)) {
+ if (unlikely(!nvec->rx)) {
nvec->state = 0;
break;
}
@@ -659,10 +659,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
to_send = nvec->tx->data[nvec->tx->pos++];
} else {
- dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
+ dev_err(nvec->dev,
+ "tx buffer underflow on %p (%u > %u)\n",
nvec->tx,
- (uint) (nvec->tx ? nvec->tx->pos : 0),
- (uint) (nvec->tx ? nvec->tx->size : 0));
+ (uint)(nvec->tx ? nvec->tx->pos : 0),
+ (uint)(nvec->tx ? nvec->tx->size : 0));
nvec->state = 0;
}
break;
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index b4a0545e8..fcbb0fa03 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -90,7 +90,7 @@ static int nvec_power_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
if (event_type != NVEC_SYS)
return NOTIFY_DONE;
@@ -126,7 +126,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
int status_changed = 0;
if (event_type != NVEC_BAT)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b6993b0b8..a10fe3af9 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -172,12 +172,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- 1ull << pow_receive_group);
+ 1ull << pow_receive_group);
cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
} else {
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+ (old_group_mask & ~0xFFFFull) |
+ 1 << pow_receive_group);
}
if (USE_ASYNC_IOBDMA) {
@@ -374,7 +375,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* doesn't exist.
*/
printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- port);
+ port);
dev_kfree_skb_irq(skb);
}
/*
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a5973fd01..315a63d70 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -30,7 +30,7 @@ static inline void cvm_oct_rx_refill_pool(int fill_threshold)
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
+ number_to_free - num_freed);
}
}
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ffe9bd77a..6b4c20872 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -58,9 +58,9 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
+static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
- int32_t undo;
+ int undo;
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
@@ -83,7 +83,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
- int32_t skb_to_free;
+ int skb_to_free;
int qos, queues_per_port;
int total_freed = 0;
int total_remaining = 0;
@@ -148,8 +148,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
struct octeon_ethernet *priv = netdev_priv(dev);
struct sk_buff *to_free_list;
- int32_t skb_to_free;
- int32_t buffers_to_free;
+ int skb_to_free;
+ int buffers_to_free;
u32 total_to_clean;
unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
@@ -220,7 +220,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
priv->fau + qos * 4, MAX_SKB_TO_FREE);
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau +
+ qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -402,7 +403,7 @@ dont_put_skbuff_in_hw:
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau + qos * 4);
/*
* If we're sending faster than the receive can free them then
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 271e1b8d8..e9cd5f242 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -635,7 +635,7 @@ static struct device_node *cvm_oct_of_get_child(
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
+ int interface, int port)
{
struct device_node *ni, *np;
@@ -815,7 +815,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
+ interface, priv->port);
free_netdev(dev);
} else {
cvm_oct_device[priv->port] = dev;
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
deleted file mode 100644
index f1f3ecadf..000000000
--- a/drivers/staging/rdma/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-menuconfig STAGING_RDMA
- tristate "RDMA staging drivers"
- depends on INFINIBAND
- depends on PCI || BROKEN
- depends on HAS_IOMEM
- depends on NET
- depends on INET
- default n
- ---help---
- This option allows you to select a number of RDMA drivers that
- fall into one of two categories: deprecated drivers being held
- here before finally being removed or new drivers that still need
- some work before being moved to the normal RDMA driver area.
-
- If you wish to work on these drivers, to help improve them, or
- to report problems you have with them, please use the
- linux-rdma@vger.kernel.org mailing list.
-
- If in doubt, say N here.
-
-
-# Please keep entries in alphabetic order
-if STAGING_RDMA
-
-source "drivers/staging/rdma/hfi1/Kconfig"
-
-endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
deleted file mode 100644
index 8c7fc1de4..000000000
--- a/drivers/staging/rdma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
deleted file mode 100644
index a925fb0db..000000000
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-config INFINIBAND_HFI1
- tristate "Intel OPA Gen1 support"
- depends on X86_64 && INFINIBAND_RDMAVT
- select MMU_NOTIFIER
- select CRC32
- default m
- ---help---
- This is a low-level driver for Intel OPA Gen1 adapter.
-config HFI1_DEBUG_SDMA_ORDER
- bool "HFI1 SDMA Order debug"
- depends on INFINIBAND_HFI1
- default n
- ---help---
- This is a debug flag to test for out of order
- sdma completions for unit testing
-config HFI1_VERBS_31BIT_PSN
- bool "HFI1 enable 31 bit PSN"
- depends on INFINIBAND_HFI1
- default y
- ---help---
- Setting this enables 31 BIT PSN
- For verbs RC/UC
-config SDMA_VERBOSITY
- bool "Config SDMA Verbosity"
- depends on INFINIBAND_HFI1
- default n
- ---help---
- This is a configuration flag to enable verbose
- SDMA debug
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile
deleted file mode 100644
index 8dc59382e..000000000
--- a/drivers/staging/rdma/hfi1/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# HFI driver
-#
-#
-#
-# Called from the kernel module build system.
-#
-obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-
-hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \
- eprom.o file_ops.o firmware.o \
- init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
- qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
- uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
- verbs_txreq.o
-hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
-
-CFLAGS_trace.o = -I$(src)
-ifdef MVERSION
-CFLAGS_driver.o = -DHFI_DRIVER_VERSION_BASE=\"$(MVERSION)\"
-endif
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
deleted file mode 100644
index 4c6f1d7d2..000000000
--- a/drivers/staging/rdma/hfi1/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-July, 2015
-
-- Remove unneeded file entries in sysfs
-- Remove software processing of IB protocol and place in library for use
- by qib, ipath (if still present), hfi1, and eventually soft-roce
-- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/staging/rdma/hfi1/affinity.c
deleted file mode 100644
index 2cb8ca77f..000000000
--- a/drivers/staging/rdma/hfi1/affinity.c
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/topology.h>
-#include <linux/cpumask.h>
-#include <linux/module.h>
-
-#include "hfi.h"
-#include "affinity.h"
-#include "sdma.h"
-#include "trace.h"
-
-struct cpu_mask_set {
- struct cpumask mask;
- struct cpumask used;
- uint gen;
-};
-
-struct hfi1_affinity {
- struct cpu_mask_set def_intr;
- struct cpu_mask_set rcv_intr;
- struct cpu_mask_set proc;
- /* spin lock to protect affinity struct */
- spinlock_t lock;
-};
-
-/* Name of IRQ types, indexed by enum irq_type */
-static const char * const irq_type_names[] = {
- "SDMA",
- "RCVCTXT",
- "GENERAL",
- "OTHER",
-};
-
-static inline void init_cpu_mask_set(struct cpu_mask_set *set)
-{
- cpumask_clear(&set->mask);
- cpumask_clear(&set->used);
- set->gen = 0;
-}
-
-/*
- * Interrupt affinity.
- *
- * non-rcv avail gets a default mask that
- * starts as possible cpus with threads reset
- * and each rcv avail reset.
- *
- * rcv avail gets node relative 1 wrapping back
- * to the node relative 1 as necessary.
- *
- */
-int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
-{
- int node = pcibus_to_node(dd->pcidev->bus);
- struct hfi1_affinity *info;
- const struct cpumask *local_mask;
- int curr_cpu, possible, i, ht;
-
- if (node < 0)
- node = numa_node_id();
- dd->node = node;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
- spin_lock_init(&info->lock);
-
- init_cpu_mask_set(&info->def_intr);
- init_cpu_mask_set(&info->rcv_intr);
- init_cpu_mask_set(&info->proc);
-
- local_mask = cpumask_of_node(dd->node);
- if (cpumask_first(local_mask) >= nr_cpu_ids)
- local_mask = topology_core_cpumask(0);
- /* use local mask as default */
- cpumask_copy(&info->def_intr.mask, local_mask);
- /*
- * Remove HT cores from the default mask. Do this in two steps below.
- */
- possible = cpumask_weight(&info->def_intr.mask);
- ht = cpumask_weight(topology_sibling_cpumask(
- cpumask_first(&info->def_intr.mask)));
- /*
- * Step 1. Skip over the first N HT siblings and use them as the
- * "real" cores. Assumes that HT cores are not enumerated in
- * succession (except in the single core case).
- */
- curr_cpu = cpumask_first(&info->def_intr.mask);
- for (i = 0; i < possible / ht; i++)
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- /*
- * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
- * skip any gaps.
- */
- for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- }
-
- /* fill in the receive list */
- possible = cpumask_weight(&info->def_intr.mask);
- curr_cpu = cpumask_first(&info->def_intr.mask);
- if (possible == 1) {
- /* only one CPU, everyone will use it */
- cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
- } else {
- /*
- * Retain the first CPU in the default list for the control
- * context.
- */
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- /*
- * Remove the remaining kernel receive queues from
- * the default list and add them to the receive list.
- */
- for (i = 0; i < dd->n_krcv_queues - 1; i++) {
- cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
- cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- if (curr_cpu >= nr_cpu_ids)
- break;
- }
- }
-
- cpumask_copy(&info->proc.mask, cpu_online_mask);
- dd->affinity = info;
- return 0;
-}
-
-void hfi1_dev_affinity_free(struct hfi1_devdata *dd)
-{
- kfree(dd->affinity);
-}
-
-int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
-{
- int ret;
- cpumask_var_t diff;
- struct cpu_mask_set *set;
- struct sdma_engine *sde = NULL;
- struct hfi1_ctxtdata *rcd = NULL;
- char extra[64];
- int cpu = -1;
-
- extra[0] = '\0';
- cpumask_clear(&msix->mask);
-
- ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
- if (!ret)
- return -ENOMEM;
-
- switch (msix->type) {
- case IRQ_SDMA:
- sde = (struct sdma_engine *)msix->arg;
- scnprintf(extra, 64, "engine %u", sde->this_idx);
- /* fall through */
- case IRQ_GENERAL:
- set = &dd->affinity->def_intr;
- break;
- case IRQ_RCVCTXT:
- rcd = (struct hfi1_ctxtdata *)msix->arg;
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- set = &dd->affinity->def_intr;
- cpu = cpumask_first(&set->mask);
- } else {
- set = &dd->affinity->rcv_intr;
- }
- scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
- break;
- default:
- dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
- free_cpumask_var(diff);
- return -EINVAL;
- }
-
- /*
- * The control receive context is placed on a particular CPU, which
- * is set above. Skip accounting for it. Everything else finds its
- * CPU here.
- */
- if (cpu == -1) {
- spin_lock(&dd->affinity->lock);
- if (cpumask_equal(&set->mask, &set->used)) {
- /*
- * We've used up all the CPUs, bump up the generation
- * and reset the 'used' map
- */
- set->gen++;
- cpumask_clear(&set->used);
- }
- cpumask_andnot(diff, &set->mask, &set->used);
- cpu = cpumask_first(diff);
- cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&dd->affinity->lock);
- }
-
- switch (msix->type) {
- case IRQ_SDMA:
- sde->cpu = cpu;
- break;
- case IRQ_GENERAL:
- case IRQ_RCVCTXT:
- case IRQ_OTHER:
- break;
- }
-
- cpumask_set_cpu(cpu, &msix->mask);
- dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
- msix->msix.vector, irq_type_names[msix->type],
- extra, cpu);
- irq_set_affinity_hint(msix->msix.vector, &msix->mask);
-
- free_cpumask_var(diff);
- return 0;
-}
-
-void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
- struct hfi1_msix_entry *msix)
-{
- struct cpu_mask_set *set = NULL;
- struct hfi1_ctxtdata *rcd;
-
- switch (msix->type) {
- case IRQ_SDMA:
- case IRQ_GENERAL:
- set = &dd->affinity->def_intr;
- break;
- case IRQ_RCVCTXT:
- rcd = (struct hfi1_ctxtdata *)msix->arg;
- /* only do accounting for non control contexts */
- if (rcd->ctxt != HFI1_CTRL_CTXT)
- set = &dd->affinity->rcv_intr;
- break;
- default:
- return;
- }
-
- if (set) {
- spin_lock(&dd->affinity->lock);
- cpumask_andnot(&set->used, &set->used, &msix->mask);
- if (cpumask_empty(&set->used) && set->gen) {
- set->gen--;
- cpumask_copy(&set->used, &set->mask);
- }
- spin_unlock(&dd->affinity->lock);
- }
-
- irq_set_affinity_hint(msix->msix.vector, NULL);
- cpumask_clear(&msix->mask);
-}
-
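Editor's sketch: taken together, hfi1_get_irq_affinity() and hfi1_put_irq_affinity() implement a generation-counted round-robin over a CPU set. The standalone sketch below (bitmask instead of cpumask, illustrative 4-CPU set, no locking) shows the two halves: allocation bumps 'gen' and clears 'used' when the set wraps, and release unwinds a generation when a wrapped set drains.

#include <stdint.h>
#include <stdio.h>

struct cpu_set { uint64_t mask, used; unsigned gen; };

static int get_cpu(struct cpu_set *s)
{
	int cpu;

	if (s->used == s->mask) {	/* every CPU taken: start overloading */
		s->gen++;
		s->used = 0;
	}
	cpu = __builtin_ctzll(s->mask & ~s->used);
	s->used |= 1ull << cpu;
	return cpu;
}

static void put_cpu(struct cpu_set *s, int cpu)
{
	s->used &= ~(1ull << cpu);
	if (!s->used && s->gen) {	/* a wrapped generation drained */
		s->gen--;
		s->used = s->mask;	/* the previous generation is still live */
	}
}

int main(void)
{
	struct cpu_set s = { .mask = 0xf };
	int i, cpu[6];

	for (i = 0; i < 6; i++)		/* six users on four CPUs: wraps once */
		cpu[i] = get_cpu(&s);
	printf("gen after six gets: %u\n", s.gen);		/* 1 */
	put_cpu(&s, cpu[4]);
	put_cpu(&s, cpu[5]);
	printf("gen after the wrap drains: %u\n", s.gen);	/* 0 */
	return 0;
}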
-int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
-{
- int cpu = -1, ret;
- cpumask_var_t diff, mask, intrs;
- const struct cpumask *node_mask,
- *proc_mask = tsk_cpus_allowed(current);
- struct cpu_mask_set *set = &dd->affinity->proc;
- char buf[1024];
-
- /*
- * check whether process/context affinity has already
- * been set
- */
- if (cpumask_weight(proc_mask) == 1) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
- current->pid, current->comm, buf);
- /*
- * Mark the pre-set CPU as used. This is atomic so we don't
- * need the lock
- */
- cpu = cpumask_first(proc_mask);
- cpumask_set_cpu(cpu, &set->used);
- goto done;
- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
- current->pid, current->comm, buf);
- goto done;
- }
-
- /*
- * The process does not have a preset CPU affinity so find one to
- * recommend. We prefer CPUs on the same NUMA as the device.
- */
-
- ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
- if (!ret)
- goto done;
- ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
- if (!ret)
- goto free_diff;
- ret = zalloc_cpumask_var(&intrs, GFP_KERNEL);
- if (!ret)
- goto free_mask;
-
- spin_lock(&dd->affinity->lock);
- /*
- * If we've used all available CPUs, clear the mask and start
- * overloading.
- */
- if (cpumask_equal(&set->mask, &set->used)) {
- set->gen++;
- cpumask_clear(&set->used);
- }
-
- /* CPUs used by interrupt handlers */
- cpumask_copy(intrs, (dd->affinity->def_intr.gen ?
- &dd->affinity->def_intr.mask :
- &dd->affinity->def_intr.used));
- cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
- &dd->affinity->rcv_intr.mask :
- &dd->affinity->rcv_intr.used));
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
- hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
-
- /*
- * If we don't have a NUMA node requested, preference is towards
- * device NUMA node
- */
- if (node == -1)
- node = dd->node;
- node_mask = cpumask_of_node(node);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
-
- /* diff will hold all unused cpus */
- cpumask_andnot(diff, &set->mask, &set->used);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
- hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
-
- /* get cpumask of available CPUs on preferred NUMA */
- cpumask_and(mask, diff, node_mask);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
-
- /*
- * At first, we don't want to place processes on the same
- * CPUs as interrupt handlers.
- */
- cpumask_andnot(diff, mask, intrs);
- if (!cpumask_empty(diff))
- cpumask_copy(mask, diff);
-
- /*
- * if we don't have a cpu on the preferred NUMA, get
- * the list of the remaining available CPUs
- */
- if (cpumask_empty(mask)) {
- cpumask_andnot(diff, &set->mask, &set->used);
- cpumask_andnot(mask, diff, node_mask);
- }
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
-
- cpu = cpumask_first(mask);
- if (cpu >= nr_cpu_ids) /* empty */
- cpu = -1;
- else
- cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&dd->affinity->lock);
-
- free_cpumask_var(intrs);
-free_mask:
- free_cpumask_var(mask);
-free_diff:
- free_cpumask_var(diff);
-done:
- return cpu;
-}
-
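Editor's sketch: hfi1_get_proc_affinity() narrows the candidate set in a fixed order -- unused CPUs on the preferred NUMA node, minus interrupt-handler CPUs when that still leaves something, falling back to off-node CPUs only when the node is exhausted. A hedged bitmask rendering of that cascade (all mask values invented):

#include <stdint.h>
#include <stdio.h>

static int pick_cpu(uint64_t set_mask, uint64_t used,
		    uint64_t node_mask, uint64_t intrs)
{
	uint64_t diff = set_mask & ~used;	/* all unused CPUs */
	uint64_t mask = diff & node_mask;	/* unused CPUs on preferred node */

	if (mask & ~intrs)			/* avoid IRQ CPUs if possible */
		mask &= ~intrs;
	if (!mask)				/* node exhausted: go off-node */
		mask = diff & ~node_mask;
	return mask ? __builtin_ctzll(mask) : -1;
}

int main(void)
{
	/* CPUs 0-7 online, 0-3 on the device node, IRQs pinned to 0-1 */
	printf("%d\n", pick_cpu(0xff, 0x00, 0x0f, 0x03));	/* 2 */
	/* all node CPUs already used: falls back off-node */
	printf("%d\n", pick_cpu(0xff, 0x0f, 0x0f, 0x03));	/* 4 */
	return 0;
}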
-void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu)
-{
- struct cpu_mask_set *set = &dd->affinity->proc;
-
- if (cpu < 0)
- return;
- spin_lock(&dd->affinity->lock);
- cpumask_clear_cpu(cpu, &set->used);
- if (cpumask_empty(&set->used) && set->gen) {
- set->gen--;
- cpumask_copy(&set->used, &set->mask);
- }
- spin_unlock(&dd->affinity->lock);
-}
-
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/staging/rdma/hfi1/affinity.h
deleted file mode 100644
index b287e4963..000000000
--- a/drivers/staging/rdma/hfi1/affinity.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef _HFI1_AFFINITY_H
-#define _HFI1_AFFINITY_H
-
-#include "hfi.h"
-
-enum irq_type {
- IRQ_SDMA,
- IRQ_RCVCTXT,
- IRQ_GENERAL,
- IRQ_OTHER
-};
-
-/* Can be used for both memory and cpu */
-enum affinity_flags {
- AFF_AUTO,
- AFF_NUMA_LOCAL,
- AFF_DEV_LOCAL,
- AFF_IRQ_LOCAL
-};
-
-struct hfi1_msix_entry;
-
-/* Initialize driver affinity data */
-int hfi1_dev_affinity_init(struct hfi1_devdata *);
-/* Free driver affinity data */
-void hfi1_dev_affinity_free(struct hfi1_devdata *);
-/*
- * Set IRQ affinity to a CPU. The function will determine the
- * CPU and set the affinity to it.
- */
-int hfi1_get_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
-/*
- * Remove the IRQ's CPU affinity. This function also updates
- * any internal CPU tracking data
- */
-void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
-/*
- * Determine a CPU affinity for a user process, if the process does not
- * have an affinity set yet.
- */
-int hfi1_get_proc_affinity(struct hfi1_devdata *, int);
-/* Release a CPU used by a user process. */
-void hfi1_put_proc_affinity(struct hfi1_devdata *, int);
-
-#endif /* _HFI1_AFFINITY_H */
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/staging/rdma/hfi1/aspm.h
deleted file mode 100644
index 0d58fe3b4..000000000
--- a/drivers/staging/rdma/hfi1/aspm.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef _ASPM_H
-#define _ASPM_H
-
-#include "hfi.h"
-
-extern uint aspm_mode;
-
-enum aspm_mode {
- ASPM_MODE_DISABLED = 0, /* ASPM always disabled, performance mode */
- ASPM_MODE_ENABLED = 1, /* ASPM always enabled, power saving mode */
- ASPM_MODE_DYNAMIC = 2, /* ASPM enabled/disabled dynamically */
-};
-
-/* Time after which the timer interrupt will re-enable ASPM */
-#define ASPM_TIMER_MS 1000
-/* Time for which interrupts are ignored after a timer has been scheduled */
-#define ASPM_RESCHED_TIMER_MS (ASPM_TIMER_MS / 2)
-/* Two interrupts within this time trigger ASPM disable */
-#define ASPM_TRIGGER_MS 1
-#define ASPM_TRIGGER_NS (ASPM_TRIGGER_MS * 1000 * 1000ull)
-#define ASPM_L1_SUPPORTED(reg) \
- ((((reg) & PCI_EXP_LNKCAP_ASPMS) >> 10) & 0x2)
-
-static inline bool aspm_hw_l1_supported(struct hfi1_devdata *dd)
-{
- struct pci_dev *parent = dd->pcidev->bus->self;
- u32 up, dn;
-
- /*
- * If the driver does not have access to the upstream component,
- * it cannot support ASPM L1 at all.
- */
- if (!parent)
- return false;
-
- pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn);
- dn = ASPM_L1_SUPPORTED(dn);
-
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &up);
- up = ASPM_L1_SUPPORTED(up);
-
- /* ASPM works on A-step but is reported as not supported */
- return (!!dn || is_ax(dd)) && !!up;
-}
-
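Editor's note: ASPM_L1_SUPPORTED() is a two-bit field extraction. PCI_EXP_LNKCAP_ASPMS covers Link Capabilities bits 11:10 (mask 0x0c00), where bit 10 advertises L0s and bit 11 advertises L1, so shifting down by 10 and masking with 0x2 isolates the L1 bit. A quick standalone check (the register values are made up):

#include <stdio.h>

#define LNKCAP_ASPMS		0x00000c00	/* PCI_EXP_LNKCAP_ASPMS */
#define L1_SUPPORTED(reg)	((((reg) & LNKCAP_ASPMS) >> 10) & 0x2)

int main(void)
{
	printf("%d\n", L1_SUPPORTED(0x00000800));	/* L1 only  -> 2 */
	printf("%d\n", L1_SUPPORTED(0x00000400));	/* L0s only -> 0 */
	return 0;
}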
-/* Set L1 entrance latency for slower entry to L1 */
-static inline void aspm_hw_set_l1_ent_latency(struct hfi1_devdata *dd)
-{
- u32 l1_ent_lat = 0x4u;
- u32 reg32;
-
- pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, &reg32);
- reg32 &= ~PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK;
- reg32 |= l1_ent_lat << PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT;
- pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, reg32);
-}
-
-static inline void aspm_hw_enable_l1(struct hfi1_devdata *dd)
-{
- struct pci_dev *parent = dd->pcidev->bus->self;
-
- /*
- * If the driver does not have access to the upstream component,
- * it cannot support ASPM L1 at all.
- */
- if (!parent)
- return;
-
- /* Enable ASPM L1 first in upstream component and then downstream */
- pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPMC,
- PCI_EXP_LNKCTL_ASPM_L1);
- pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPMC,
- PCI_EXP_LNKCTL_ASPM_L1);
-}
-
-static inline void aspm_hw_disable_l1(struct hfi1_devdata *dd)
-{
- struct pci_dev *parent = dd->pcidev->bus->self;
-
- /* Disable ASPM L1 first in downstream component and then upstream */
- pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPMC, 0x0);
- if (parent)
- pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPMC, 0x0);
-}
-
-static inline void aspm_enable(struct hfi1_devdata *dd)
-{
- if (dd->aspm_enabled || aspm_mode == ASPM_MODE_DISABLED ||
- !dd->aspm_supported)
- return;
-
- aspm_hw_enable_l1(dd);
- dd->aspm_enabled = true;
-}
-
-static inline void aspm_disable(struct hfi1_devdata *dd)
-{
- if (!dd->aspm_enabled || aspm_mode == ASPM_MODE_ENABLED)
- return;
-
- aspm_hw_disable_l1(dd);
- dd->aspm_enabled = false;
-}
-
-static inline void aspm_disable_inc(struct hfi1_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->aspm_lock, flags);
- aspm_disable(dd);
- atomic_inc(&dd->aspm_disabled_cnt);
- spin_unlock_irqrestore(&dd->aspm_lock, flags);
-}
-
-static inline void aspm_enable_dec(struct hfi1_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->aspm_lock, flags);
- if (atomic_dec_and_test(&dd->aspm_disabled_cnt))
- aspm_enable(dd);
- spin_unlock_irqrestore(&dd->aspm_lock, flags);
-}
-
-/* ASPM processing for each receive context interrupt */
-static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd)
-{
- bool restart_timer;
- bool close_interrupts;
- unsigned long flags;
- ktime_t now, prev;
-
- /* Quickest exit for minimum impact */
- if (!rcd->aspm_intr_supported)
- return;
-
- spin_lock_irqsave(&rcd->aspm_lock, flags);
- /* PSM contexts are open */
- if (!rcd->aspm_intr_enable)
- goto unlock;
-
- prev = rcd->aspm_ts_last_intr;
- now = ktime_get();
- rcd->aspm_ts_last_intr = now;
-
- /* An interrupt pair close together in time */
- close_interrupts = ktime_to_ns(ktime_sub(now, prev)) < ASPM_TRIGGER_NS;
-
- /* Don't push out our timer till this much time has elapsed */
- restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) >
- ASPM_RESCHED_TIMER_MS * NSEC_PER_MSEC;
- restart_timer = restart_timer && close_interrupts;
-
- /* Disable ASPM and schedule timer */
- if (rcd->aspm_enabled && close_interrupts) {
- aspm_disable_inc(rcd->dd);
- rcd->aspm_enabled = false;
- restart_timer = true;
- }
-
- if (restart_timer) {
- mod_timer(&rcd->aspm_timer,
- jiffies + msecs_to_jiffies(ASPM_TIMER_MS));
- rcd->aspm_ts_timer_sched = now;
- }
-unlock:
- spin_unlock_irqrestore(&rcd->aspm_lock, flags);
-}
-
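Editor's sketch: the heuristic in aspm_ctx_disable() reduces to "two interrupts closer than ASPM_TRIGGER_NS disable ASPM; a timer re-enables it later; the timer is only pushed out once per ASPM_RESCHED_TIMER_MS". Below is a compressed userspace rendering with plain nanosecond counters in place of ktime and a flag in place of the kernel timer; the event times are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRIGGER_NS	(1ull * 1000 * 1000)	/* mirrors ASPM_TRIGGER_NS */
#define RESCHED_NS	(500ull * 1000 * 1000)	/* ASPM_RESCHED_TIMER_MS in ns */

static uint64_t last_intr_ns, timer_sched_ns;
static bool aspm_on = true;

static void on_interrupt(uint64_t now)
{
	bool close_pair = (now - last_intr_ns) < TRIGGER_NS;
	bool restart = close_pair && (now - timer_sched_ns) > RESCHED_NS;

	last_intr_ns = now;
	if (aspm_on && close_pair) {	/* bursty traffic: get out of L1 */
		aspm_on = false;
		restart = true;
	}
	if (restart)
		timer_sched_ns = now;	/* push the re-enable timer out */
}

int main(void)
{
	uint64_t t = 10ull * 1000 * 1000 * 1000;

	on_interrupt(t);
	on_interrupt(t + 200 * 1000);	/* second interrupt 200 us later */
	printf("aspm %s\n", aspm_on ? "on" : "off");	/* off */
	return 0;
}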
-/* Timer function for re-enabling ASPM in the absence of interrupt activity */
-static inline void aspm_ctx_timer_function(unsigned long data)
-{
- struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&rcd->aspm_lock, flags);
- aspm_enable_dec(rcd->dd);
- rcd->aspm_enabled = true;
- spin_unlock_irqrestore(&rcd->aspm_lock, flags);
-}
-
-/* Disable interrupt processing for verbs contexts when PSM contexts are open */
-static inline void aspm_disable_all(struct hfi1_devdata *dd)
-{
- struct hfi1_ctxtdata *rcd;
- unsigned long flags;
- unsigned i;
-
- for (i = 0; i < dd->first_user_ctxt; i++) {
- rcd = dd->rcd[i];
- del_timer_sync(&rcd->aspm_timer);
- spin_lock_irqsave(&rcd->aspm_lock, flags);
- rcd->aspm_intr_enable = false;
- spin_unlock_irqrestore(&rcd->aspm_lock, flags);
- }
-
- aspm_disable(dd);
- atomic_set(&dd->aspm_disabled_cnt, 0);
-}
-
-/* Re-enable interrupt processing for verbs contexts */
-static inline void aspm_enable_all(struct hfi1_devdata *dd)
-{
- struct hfi1_ctxtdata *rcd;
- unsigned long flags;
- unsigned i;
-
- aspm_enable(dd);
-
- if (aspm_mode != ASPM_MODE_DYNAMIC)
- return;
-
- for (i = 0; i < dd->first_user_ctxt; i++) {
- rcd = dd->rcd[i];
- spin_lock_irqsave(&rcd->aspm_lock, flags);
- rcd->aspm_intr_enable = true;
- rcd->aspm_enabled = true;
- spin_unlock_irqrestore(&rcd->aspm_lock, flags);
- }
-}
-
-static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
-{
- spin_lock_init(&rcd->aspm_lock);
- setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function,
- (unsigned long)rcd);
- rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
- aspm_mode == ASPM_MODE_DYNAMIC &&
- rcd->ctxt < rcd->dd->first_user_ctxt;
-}
-
-static inline void aspm_init(struct hfi1_devdata *dd)
-{
- unsigned i;
-
- spin_lock_init(&dd->aspm_lock);
- dd->aspm_supported = aspm_hw_l1_supported(dd);
-
- for (i = 0; i < dd->first_user_ctxt; i++)
- aspm_ctx_init(dd->rcd[i]);
-
- /* Start with ASPM disabled */
- aspm_hw_set_l1_ent_latency(dd);
- dd->aspm_enabled = false;
- aspm_hw_disable_l1(dd);
-
- /* Now turn on ASPM if configured */
- aspm_enable_all(dd);
-}
-
-static inline void aspm_exit(struct hfi1_devdata *dd)
-{
- aspm_disable_all(dd);
-
- /* Turn on ASPM on exit to conserve power */
- aspm_enable(dd);
-}
-
-#endif /* _ASPM_H */
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
deleted file mode 100644
index 16eb65390..000000000
--- a/drivers/staging/rdma/hfi1/chip.c
+++ /dev/null
@@ -1,14418 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This file contains all of the code that is specific to the HFI chip
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-
-#include "hfi.h"
-#include "trace.h"
-#include "mad.h"
-#include "pio.h"
-#include "sdma.h"
-#include "eprom.h"
-#include "efivar.h"
-#include "platform.h"
-#include "aspm.h"
-
-#define NUM_IB_PORTS 1
-
-uint kdeth_qp;
-module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
-MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
-
-uint num_vls = HFI1_MAX_VLS_SUPPORTED;
-module_param(num_vls, uint, S_IRUGO);
-MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
-
-/*
- * Default time to aggregate two 10K packets from the idle state
- * (timer not running). The timer starts at the end of the first packet,
- * so only the time for one 10K packet and header plus a bit extra is needed.
- * 10 * 1024 + 64 header bytes = 10304 bytes
- * 10304 bytes / 12.5 GB/s = 824.32 ns
- */
-uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
-module_param(rcv_intr_timeout, uint, S_IRUGO);
-MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
-
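Editor's note: the default is just the arithmetic from the comment plus 16 ns of slack for the coalescing interrupt; at 12.5 GB/s the link moves 12.5 bytes per nanosecond. A one-line check:

#include <stdio.h>

int main(void)
{
	unsigned bytes = 10 * 1024 + 64;			/* 10304 */
	printf("%u bytes -> %.2f ns\n", bytes, bytes / 12.5);	/* 824.32 */
	return 0;
}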
-uint rcv_intr_count = 16; /* same as qib */
-module_param(rcv_intr_count, uint, S_IRUGO);
-MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
-
-ushort link_crc_mask = SUPPORTED_CRCS;
-module_param(link_crc_mask, ushort, S_IRUGO);
-MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
-
-uint loopback;
-module_param_named(loopback, loopback, uint, S_IRUGO);
-MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
-
-/* Other driver tunables */
-uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
-static ushort crc_14b_sideband = 1;
-static uint use_flr = 1;
-uint quick_linkup; /* skip LNI */
-
-struct flag_table {
- u64 flag; /* the flag */
- char *str; /* description string */
- u16 extra; /* extra information */
- u16 unused0;
- u32 unused1;
-};
-
-/* str must be a string constant */
-#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
-#define FLAG_ENTRY0(str, flag) {flag, str, 0}
-
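Editor's sketch: tables built from FLAG_ENTRY()/FLAG_ENTRY0() are consumed by walking the entries and testing each mask against a raw hardware status value. A minimal standalone decoder in that style (the table contents here are invented, not chip registers):

#include <stdint.h>
#include <stdio.h>

struct flag_entry { uint64_t flag; const char *str; };

static const struct flag_entry demo_flags[] = {
	{ 1ull << 0, "CsrParityErr" },
	{ 1ull << 1, "CsrReadBadAddrErr" },
	{ 1ull << 2, "CsrWriteBadAddrErr" },
};

static void decode(uint64_t status)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_flags) / sizeof(demo_flags[0]); i++)
		if (status & demo_flags[i].flag)
			printf("%s ", demo_flags[i].str);
	printf("\n");
}

int main(void)
{
	decode(0x5);	/* prints: CsrParityErr CsrWriteBadAddrErr */
	return 0;
}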
-/* Send Error Consequences */
-#define SEC_WRITE_DROPPED 0x1
-#define SEC_PACKET_DROPPED 0x2
-#define SEC_SC_HALTED 0x4 /* per-context only */
-#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
-
-#define MIN_KERNEL_KCTXTS 2
-#define FIRST_KERNEL_KCTXT 1
-#define NUM_MAP_REGS 32
-
-/* Bit offset into the GUID which carries HFI id information */
-#define GUID_HFI_INDEX_SHIFT 39
-
-/* extract the emulation revision */
-#define emulator_rev(dd) ((dd)->irev >> 8)
-/* parallel and serial emulation versions are 3 and 4 respectively */
-#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
-#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
-
-/* RSM fields */
-
-/* packet type */
-#define IB_PACKET_TYPE 2ull
-#define QW_SHIFT 6ull
-/* QPN[7..1] */
-#define QPN_WIDTH 7ull
-
-/* LRH.BTH: QW 0, OFFSET 48 - for match */
-#define LRH_BTH_QW 0ull
-#define LRH_BTH_BIT_OFFSET 48ull
-#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
-#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
-#define LRH_BTH_SELECT
-#define LRH_BTH_MASK 3ull
-#define LRH_BTH_VALUE 2ull
-
-/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
-#define LRH_SC_QW 0ull
-#define LRH_SC_BIT_OFFSET 56ull
-#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
-#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
-#define LRH_SC_MASK 128ull
-#define LRH_SC_VALUE 0ull
-
-/* SC[n..0] QW 0, OFFSET 60 - for select */
-#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
-
-/* QPN[m+n:1] QW 1, OFFSET 1 */
-#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
-
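Editor's note: the RSM match/select offsets pack a quad-word index and a bit position into one field, (qw << QW_SHIFT) | bit; a quad word is 64 bits, so the low six bits address the bit within the QW. A small round-trip check using the QPN select offset defined above:

#include <stdio.h>

#define QW_SHIFT	6
#define OFFSET(qw, bit)	(((qw) << QW_SHIFT) | (bit))

int main(void)
{
	unsigned int off = OFFSET(1, 1);	/* QPN select: QW 1, bit 1 */

	printf("offset %u -> qw %u, bit %u\n",
	       off, off >> QW_SHIFT, off & 63);	/* 65 -> qw 1, bit 1 */
	return 0;
}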
-/* defines to build power on SC2VL table */
-#define SC2VL_VAL( \
- num, \
- sc0, sc0val, \
- sc1, sc1val, \
- sc2, sc2val, \
- sc3, sc3val, \
- sc4, sc4val, \
- sc5, sc5val, \
- sc6, sc6val, \
- sc7, sc7val) \
-( \
- ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
- ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
- ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
- ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
- ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
- ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
- ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
- ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
-)
-
-#define DC_SC_VL_VAL( \
- range, \
- e0, e0val, \
- e1, e1val, \
- e2, e2val, \
- e3, e3val, \
- e4, e4val, \
- e5, e5val, \
- e6, e6val, \
- e7, e7val, \
- e8, e8val, \
- e9, e9val, \
- e10, e10val, \
- e11, e11val, \
- e12, e12val, \
- e13, e13val, \
- e14, e14val, \
- e15, e15val) \
-( \
- ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
- ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
- ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
- ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
- ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
- ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
- ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
- ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
- ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
- ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
- ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
- ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
- ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
- ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
- ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
- ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
-)
-
-/* all CceStatus sub-block freeze bits */
-#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
- | CCE_STATUS_RXE_FROZE_SMASK \
- | CCE_STATUS_TXE_FROZE_SMASK \
- | CCE_STATUS_TXE_PIO_FROZE_SMASK)
-/* all CceStatus sub-block TXE pause bits */
-#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
- | CCE_STATUS_TXE_PAUSED_SMASK \
- | CCE_STATUS_SDMA_PAUSED_SMASK)
-/* all CceStatus sub-block RXE pause bits */
-#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
-
-/*
- * CCE Error flags.
- */
-static struct flag_table cce_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
- CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
-/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
- CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
-/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
- CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
-/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
- CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
-/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
- CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
-/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
- CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
-/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
- CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
-/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
- CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
-/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
- CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
-/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
- CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
-/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
- CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
-/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
- CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
-/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
- CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
-/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
- CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
-/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
- CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
-/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
- CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
-/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
- CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
-/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
- CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
-/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
- CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
-/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
- CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
-/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
- CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
-/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
- CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
-/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
- CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
-/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
- CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
-/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
- CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
-/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
- CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
-/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
- CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
-/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
- CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
-/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
- CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
-/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
- CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
-/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
- CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
-/*31*/ FLAG_ENTRY0("LATriggered",
- CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
-/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
- CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
-/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
- CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
-/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
- CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
-/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
- CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
-/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
- CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
-/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
- CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
-/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
- CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
-/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
- CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
-/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
- CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
-/*41-63 reserved*/
-};
-
-/*
- * Misc Error flags
- */
-#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
-static struct flag_table misc_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
-/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
-/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
-/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
-/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
-/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
-/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
-/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
-/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
-/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
-/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
-/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
-/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
-};
-
-/*
- * TXE PIO Error flags and consequences
- */
-static struct flag_table pio_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
- SEC_WRITE_DROPPED,
- SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
-/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
-/* 2*/ FLAG_ENTRY("PioCsrParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
-/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
-/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
-/* 5*/ FLAG_ENTRY("PioPccFifoParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
-/* 6*/ FLAG_ENTRY("PioPecFifoParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
-/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
-/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
-/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
-/*10*/ FLAG_ENTRY("PioSmPktResetParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
-/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
-/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
-/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
- 0,
- SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
-/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
- 0,
- SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
-/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
-/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
-/*17*/ FLAG_ENTRY("PioInitSmIn",
- 0,
- SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
-/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
-/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
-/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
- 0,
- SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
-/*21*/ FLAG_ENTRY("PioWriteDataParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
-/*22*/ FLAG_ENTRY("PioStateMachine",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
-/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
- SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
-/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
- SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
-/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
-/*26*/ FLAG_ENTRY("PioVlfSopParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
-/*27*/ FLAG_ENTRY("PioVlFifoParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
-/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
-/*29*/ FLAG_ENTRY("PioPpmcSopLen",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
-/*30-31 reserved*/
-/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
-/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
-/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
-/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
- SEC_SPC_FREEZE,
- SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
-/*36-63 reserved*/
-};
-
-/* TXE PIO errors that cause an SPC freeze */
-#define ALL_PIO_FREEZE_ERR \
- (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
- | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
-
-/*
- * TXE SDMA Error flags
- */
-static struct flag_table sdma_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
- SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
-/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
- SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
-/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
- SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
-/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
- SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
-/*04-63 reserved*/
-};
-
-/* TXE SDMA errors that cause an SPC freeze */
-#define ALL_SDMA_FREEZE_ERR \
- (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
- | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
- | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
-
-/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
-#define PORT_DISCARD_EGRESS_ERRS \
- (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
- | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
- | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
-
-/*
- * TXE Egress Error flags
- */
-#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
-static struct flag_table egress_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
-/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
-/* 2 reserved */
-/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
- SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
-/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
-/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
-/* 6 reserved */
-/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
- SEES(TX_PIO_LAUNCH_INTF_PARITY)),
-/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
- SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
-/* 9-10 reserved */
-/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
- SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
-/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
-/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
-/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
-/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
-/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
- SEES(TX_SDMA0_DISALLOWED_PACKET)),
-/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
- SEES(TX_SDMA1_DISALLOWED_PACKET)),
-/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
- SEES(TX_SDMA2_DISALLOWED_PACKET)),
-/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
- SEES(TX_SDMA3_DISALLOWED_PACKET)),
-/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
- SEES(TX_SDMA4_DISALLOWED_PACKET)),
-/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
- SEES(TX_SDMA5_DISALLOWED_PACKET)),
-/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
- SEES(TX_SDMA6_DISALLOWED_PACKET)),
-/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
- SEES(TX_SDMA7_DISALLOWED_PACKET)),
-/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
- SEES(TX_SDMA8_DISALLOWED_PACKET)),
-/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
- SEES(TX_SDMA9_DISALLOWED_PACKET)),
-/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
- SEES(TX_SDMA10_DISALLOWED_PACKET)),
-/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
- SEES(TX_SDMA11_DISALLOWED_PACKET)),
-/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
- SEES(TX_SDMA12_DISALLOWED_PACKET)),
-/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
- SEES(TX_SDMA13_DISALLOWED_PACKET)),
-/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
- SEES(TX_SDMA14_DISALLOWED_PACKET)),
-/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
- SEES(TX_SDMA15_DISALLOWED_PACKET)),
-/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
- SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
-/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
- SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
-/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
- SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
-/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
- SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
-/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
- SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
-/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
- SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
-/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
- SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
-/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
- SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
-/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
- SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
-/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
-/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
-/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
-/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
-/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
-/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
-/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
-/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
-/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
-/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
-/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
-/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
-/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
-/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
-/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
-/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
-/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
-/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
-/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
-/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
-/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
-/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
- SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
-/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
- SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
-};
-
-/*
- * TXE Egress Error Info flags
- */
-#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
-static struct flag_table egress_err_info_flags[] = {
-/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
-/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
-/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
-/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
-/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
-/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
-/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
-/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
-/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
-/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
-/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
-/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
-/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
-/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
-/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
-/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
-/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
-/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
-/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
-/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
-/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
-/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
-};
-
-/* TXE Egress errors that cause an SPC freeze */
-#define ALL_TXE_EGRESS_FREEZE_ERR \
- (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
- | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
- | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
- | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
- | SEES(TX_LAUNCH_CSR_PARITY) \
- | SEES(TX_SBRD_CTL_CSR_PARITY) \
- | SEES(TX_CONFIG_PARITY) \
- | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
- | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
- | SEES(TX_CREDIT_RETURN_PARITY))
-
-/*
- * TXE Send error flags
- */
-#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
-static struct flag_table send_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
-/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
-/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
-};
-
-/*
- * TXE Send Context Error flags and consequences
- */
-static struct flag_table sc_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY("InconsistentSop",
- SEC_PACKET_DROPPED | SEC_SC_HALTED,
- SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
-/* 1*/ FLAG_ENTRY("DisallowedPacket",
- SEC_PACKET_DROPPED | SEC_SC_HALTED,
- SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
-/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
- SEC_WRITE_DROPPED | SEC_SC_HALTED,
- SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
-/* 3*/ FLAG_ENTRY("WriteOverflow",
- SEC_WRITE_DROPPED | SEC_SC_HALTED,
- SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
-/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
- SEC_WRITE_DROPPED | SEC_SC_HALTED,
- SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
-/* 5-63 reserved*/
-};
-
-/*
- * RXE Receive Error flags
- */
-#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
-static struct flag_table rxe_err_status_flags[] = {
-/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
-/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
-/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
-/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
-/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
-/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
-/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
-/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
-/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
-/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
-/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
-/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
-/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
-/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
-/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
-/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
-/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
- RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
-/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
-/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
-/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
- RXES(RBUF_BLOCK_LIST_READ_UNC)),
-/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
- RXES(RBUF_BLOCK_LIST_READ_COR)),
-/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
- RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
-/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
- RXES(RBUF_CSR_QENT_CNT_PARITY)),
-/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
- RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
-/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
- RXES(RBUF_CSR_QVLD_BIT_PARITY)),
-/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
-/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
-/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
- RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
-/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
-/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
-/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
-/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
-/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
-/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
-/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
-/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
- RXES(RBUF_FL_INITDONE_PARITY)),
-/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
- RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
-/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
-/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
-/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
-/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
- RXES(LOOKUP_DES_PART1_UNC_COR)),
-/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
- RXES(LOOKUP_DES_PART2_PARITY)),
-/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
-/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
-/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
-/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
-/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
-/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
-/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
-/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
-/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
-/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
-/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
-/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
-/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
-/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
-/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
-/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
-/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
-/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
-/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
-/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
-/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
-/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
-};
-
-/* RXE errors that will trigger an SPC freeze */
-#define ALL_RXE_FREEZE_ERR \
- (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
- | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
- | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
- | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
-
-#define RXE_FREEZE_ABORT_MASK \
- (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
- RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
- RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
-
-/*
- * DCC Error Flags
- */
-#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
-static struct flag_table dcc_err_flags[] = {
- FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
- FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
- FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
- FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
- FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
- FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
- FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
- FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
- FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
- FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
- FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
- FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
- FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
- FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
- FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
- FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
- FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
- FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
- FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
- FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
- FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
- FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
- FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
- FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
- FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
- FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
- FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
- FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
- FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
- FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
- FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
- FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
- FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
- FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
- FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
- FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
- FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
- FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
- FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
- FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
- FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
- FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
- FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
- FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
- FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
- FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
-};
-
-/*
- * LCB error flags
- */
-#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
-static struct flag_table lcb_err_flags[] = {
-/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
-/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
-/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
-/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
- LCBE(ALL_LNS_FAILED_REINIT_TEST)),
-/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
-/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
-/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
-/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
-/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
-/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
-/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
-/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
-/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
-/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
- LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
-/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
-/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
-/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
-/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
-/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
-/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
- LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
-/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
-/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
-/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
-/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
-/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
-/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
-/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
- LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
-/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
-/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
- LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
-/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
- LCBE(REDUNDANT_FLIT_PARITY_ERR))
-};
-
-/*
- * DC8051 Error Flags
- */
-#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
-static struct flag_table dc8051_err_flags[] = {
- FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
- FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
- FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
- FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
- FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
- FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
- FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
- FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
- FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
- D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
- FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
-};
-
-/*
- * DC8051 Information Error flags
- *
- * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
- */
-static struct flag_table dc8051_info_err_flags[] = {
- FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
- FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
- FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
- FLAG_ENTRY0("Serdes internal loopback failure",
- FAILED_SERDES_INTERNAL_LOOPBACK),
- FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
- FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
- FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
- FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
- FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
- FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
- FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
- FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
- FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
-};
-
-/*
- * DC8051 Information Host Message flags
- *
- * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
- */
-static struct flag_table dc8051_info_host_msg_flags[] = {
- FLAG_ENTRY0("Host request done", 0x0001),
- FLAG_ENTRY0("BC SMA message", 0x0002),
- FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
- FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
- FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
- FLAG_ENTRY0("External device config request", 0x0020),
- FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
- FLAG_ENTRY0("LinkUp achieved", 0x0080),
- FLAG_ENTRY0("Link going down", 0x0100),
-};
-
-static u32 encoded_size(u32 size);
-static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
-static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
-static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
- u8 *continuous);
-static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
- u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
-static void read_vc_remote_link_width(struct hfi1_devdata *dd,
- u8 *remote_tx_rate, u16 *link_widths);
-static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
- u8 *flag_bits, u16 *link_widths);
-static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
- u8 *device_rev);
-static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
-static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
-static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
- u8 *tx_polarity_inversion,
- u8 *rx_polarity_inversion, u8 *max_rate);
-static void handle_sdma_eng_err(struct hfi1_devdata *dd,
- unsigned int context, u64 err_status);
-static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
-static void handle_dcc_err(struct hfi1_devdata *dd,
- unsigned int context, u64 err_status);
-static void handle_lcb_err(struct hfi1_devdata *dd,
- unsigned int context, u64 err_status);
-static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
-static void set_partition_keys(struct hfi1_pportdata *);
-static const char *link_state_name(u32 state);
-static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
- u32 state);
-static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data);
-static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
-static int thermal_init(struct hfi1_devdata *dd);
-
-static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
- int msecs);
-static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
-static void handle_temp_err(struct hfi1_devdata *);
-static void dc_shutdown(struct hfi1_devdata *);
-static void dc_start(struct hfi1_devdata *);
-
-/*
- * Error interrupt table entry. This is used as input to the interrupt
- * "clear down" routine used for all second tier error interrupt register.
- * Second tier interrupt registers have a single bit representing them
- * in the top-level CceIntStatus.
- */
-struct err_reg_info {
- u32 status; /* status CSR offset */
- u32 clear; /* clear CSR offset */
- u32 mask; /* mask CSR offset */
- void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
- const char *desc;
-};
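A minimal sketch of the "clear down" pattern this structure feeds, assuming the read_csr()/write_csr() accessors defined later in this file; the helper name here is illustrative:

	static void err_clear_down(struct hfi1_devdata *dd, u32 source,
				   const struct err_reg_info *eri)
	{
		/* snapshot the second-tier error bits, then ack them */
		u64 reg = read_csr(dd, eri->status);

		write_csr(dd, eri->clear, reg);
		if (eri->handler)
			eri->handler(dd, source, reg);
	}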
-
-#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
-#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
-#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
-
-/*
- * Helpers for building HFI and DC error interrupt table entries. Different
- * helpers are needed because of inconsistent register names.
- */
-#define EE(reg, handler, desc) \
- { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
- handler, desc }
-#define DC_EE1(reg, handler, desc) \
- { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
-#define DC_EE2(reg, handler, desc) \
- { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
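For example, DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err") expands to

	{ DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN,
	  handle_dcc_err, "DCC Err" }

while the LCB and DC8051 blocks need DC_EE2 because their clear and enable CSRs drop the _FLG infix (e.g. DC_DC8051_ERR_FLG / DC_DC8051_ERR_CLR / DC_DC8051_ERR_EN).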
-
-/*
- * Table of the "misc" grouping of error interrupts. Each entry refers to
- * another register containing more information.
- */
-static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
-/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
-/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
-/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
-/* 3*/ { 0, 0, 0, NULL }, /* reserved */
-/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
-/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
-/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
-/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
- /* the rest are reserved */
-};
-
-/*
- * Index into the Various section of the interrupt sources
- * corresponding to the Critical Temperature interrupt.
- */
-#define TCRIT_INT_SOURCE 4
-
-/*
- * SDMA error interrupt entry - refers to another register containing more
- * information.
- */
-static const struct err_reg_info sdma_eng_err =
- EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
-
-static const struct err_reg_info various_err[NUM_VARIOUS] = {
-/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
-/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
-/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
-/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
-/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
- /* rest are reserved */
-};
-
-/*
- * The DC encoding of mtu_cap for a 10K MTU in the DCC_CFG_PORT_CONFIG
- * register cannot be derived from the MTU value because 10K is not a
- * power of 2. Therefore, we need a constant for it; everything else
- * can be calculated.
- */
-#define DCC_CFG_PORT_MTU_CAP_10240 7
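For the power-of-2 sizes the encoding can indeed be calculated; a minimal sketch, assuming the usual OPA progression 256 -> 1, 512 -> 2, ..., 8192 -> 6, with the helper name hypothetical:

	#include <linux/log2.h>

	static u32 mtu_to_dcc_mtu_cap(u32 mtu)
	{
		if (mtu == 10240)		/* not a power of 2 */
			return DCC_CFG_PORT_MTU_CAP_10240;
		return ilog2(mtu) - 7;		/* 256 -> 1, ..., 8192 -> 6 */
	}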
-
-/*
- * Table of the DC grouping of error interrupts. Each entry refers to
- * another register containing more information.
- */
-static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
-/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
-/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
-/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
-/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
- /* the rest are reserved */
-};
-
-struct cntr_entry {
- /*
- * counter name
- */
- char *name;
-
- /*
-	 * CSR to read for this counter (if applicable)
- */
- u64 csr;
-
- /*
- * offset into dd or ppd to store the counter's value
- */
- int offset;
-
- /*
- * flags
- */
- u8 flags;
-
- /*
-	 * accessor for the stat element; context is either dd or ppd
- */
- u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
- int mode, u64 data);
-};
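A minimal sketch of how one of these entries is driven; dev_cntrs[] is an assumed device-level table, and in CNTR_MODE_R the data argument is ignored:

	const struct cntr_entry *e = &dev_cntrs[i];
	u64 val;

	val = e->rw_cntr(e, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);	/* read */
	e->rw_cntr(e, dd, CNTR_INVALID_VL, CNTR_MODE_W, 0);		/* zero */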
-
-#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
-#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
-
-#define CNTR_ELEM(name, csr, offset, flags, accessor) \
-{ \
- name, \
- csr, \
- offset, \
- flags, \
- accessor \
-}
-
-/* 32bit RXE */
-#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + RCV_COUNTER_ARRAY32), \
- 0, flags | CNTR_32BIT, \
- port_access_u32_csr)
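For illustration, a hypothetical RXE32_PORT_CNTR_ELEM(RxFooErr, 3, CNTR_NORMAL) expands via CNTR_ELEM to roughly:

	{
		"RxFooErr",			/* #name stringized */
		(3 * 8 + RCV_COUNTER_ARRAY32),	/* 8 bytes per counter CSR */
		0,
		CNTR_NORMAL | CNTR_32BIT,
		port_access_u32_csr
	}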
-
-#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + RCV_COUNTER_ARRAY32), \
- 0, flags | CNTR_32BIT, \
- dev_access_u32_csr)
-
-/* 64bit RXE */
-#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + RCV_COUNTER_ARRAY64), \
- 0, flags, \
- port_access_u64_csr)
-
-#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + RCV_COUNTER_ARRAY64), \
- 0, flags, \
- dev_access_u64_csr)
-
-#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
-#define OVR_ELM(ctx) \
-CNTR_ELEM("RcvHdrOvr" #ctx, \
- (RCV_HDR_OVFL_CNT + ctx * 0x100), \
- 0, CNTR_NORMAL, port_access_u64_csr)
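Likewise, OVR_ELM(0) expands to

	CNTR_ELEM("RcvHdrOvr0", (RCV_HDR_OVFL_CNT + 0 * 0x100),
		  0, CNTR_NORMAL, port_access_u64_csr)

the 0x100 multiplier reflecting the per-receive-context CSR stride.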
-
-/* 32bit TXE */
-#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + SEND_COUNTER_ARRAY32), \
- 0, flags | CNTR_32BIT, \
- port_access_u32_csr)
-
-/* 64bit TXE */
-#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + SEND_COUNTER_ARRAY64), \
- 0, flags, \
- port_access_u64_csr)
-
-#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
-	  (counter * 8 + SEND_COUNTER_ARRAY64), \
-	  0, flags, \
-	  dev_access_u64_csr)
-
-/* CCE */
-#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + CCE_COUNTER_ARRAY32), \
- 0, flags | CNTR_32BIT, \
- dev_access_u32_csr)
-
-#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
-CNTR_ELEM(#name, \
- (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
- 0, flags | CNTR_32BIT, \
- dev_access_u32_csr)
-
-/* DC */
-#define DC_PERF_CNTR(name, counter, flags) \
-CNTR_ELEM(#name, \
- counter, \
- 0, \
- flags, \
- dev_access_u64_csr)
-
-#define DC_PERF_CNTR_LCB(name, counter, flags) \
-CNTR_ELEM(#name, \
- counter, \
- 0, \
- flags, \
- dc_access_lcb_cntr)
-
-/* ibp counters */
-#define SW_IBP_CNTR(name, cntr) \
-CNTR_ELEM(#name, \
- 0, \
- 0, \
- CNTR_SYNTH, \
- access_ibp_##cntr)
-
-u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
-{
-	if (dd->flags & HFI1_PRESENT)
-		return readq((void __iomem *)dd->kregbase + offset);
-	return -1;	/* reads back as all ones when the device is absent */
-}
-
-void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
-{
- if (dd->flags & HFI1_PRESENT)
- writeq(value, (void __iomem *)dd->kregbase + offset);
-}
-
-void __iomem *get_csr_addr(struct hfi1_devdata *dd, u32 offset)
-{
-	return (void __iomem *)dd->kregbase + offset;
-}
-
-static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
- int mode, u64 value)
-{
- u64 ret;
-
- if (mode == CNTR_MODE_R) {
- ret = read_csr(dd, csr);
- } else if (mode == CNTR_MODE_W) {
- write_csr(dd, csr, value);
- ret = value;
- } else {
- dd_dev_err(dd, "Invalid cntr register access mode");
- return 0;
- }
-
- hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
- return ret;
-}
-
-/* Dev Access */
-static u64 dev_access_u32_csr(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
- u64 csr = entry->csr;
-
- if (entry->flags & CNTR_SDMA) {
- if (vl == CNTR_INVALID_VL)
- return 0;
- csr += 0x100 * vl;
- } else {
- if (vl != CNTR_INVALID_VL)
- return 0;
- }
- return read_write_csr(dd, csr, mode, data);
-}
-
-static u64 access_sde_err_cnt(const struct cntr_entry *entry,
- void *context, int idx, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- if (dd->per_sdma && idx < dd->num_sdma)
- return dd->per_sdma[idx].err_cnt;
- return 0;
-}
-
-static u64 access_sde_int_cnt(const struct cntr_entry *entry,
- void *context, int idx, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- if (dd->per_sdma && idx < dd->num_sdma)
- return dd->per_sdma[idx].sdma_int_cnt;
- return 0;
-}
-
-static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
- void *context, int idx, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- if (dd->per_sdma && idx < dd->num_sdma)
- return dd->per_sdma[idx].idle_int_cnt;
- return 0;
-}
-
-static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
- void *context, int idx, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- if (dd->per_sdma && idx < dd->num_sdma)
- return dd->per_sdma[idx].progress_int_cnt;
- return 0;
-}
-
-static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
-
- u64 val = 0;
- u64 csr = entry->csr;
-
- if (entry->flags & CNTR_VL) {
- if (vl == CNTR_INVALID_VL)
- return 0;
- csr += 8 * vl;
- } else {
- if (vl != CNTR_INVALID_VL)
- return 0;
- }
-
- val = read_write_csr(dd, csr, mode, data);
- return val;
-}
-
-static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
- u32 csr = entry->csr;
- int ret = 0;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
- if (mode == CNTR_MODE_R)
- ret = read_lcb_csr(dd, csr, &data);
- else if (mode == CNTR_MODE_W)
- ret = write_lcb_csr(dd, csr, data);
-
- if (ret) {
- dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
- return 0;
- }
-
- hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
- return data;
-}
-
-/* Port Access */
-static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
-{
- struct hfi1_pportdata *ppd = context;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
- return read_write_csr(ppd->dd, entry->csr, mode, data);
-}
-
-static u64 port_access_u64_csr(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_pportdata *ppd = context;
- u64 val;
- u64 csr = entry->csr;
-
- if (entry->flags & CNTR_VL) {
- if (vl == CNTR_INVALID_VL)
- return 0;
- csr += 8 * vl;
- } else {
- if (vl != CNTR_INVALID_VL)
- return 0;
- }
- val = read_write_csr(ppd->dd, csr, mode, data);
- return val;
-}
-
-/* Software defined */
-static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
- u64 data)
-{
- u64 ret;
-
- if (mode == CNTR_MODE_R) {
- ret = *cntr;
- } else if (mode == CNTR_MODE_W) {
- *cntr = data;
- ret = data;
- } else {
- dd_dev_err(dd, "Invalid cntr sw access mode");
- return 0;
- }
-
- hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
-
- return ret;
-}
-
-static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
-{
- struct hfi1_pportdata *ppd = context;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
- return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
-}
-
-static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
-{
- struct hfi1_pportdata *ppd = context;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
- return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
-}
-
-static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
- return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
-}
-
-static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
- u64 zero = 0;
- u64 *counter;
-
- if (vl == CNTR_INVALID_VL)
- counter = &ppd->port_xmit_discards;
- else if (vl >= 0 && vl < C_VL_COUNT)
- counter = &ppd->port_xmit_discards_vl[vl];
- else
- counter = &zero;
-
- return read_write_sw(ppd->dd, counter, mode, data);
-}
-
-static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_pportdata *ppd = context;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
-
- return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
- mode, data);
-}
-
-static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_pportdata *ppd = context;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
-
- return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
- mode, data);
-}
-
-u64 get_all_cpu_total(u64 __percpu *cntr)
-{
- int cpu;
- u64 counter = 0;
-
- for_each_possible_cpu(cpu)
- counter += *per_cpu_ptr(cntr, cpu);
- return counter;
-}
-
-static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
- u64 __percpu *cntr,
- int vl, int mode, u64 data)
-{
- u64 ret = 0;
-
- if (vl != CNTR_INVALID_VL)
- return 0;
-
- if (mode == CNTR_MODE_R) {
- ret = get_all_cpu_total(cntr) - *z_val;
- } else if (mode == CNTR_MODE_W) {
- /* A write can only zero the counter */
- if (data == 0)
- *z_val = get_all_cpu_total(cntr);
- else
- dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
- } else {
- dd_dev_err(dd, "Invalid cntr sw cpu access mode");
- return 0;
- }
-
- return ret;
-}
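The z_val indirection gives software "reset" semantics without ever writing the per-CPU variables themselves; a small worked example (numbers illustrative, z is a u64 *):

	/* per-CPU totals sum to 100, *z is 0 */
	read_write_cpu(dd, z, cntr, CNTR_INVALID_VL, CNTR_MODE_R, 0);	/* 100 */
	read_write_cpu(dd, z, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);	/* *z = 100 */
	/* ... 30 more events accumulate across CPUs ... */
	read_write_cpu(dd, z, cntr, CNTR_INVALID_VL, CNTR_MODE_R, 0);	/* 30 */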
-
-static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
-
- return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
- mode, data);
-}
-
-static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
-
- return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
- mode, data);
-}
-
-static u64 access_sw_pio_wait(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
-
- return dd->verbs_dev.n_piowait;
-}
-
-static u64 access_sw_pio_drain(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->verbs_dev.n_piodrain;
-}
-
-static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
-
- return dd->verbs_dev.n_txwait;
-}
-
-static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = context;
-
- return dd->verbs_dev.n_kmem_wait;
-}
-
-static u64 access_sw_send_schedule(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
- mode, data);
-}
-
-/* Software counters for the error status bits within MISC_ERR_STATUS */
-static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[12];
-}
-
-static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[11];
-}
-
-static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[10];
-}
-
-static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[9];
-}
-
-static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[8];
-}
-
-static u64 access_misc_efuse_read_bad_addr_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[7];
-}
-
-static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[6];
-}
-
-static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[5];
-}
-
-static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[4];
-}
-
-static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[3];
-}
-
-static u64 access_misc_csr_write_bad_addr_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[2];
-}
-
-static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[1];
-}
-
-static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->misc_err_status_cnt[0];
-}
-
-/*
- * Software counter for the aggregate of
- * individual CceErrStatus counters
- */
-static u64 access_sw_cce_err_status_aggregated_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_cce_err_status_aggregate;
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within CceErrStatus
- */
-static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[40];
-}
-
-static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[39];
-}
-
-static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[38];
-}
-
-static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[37];
-}
-
-static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[36];
-}
-
-static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[35];
-}
-
-static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[34];
-}
-
-static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[33];
-}
-
-static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[32];
-}
-
-static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[31];
-}
-
-static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[30];
-}
-
-static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[29];
-}
-
-static u64 access_pcic_transmit_back_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[28];
-}
-
-static u64 access_pcic_transmit_front_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[27];
-}
-
-static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[26];
-}
-
-static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[25];
-}
-
-static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[24];
-}
-
-static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[23];
-}
-
-static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[22];
-}
-
-static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[21];
-}
-
-static u64 access_pcic_n_post_dat_q_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[20];
-}
-
-static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[19];
-}
-
-static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[18];
-}
-
-static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[17];
-}
-
-static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[16];
-}
-
-static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[15];
-}
-
-static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[14];
-}
-
-static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[13];
-}
-
-static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[12];
-}
-
-static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[11];
-}
-
-static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[10];
-}
-
-static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[9];
-}
-
-static u64 access_cce_cli2_async_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[8];
-}
-
-static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[7];
-}
-
-static u64 access_cce_cli0_async_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[6];
-}
-
-static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[5];
-}
-
-static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[4];
-}
-
-static u64 access_cce_trgt_async_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[3];
-}
-
-static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[2];
-}
-
-static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[1];
-}
-
-static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->cce_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within RcvErrStatus
- */
-static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[63];
-}
-
-static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[62];
-}
-
-static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[61];
-}
-
-static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[60];
-}
-
-static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[59];
-}
-
-static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[58];
-}
-
-static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[57];
-}
-
-static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[56];
-}
-
-static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[55];
-}
-
-static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[54];
-}
-
-static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[53];
-}
-
-static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[52];
-}
-
-static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[51];
-}
-
-static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[50];
-}
-
-static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[49];
-}
-
-static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[48];
-}
-
-static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[47];
-}
-
-static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[46];
-}
-
-static u64 access_rx_hq_intr_csr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[45];
-}
-
-static u64 access_rx_lookup_csr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[44];
-}
-
-static u64 access_rx_lookup_rcv_array_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[43];
-}
-
-static u64 access_rx_lookup_rcv_array_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[42];
-}
-
-static u64 access_rx_lookup_des_part2_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[41];
-}
-
-static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[40];
-}
-
-static u64 access_rx_lookup_des_part1_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[39];
-}
-
-static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[38];
-}
-
-static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[37];
-}
-
-static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[36];
-}
-
-static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[35];
-}
-
-static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[34];
-}
-
-static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[33];
-}
-
-static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[32];
-}
-
-static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[31];
-}
-
-static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[30];
-}
-
-static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[29];
-}
-
-static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[28];
-}
-
-static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[27];
-}
-
-static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[26];
-}
-
-static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[25];
-}
-
-static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[24];
-}
-
-static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[23];
-}
-
-static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[22];
-}
-
-static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[21];
-}
-
-static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[20];
-}
-
-static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[19];
-}
-
-static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[18];
-}
-
-static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[17];
-}
-
-static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[16];
-}
-
-static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[15];
-}
-
-static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[14];
-}
-
-static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[13];
-}
-
-static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[12];
-}
-
-static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[11];
-}
-
-static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[10];
-}
-
-static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[9];
-}
-
-static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[8];
-}
-
-static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[7];
-}
-
-static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[6];
-}
-
-static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[5];
-}
-
-static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[4];
-}
-
-static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[3];
-}
-
-static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[2];
-}
-
-static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[1];
-}
-
-static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->rcv_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within SendPioErrStatus
- */
-static u64 access_pio_pec_sop_head_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[35];
-}
-
-static u64 access_pio_pcc_sop_head_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[34];
-}
-
-static u64 access_pio_last_returned_cnt_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[33];
-}
-
-static u64 access_pio_current_free_cnt_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[32];
-}
-
-static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[31];
-}
-
-static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[30];
-}
-
-static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[29];
-}
-
-static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[28];
-}
-
-static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[27];
-}
-
-static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[26];
-}
-
-static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[25];
-}
-
-static u64 access_pio_block_qw_count_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[24];
-}
-
-static u64 access_pio_write_qw_valid_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[23];
-}
-
-static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[22];
-}
-
-static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[21];
-}
-
-static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[20];
-}
-
-static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[19];
-}
-
-static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[18];
-}
-
-static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[17];
-}
-
-static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[16];
-}
-
-static u64 access_pio_credit_ret_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[15];
-}
-
-static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[14];
-}
-
-static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[13];
-}
-
-static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[12];
-}
-
-static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[11];
-}
-
-static u64 access_pio_sm_pkt_reset_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[10];
-}
-
-static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[9];
-}
-
-static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[8];
-}
-
-static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[7];
-}
-
-static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[6];
-}
-
-static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[5];
-}
-
-static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[4];
-}
-
-static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[3];
-}
-
-static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[2];
-}
-
-static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[1];
-}
-
-static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_pio_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within SendDmaErrStatus
- */
-static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_dma_err_status_cnt[3];
-}
-
-static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_dma_err_status_cnt[2];
-}
-
-static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_dma_err_status_cnt[1];
-}
-
-static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_dma_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within SendEgressErrStatus
- */
-static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[63];
-}
-
-static u64 access_tx_read_sdma_memory_csr_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[62];
-}
-
-static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[61];
-}
-
-static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[60];
-}
-
-static u64 access_tx_read_sdma_memory_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[59];
-}
-
-static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[58];
-}
-
-static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[57];
-}
-
-static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[56];
-}
-
-static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[55];
-}
-
-static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[54];
-}
-
-static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[53];
-}
-
-static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[52];
-}
-
-static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[51];
-}
-
-static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[50];
-}
-
-static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[49];
-}
-
-static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[48];
-}
-
-static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[47];
-}
-
-static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[46];
-}
-
-static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[45];
-}
-
-static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[44];
-}
-
-static u64 access_tx_read_sdma_memory_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[43];
-}
-
-static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[42];
-}
-
-static u64 access_tx_credit_return_partiy_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[41];
-}
-
-static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[40];
-}
-
-static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[39];
-}
-
-static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[38];
-}
-
-static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[37];
-}
-
-static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[36];
-}
-
-static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[35];
-}
-
-static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[34];
-}
-
-static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[33];
-}
-
-static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[32];
-}
-
-static u64 access_tx_sdma15_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[31];
-}
-
-static u64 access_tx_sdma14_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[30];
-}
-
-static u64 access_tx_sdma13_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[29];
-}
-
-static u64 access_tx_sdma12_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[28];
-}
-
-static u64 access_tx_sdma11_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[27];
-}
-
-static u64 access_tx_sdma10_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[26];
-}
-
-static u64 access_tx_sdma9_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[25];
-}
-
-static u64 access_tx_sdma8_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[24];
-}
-
-static u64 access_tx_sdma7_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[23];
-}
-
-static u64 access_tx_sdma6_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[22];
-}
-
-static u64 access_tx_sdma5_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[21];
-}
-
-static u64 access_tx_sdma4_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[20];
-}
-
-static u64 access_tx_sdma3_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[19];
-}
-
-static u64 access_tx_sdma2_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[18];
-}
-
-static u64 access_tx_sdma1_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[17];
-}
-
-static u64 access_tx_sdma0_disallowed_packet_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[16];
-}
-
-static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[15];
-}
-
-static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[14];
-}
-
-static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[13];
-}
-
-static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[12];
-}
-
-static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[11];
-}
-
-static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[10];
-}
-
-static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[9];
-}
-
-static u64 access_tx_sdma_launch_intf_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[8];
-}
-
-static u64 access_tx_pio_launch_intf_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[7];
-}
-
-static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[6];
-}
-
-static u64 access_tx_incorrect_link_state_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[5];
-}
-
-static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[4];
-}
-
-static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[3];
-}
-
-static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[2];
-}
-
-static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[1];
-}
-
-static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_egress_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within SendErrStatus
- */
-static u64 access_send_csr_write_bad_addr_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_err_status_cnt[2];
-}
-
-static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_err_status_cnt[1];
-}
-
-static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->send_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within SendCtxtErrStatus
- */
-static u64 access_pio_write_out_of_bounds_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_ctxt_err_status_cnt[4];
-}
-
-static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_ctxt_err_status_cnt[3];
-}
-
-static u64 access_pio_write_crosses_boundary_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_ctxt_err_status_cnt[2];
-}
-
-static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_ctxt_err_status_cnt[1];
-}
-
-static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_ctxt_err_status_cnt[0];
-}
-
-/*
- * Software counters corresponding to each of the
- * error status bits within SendDmaEngErrStatus
- */
-static u64 access_sdma_header_request_fifo_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[23];
-}
-
-static u64 access_sdma_header_storage_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[22];
-}
-
-static u64 access_sdma_packet_tracking_cor_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[21];
-}
-
-static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[20];
-}
-
-static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[19];
-}
-
-static u64 access_sdma_header_request_fifo_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[18];
-}
-
-static u64 access_sdma_header_storage_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[17];
-}
-
-static u64 access_sdma_packet_tracking_unc_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[16];
-}
-
-static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[15];
-}
-
-static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[14];
-}
-
-static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[13];
-}
-
-static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[12];
-}
-
-static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[11];
-}
-
-static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[10];
-}
-
-static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[9];
-}
-
-static u64 access_sdma_packet_desc_overflow_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[8];
-}
-
-static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
- void *context, int vl,
- int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[7];
-}
-
-static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[6];
-}
-
-static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[5];
-}
-
-static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[4];
-}
-
-static u64 access_sdma_tail_out_of_bounds_err_cnt(
- const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[3];
-}
-
-static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[2];
-}
-
-static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[1];
-}
-
-static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
- void *context, int vl, int mode,
- u64 data)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
-
- return dd->sw_send_dma_eng_err_status_cnt[0];
-}
-
-#define def_access_sw_cpu(cntr) \
-static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
- void *context, int vl, int mode, u64 data) \
-{ \
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
- return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
- ppd->ibport_data.rvp.cntr, vl, \
- mode, data); \
-}
-
-def_access_sw_cpu(rc_acks);
-def_access_sw_cpu(rc_qacks);
-def_access_sw_cpu(rc_delayed_comp);
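For reference, a sketch of what def_access_sw_cpu(rc_acks) above would expand to. This is a mechanical expansion of the macro as shown, not code taken from the driver; read_write_cpu() and the rvp.z_rc_acks / rvp.rc_acks fields are assumed from the surrounding hfi1 code:

static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
				 void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	/* token pasting turns z_ ##cntr into z_rc_acks; plain substitution
	 * of cntr yields the live per-CPU counter rc_acks
	 */
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
			      ppd->ibport_data.rvp.rc_acks, vl,
			      mode, data);
}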
-
-#define def_access_ibp_counter(cntr) \
-static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
- void *context, int vl, int mode, u64 data) \
-{ \
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
- \
- if (vl != CNTR_INVALID_VL) \
- return 0; \
- \
- return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
- mode, data); \
-}
-
-def_access_ibp_counter(loop_pkts);
-def_access_ibp_counter(rc_resends);
-def_access_ibp_counter(rnr_naks);
-def_access_ibp_counter(other_naks);
-def_access_ibp_counter(rc_timeouts);
-def_access_ibp_counter(pkt_drops);
-def_access_ibp_counter(dmawait);
-def_access_ibp_counter(rc_seqnak);
-def_access_ibp_counter(rc_dupreq);
-def_access_ibp_counter(rdma_seq);
-def_access_ibp_counter(unaligned);
-def_access_ibp_counter(seq_naks);
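Similarly, def_access_ibp_counter(loop_pkts) would expand to roughly the following (again a sketch of the macro expansion, with read_write_sw() and the rvp.n_loop_pkts field assumed from elsewhere in the driver). The CNTR_INVALID_VL guard reflects that these ibport counters are port-wide rather than per-VL:

static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	/* ibp counters are not kept per VL; reject per-VL queries */
	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
			     mode, data);
}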
-
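The dev_cntrs[] table that follows uses C99 designated array initializers: each entry is keyed by its counter enum value, so entries may appear in any order and unlisted slots are zero-filled. A minimal standalone illustration of the idiom (all names here are hypothetical, not from the driver):

#include <stdio.h>

enum { C_FOO, C_BAR, C_LAST };

struct ent {
	const char *name;
};

/* index-keyed initializers, mirroring how dev_cntrs[] is laid out */
static struct ent tbl[C_LAST] = {
	[C_BAR] = { "bar" },
	[C_FOO] = { "foo" },
};

int main(void)
{
	printf("%s %s\n", tbl[C_FOO].name, tbl[C_BAR].name);
	return 0;
}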
-static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
-[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
-[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
- CNTR_NORMAL),
-[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
- CNTR_NORMAL),
-[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
- RCV_TID_FLOW_GEN_MISMATCH_CNT,
- CNTR_NORMAL),
-[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
- CNTR_NORMAL),
-[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
- RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
-[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
- CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
-[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
- CNTR_NORMAL),
-[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
- CNTR_NORMAL),
-[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
- CNTR_NORMAL),
-[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
- CNTR_NORMAL),
-[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
- CNTR_NORMAL),
-[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
- CNTR_NORMAL),
-[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
- CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
-[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
- CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
-[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
- CNTR_SYNTH),
-[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
-[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
- CNTR_SYNTH),
-[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
- CNTR_SYNTH),
-[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
- CNTR_SYNTH),
-[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
- DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
-[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
- DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
- CNTR_SYNTH),
-[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
- DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
-[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
- CNTR_SYNTH),
-[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
- CNTR_SYNTH),
-[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
- CNTR_SYNTH),
-[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
- CNTR_SYNTH),
-[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
- CNTR_SYNTH),
-[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
- CNTR_SYNTH),
-[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
- CNTR_SYNTH),
-[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
-[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
-[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
- CNTR_SYNTH),
-[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
- CNTR_SYNTH),
-[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_DC_TOTAL_CRC] =
- DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
- CNTR_SYNTH),
-[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
- CNTR_SYNTH),
-[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
- CNTR_SYNTH),
-[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
- CNTR_SYNTH),
-[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
- CNTR_SYNTH),
-[C_DC_CRC_MULT_LN] =
- DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
- CNTR_SYNTH),
-[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
- CNTR_SYNTH),
-[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
- CNTR_SYNTH),
-[C_DC_SEQ_CRC_CNT] =
- DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
- CNTR_SYNTH),
-[C_DC_ESC0_ONLY_CNT] =
- DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
- CNTR_SYNTH),
-[C_DC_ESC0_PLUS1_CNT] =
- DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
- CNTR_SYNTH),
-[C_DC_ESC0_PLUS2_CNT] =
- DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
- CNTR_SYNTH),
-[C_DC_REINIT_FROM_PEER_CNT] =
- DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
- CNTR_SYNTH),
-[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
- CNTR_SYNTH),
-[C_DC_MISC_FLG_CNT] =
- DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
- CNTR_SYNTH),
-[C_DC_PRF_GOOD_LTP_CNT] =
- DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
-[C_DC_PRF_ACCEPTED_LTP_CNT] =
- DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
- CNTR_SYNTH),
-[C_DC_PRF_RX_FLIT_CNT] =
- DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
-[C_DC_PRF_TX_FLIT_CNT] =
- DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
-[C_DC_PRF_CLK_CNTR] =
- DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
-[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
- DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
-[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
- DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
- CNTR_SYNTH),
-[C_DC_PG_STS_TX_SBE_CNT] =
- DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
-[C_DC_PG_STS_TX_MBE_CNT] =
- DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
- CNTR_SYNTH),
-[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
- access_sw_cpu_intr),
-[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
- access_sw_cpu_rcv_limit),
-[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
- access_sw_vtx_wait),
-[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
- access_sw_pio_wait),
-[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
- access_sw_pio_drain),
-[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
- access_sw_kmem_wait),
-[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
- access_sw_send_schedule),
-[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
- SEND_DMA_DESC_FETCHED_CNT, 0,
- CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
- dev_access_u32_csr),
-[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
- CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
- access_sde_int_cnt),
-[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
- CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
- access_sde_err_cnt),
-[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
- CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
- access_sde_idle_int_cnt),
-[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
- CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
- access_sde_progress_int_cnt),
-/* MISC_ERR_STATUS */
-[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_pll_lock_fail_err_cnt),
-[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_mbist_fail_err_cnt),
-[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_invalid_eep_cmd_err_cnt),
-[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_efuse_done_parity_err_cnt),
-[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_efuse_write_err_cnt),
-[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
- 0, CNTR_NORMAL,
- access_misc_efuse_read_bad_addr_err_cnt),
-[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_efuse_csr_parity_err_cnt),
-[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_fw_auth_failed_err_cnt),
-[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_key_mismatch_err_cnt),
-[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_sbus_write_failed_err_cnt),
-[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_csr_write_bad_addr_err_cnt),
-[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_csr_read_bad_addr_err_cnt),
-[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
- CNTR_NORMAL,
- access_misc_csr_parity_err_cnt),
-/* CceErrStatus */
-[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
- CNTR_NORMAL,
- access_sw_cce_err_status_aggregated_cnt),
-[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_cce_msix_csr_parity_err_cnt),
-[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
- CNTR_NORMAL,
- access_cce_int_map_unc_err_cnt),
-[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
- CNTR_NORMAL,
- access_cce_int_map_cor_err_cnt),
-[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
- CNTR_NORMAL,
- access_cce_msix_table_unc_err_cnt),
-[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
- CNTR_NORMAL,
- access_cce_msix_table_cor_err_cnt),
-[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
- 0, CNTR_NORMAL,
- access_cce_rxdma_conv_fifo_parity_err_cnt),
-[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
- 0, CNTR_NORMAL,
- access_cce_rcpl_async_fifo_parity_err_cnt),
-[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_cce_seg_write_bad_addr_err_cnt),
-[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_cce_seg_read_bad_addr_err_cnt),
-[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
- CNTR_NORMAL,
- access_la_triggered_cnt),
-[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
- CNTR_NORMAL,
- access_cce_trgt_cpl_timeout_err_cnt),
-[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_receive_parity_err_cnt),
-[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_transmit_back_parity_err_cnt),
-[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
- 0, CNTR_NORMAL,
- access_pcic_transmit_front_parity_err_cnt),
-[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_cpl_dat_q_unc_err_cnt),
-[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_cpl_hd_q_unc_err_cnt),
-[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_post_dat_q_unc_err_cnt),
-[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_post_hd_q_unc_err_cnt),
-[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_retry_sot_mem_unc_err_cnt),
-[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_retry_mem_unc_err),
-[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_n_post_dat_q_parity_err_cnt),
-[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_n_post_h_q_parity_err_cnt),
-[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_cpl_dat_q_cor_err_cnt),
-[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_cpl_hd_q_cor_err_cnt),
-[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_post_dat_q_cor_err_cnt),
-[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_post_hd_q_cor_err_cnt),
-[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_retry_sot_mem_cor_err_cnt),
-[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
- CNTR_NORMAL,
- access_pcic_retry_mem_cor_err_cnt),
-[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
- "CceCli1AsyncFifoDbgParityError", 0, 0,
- CNTR_NORMAL,
- access_cce_cli1_async_fifo_dbg_parity_err_cnt),
-[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
- "CceCli1AsyncFifoRxdmaParityError", 0, 0,
- CNTR_NORMAL,
- access_cce_cli1_async_fifo_rxdma_parity_err_cnt
- ),
-[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
- "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
- CNTR_NORMAL,
- access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
-[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
- "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
- CNTR_NORMAL,
- access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
-[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
- 0, CNTR_NORMAL,
- access_cce_cli2_async_fifo_parity_err_cnt),
-[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
- CNTR_NORMAL,
- access_cce_csr_cfg_bus_parity_err_cnt),
-[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
- 0, CNTR_NORMAL,
- access_cce_cli0_async_fifo_parity_err_cnt),
-[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
- CNTR_NORMAL,
- access_cce_rspd_data_parity_err_cnt),
-[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
- CNTR_NORMAL,
- access_cce_trgt_access_err_cnt),
-[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
- 0, CNTR_NORMAL,
- access_cce_trgt_async_fifo_parity_err_cnt),
-[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_cce_csr_write_bad_addr_err_cnt),
-[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_cce_csr_read_bad_addr_err_cnt),
-[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_ccs_csr_parity_err_cnt),
-
-/* RcvErrStatus */
-[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_csr_parity_err_cnt),
-[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_rx_csr_write_bad_addr_err_cnt),
-[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_rx_csr_read_bad_addr_err_cnt),
-[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_csr_unc_err_cnt),
-[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_dq_fsm_encoding_err_cnt),
-[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_eq_fsm_encoding_err_cnt),
-[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_csr_parity_err_cnt),
-[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_data_cor_err_cnt),
-[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_data_unc_err_cnt),
-[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_data_fifo_rd_cor_err_cnt),
-[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_data_fifo_rd_unc_err_cnt),
-[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_hdr_fifo_rd_cor_err_cnt),
-[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_hdr_fifo_rd_unc_err_cnt),
-[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_desc_part2_cor_err_cnt),
-[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_desc_part2_unc_err_cnt),
-[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_desc_part1_cor_err_cnt),
-[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_desc_part1_unc_err_cnt),
-[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
- CNTR_NORMAL,
- access_rx_hq_intr_fsm_err_cnt),
-[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_hq_intr_csr_parity_err_cnt),
-[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_lookup_csr_parity_err_cnt),
-[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_lookup_rcv_array_cor_err_cnt),
-[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_lookup_rcv_array_unc_err_cnt),
-[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
- 0, CNTR_NORMAL,
- access_rx_lookup_des_part2_parity_err_cnt),
-[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
- 0, CNTR_NORMAL,
- access_rx_lookup_des_part1_unc_cor_err_cnt),
-[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_lookup_des_part1_unc_err_cnt),
-[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_next_free_buf_cor_err_cnt),
-[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_next_free_buf_unc_err_cnt),
-[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
- "RxRbufFlInitWrAddrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rbuf_fl_init_wr_addr_parity_err_cnt),
-[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_fl_initdone_parity_err_cnt),
-[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_fl_write_addr_parity_err_cnt),
-[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_fl_rd_addr_parity_err_cnt),
-[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_empty_err_cnt),
-[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_full_err_cnt),
-[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
- CNTR_NORMAL,
- access_rbuf_bad_lookup_err_cnt),
-[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
- CNTR_NORMAL,
- access_rbuf_ctx_id_parity_err_cnt),
-[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
- CNTR_NORMAL,
- access_rbuf_csr_qeopdw_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
- "RxRbufCsrQNumOfPktParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
- "RxRbufCsrQTlPtrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
- 0, 0, CNTR_NORMAL,
- access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
-[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
- "RxRbufCsrQHeadBufNumParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
-[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_block_list_read_cor_err_cnt),
-[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
- 0, CNTR_NORMAL,
- access_rx_rbuf_block_list_read_unc_err_cnt),
-[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_lookup_des_cor_err_cnt),
-[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_lookup_des_unc_err_cnt),
-[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
- "RxRbufLookupDesRegUncCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
-[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_lookup_des_reg_unc_err_cnt),
-[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_free_list_cor_err_cnt),
-[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rbuf_free_list_unc_err_cnt),
-[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_fsm_encoding_err_cnt),
-[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_flag_cor_err_cnt),
-[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_flag_unc_err_cnt),
-[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dc_sop_eop_parity_err_cnt),
-[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_csr_parity_err_cnt),
-[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_qp_map_table_cor_err_cnt),
-[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_qp_map_table_unc_err_cnt),
-[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_data_cor_err_cnt),
-[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_data_unc_err_cnt),
-[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_hdr_cor_err_cnt),
-[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
- CNTR_NORMAL,
- access_rx_rcv_hdr_unc_err_cnt),
-[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dc_intf_parity_err_cnt),
-[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
- CNTR_NORMAL,
- access_rx_dma_csr_cor_err_cnt),
-/* SendPioErrStatus */
-[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_pec_sop_head_parity_err_cnt),
-[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_pcc_sop_head_parity_err_cnt),
-[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
- 0, 0, CNTR_NORMAL,
- access_pio_last_returned_cnt_parity_err_cnt),
-[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
- 0, CNTR_NORMAL,
- access_pio_current_free_cnt_parity_err_cnt),
-[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
- CNTR_NORMAL,
- access_pio_reserved_31_err_cnt),
-[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
- CNTR_NORMAL,
- access_pio_reserved_30_err_cnt),
-[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
- CNTR_NORMAL,
- access_pio_ppmc_sop_len_err_cnt),
-[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_ppmc_bqc_mem_parity_err_cnt),
-[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_vl_fifo_parity_err_cnt),
-[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_vlf_sop_parity_err_cnt),
-[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_vlf_v1_len_parity_err_cnt),
-[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_block_qw_count_parity_err_cnt),
-[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_write_qw_valid_parity_err_cnt),
-[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
- CNTR_NORMAL,
- access_pio_state_machine_err_cnt),
-[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_write_data_parity_err_cnt),
-[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
- CNTR_NORMAL,
- access_pio_host_addr_mem_cor_err_cnt),
-[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
- CNTR_NORMAL,
- access_pio_host_addr_mem_unc_err_cnt),
-[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
- CNTR_NORMAL,
- access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
-[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
- CNTR_NORMAL,
- access_pio_init_sm_in_err_cnt),
-[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
- CNTR_NORMAL,
- access_pio_ppmc_pbl_fifo_err_cnt),
-[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
- 0, CNTR_NORMAL,
- access_pio_credit_ret_fifo_parity_err_cnt),
-[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
- CNTR_NORMAL,
- access_pio_v1_len_mem_bank1_cor_err_cnt),
-[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
- CNTR_NORMAL,
- access_pio_v1_len_mem_bank0_cor_err_cnt),
-[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
- CNTR_NORMAL,
- access_pio_v1_len_mem_bank1_unc_err_cnt),
-[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
- CNTR_NORMAL,
- access_pio_v1_len_mem_bank0_unc_err_cnt),
-[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_sm_pkt_reset_parity_err_cnt),
-[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_pkt_evict_fifo_parity_err_cnt),
-[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
- "PioSbrdctrlCrrelFifoParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
-[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_sbrdctl_crrel_parity_err_cnt),
-[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_pec_fifo_parity_err_cnt),
-[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_pcc_fifo_parity_err_cnt),
-[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
- CNTR_NORMAL,
- access_pio_sb_mem_fifo1_err_cnt),
-[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
- CNTR_NORMAL,
- access_pio_sb_mem_fifo0_err_cnt),
-[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_csr_parity_err_cnt),
-[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
- CNTR_NORMAL,
- access_pio_write_addr_parity_err_cnt),
-[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
- CNTR_NORMAL,
- access_pio_write_bad_ctxt_err_cnt),
-/* SendDmaErrStatus */
-[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
- 0, CNTR_NORMAL,
- access_sdma_pcie_req_tracking_cor_err_cnt),
-[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
- 0, CNTR_NORMAL,
- access_sdma_pcie_req_tracking_unc_err_cnt),
-[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_csr_parity_err_cnt),
-[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_rpy_tag_err_cnt),
-/* SendEgressErrStatus */
-[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
- CNTR_NORMAL,
- access_tx_read_pio_memory_csr_unc_err_cnt),
-[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
- 0, CNTR_NORMAL,
- access_tx_read_sdma_memory_csr_err_cnt),
-[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_egress_fifo_cor_err_cnt),
-[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_read_pio_memory_cor_err_cnt),
-[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_read_sdma_memory_cor_err_cnt),
-[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_sb_hdr_cor_err_cnt),
-[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
- CNTR_NORMAL,
- access_tx_credit_overrun_err_cnt),
-[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo8_cor_err_cnt),
-[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo7_cor_err_cnt),
-[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo6_cor_err_cnt),
-[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo5_cor_err_cnt),
-[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo4_cor_err_cnt),
-[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo3_cor_err_cnt),
-[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo2_cor_err_cnt),
-[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo1_cor_err_cnt),
-[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_fifo0_cor_err_cnt),
-[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
- CNTR_NORMAL,
- access_tx_credit_return_vl_err_cnt),
-[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
- CNTR_NORMAL,
- access_tx_hcrc_insertion_err_cnt),
-[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
- CNTR_NORMAL,
- access_tx_egress_fifo_unc_err_cnt),
-[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
- CNTR_NORMAL,
- access_tx_read_pio_memory_unc_err_cnt),
-[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
- CNTR_NORMAL,
- access_tx_read_sdma_memory_unc_err_cnt),
-[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
- CNTR_NORMAL,
- access_tx_sb_hdr_unc_err_cnt),
-[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_credit_return_partiy_err_cnt),
-[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo8_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo7_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo6_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo5_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo4_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo3_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo2_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo1_unc_or_parity_err_cnt),
-[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_launch_fifo0_unc_or_parity_err_cnt),
-[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma15_disallowed_packet_err_cnt),
-[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma14_disallowed_packet_err_cnt),
-[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma13_disallowed_packet_err_cnt),
-[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma12_disallowed_packet_err_cnt),
-[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma11_disallowed_packet_err_cnt),
-[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma10_disallowed_packet_err_cnt),
-[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma9_disallowed_packet_err_cnt),
-[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma8_disallowed_packet_err_cnt),
-[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma7_disallowed_packet_err_cnt),
-[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma6_disallowed_packet_err_cnt),
-[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma5_disallowed_packet_err_cnt),
-[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma4_disallowed_packet_err_cnt),
-[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma3_disallowed_packet_err_cnt),
-[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma2_disallowed_packet_err_cnt),
-[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma1_disallowed_packet_err_cnt),
-[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma0_disallowed_packet_err_cnt),
-[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_config_parity_err_cnt),
-[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_sbrd_ctl_csr_parity_err_cnt),
-[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_launch_csr_parity_err_cnt),
-[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
- CNTR_NORMAL,
- access_tx_illegal_vl_err_cnt),
-[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
- "TxSbrdCtlStateMachineParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_sbrd_ctl_state_machine_parity_err_cnt),
-[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
- CNTR_NORMAL,
- access_egress_reserved_10_err_cnt),
-[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
- CNTR_NORMAL,
- access_egress_reserved_9_err_cnt),
-[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
- 0, 0, CNTR_NORMAL,
- access_tx_sdma_launch_intf_parity_err_cnt),
-[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_pio_launch_intf_parity_err_cnt),
-[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
- CNTR_NORMAL,
- access_egress_reserved_6_err_cnt),
-[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
- CNTR_NORMAL,
- access_tx_incorrect_link_state_err_cnt),
-[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
- CNTR_NORMAL,
- access_tx_linkdown_err_cnt),
-[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
- "EgressFifoUnderrunOrParityErr", 0, 0,
- CNTR_NORMAL,
- access_tx_egress_fifi_underrun_or_parity_err_cnt),
-[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
- CNTR_NORMAL,
- access_egress_reserved_2_err_cnt),
-[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
- CNTR_NORMAL,
- access_tx_pkt_integrity_mem_unc_err_cnt),
-[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
- CNTR_NORMAL,
- access_tx_pkt_integrity_mem_cor_err_cnt),
-/* SendErrStatus */
-[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_send_csr_write_bad_addr_err_cnt),
-[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
- CNTR_NORMAL,
- access_send_csr_read_bad_addr_err_cnt),
-[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
- CNTR_NORMAL,
- access_send_csr_parity_cnt),
-/* SendCtxtErrStatus */
-[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
- CNTR_NORMAL,
- access_pio_write_out_of_bounds_err_cnt),
-[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
- CNTR_NORMAL,
- access_pio_write_overflow_err_cnt),
-[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
- 0, 0, CNTR_NORMAL,
- access_pio_write_crosses_boundary_err_cnt),
-[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
- CNTR_NORMAL,
- access_pio_disallowed_packet_err_cnt),
-[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
- CNTR_NORMAL,
- access_pio_inconsistent_sop_err_cnt),
-/* SendDmaEngErrStatus */
-[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
- 0, 0, CNTR_NORMAL,
- access_sdma_header_request_fifo_cor_err_cnt),
-[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_header_storage_cor_err_cnt),
-[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_packet_tracking_cor_err_cnt),
-[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_assembly_cor_err_cnt),
-[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_desc_table_cor_err_cnt),
-[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
- 0, 0, CNTR_NORMAL,
- access_sdma_header_request_fifo_unc_err_cnt),
-[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_header_storage_unc_err_cnt),
-[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_packet_tracking_unc_err_cnt),
-[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_assembly_unc_err_cnt),
-[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_desc_table_unc_err_cnt),
-[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_timeout_err_cnt),
-[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_header_length_err_cnt),
-[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_header_address_err_cnt),
-[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_header_select_err_cnt),
-[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
- CNTR_NORMAL,
- access_sdma_reserved_9_err_cnt),
-[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_packet_desc_overflow_err_cnt),
-[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_length_mismatch_err_cnt),
-[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_halt_err_cnt),
-[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_mem_read_err_cnt),
-[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_first_desc_err_cnt),
-[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_tail_out_of_bounds_err_cnt),
-[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_too_long_err_cnt),
-[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_gen_mismatch_err_cnt),
-[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
- CNTR_NORMAL,
- access_sdma_wrong_dw_err_cnt),
-};
-
-static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
-[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
- CNTR_NORMAL),
-[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
- CNTR_NORMAL),
-[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
- CNTR_NORMAL),
-[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
- CNTR_NORMAL),
-[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
- CNTR_NORMAL),
-[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
- CNTR_NORMAL),
-[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
- CNTR_NORMAL),
-[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
-[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
-[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
-[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
- CNTR_SYNTH | CNTR_VL),
-[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
-[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
-[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
- access_sw_link_dn_cnt),
-[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
- access_sw_link_up_cnt),
-[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
- access_sw_unknown_frame_cnt),
-[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
- access_sw_xmit_discards),
-[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
- CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
- access_sw_xmit_discards),
-[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
- access_xmit_constraint_errs),
-[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
- access_rcv_constraint_errs),
-[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
-[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
-[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
-[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
-[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
-[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
-[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
-[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
-[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
-[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
-[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
-[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
-[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
- access_sw_cpu_rc_acks),
-[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
- access_sw_cpu_rc_qacks),
-[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
- access_sw_cpu_rc_delayed_comp),
-[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
-[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
-[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
-[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
-[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
-[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
-[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
-[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
-[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
-[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
-[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
-[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
-[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
-[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
-[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
-[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
-[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
-[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
-[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
-[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
-[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
-[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
-[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
-[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
-[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
-[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
-[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
-[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
-[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
-[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
-[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
-[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
-[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
-[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
-[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
-[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
-[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
-[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
-[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
-[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
-[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
-[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
-[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
-[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
-[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
-[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
-[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
-[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
-[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
-[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
-[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
-[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
-[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
-[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
-[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
-[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
-[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
-[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
-[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
-[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
-[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
-[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
-[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
-[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
-[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
-[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
-[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
-[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
-[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
-[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
-[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
-[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
-[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
-[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
-[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
-[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
-[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
-[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
-[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
-[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
-};
-
-/* ======================================================================== */
-
-/* return true if this is chip revision a */
-int is_ax(struct hfi1_devdata *dd)
-{
- u8 chip_rev_minor =
- dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
- & CCE_REVISION_CHIP_REV_MINOR_MASK;
- return (chip_rev_minor & 0xf0) == 0;
-}
-
-/* return true if this is chip revision b */
-int is_bx(struct hfi1_devdata *dd)
-{
- u8 chip_rev_minor =
- dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
- & CCE_REVISION_CHIP_REV_MINOR_MASK;
- return (chip_rev_minor & 0xF0) == 0x10;
-}
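
A minimal userspace sketch of the shift-then-mask decode used by is_ax() and is_bx() above. The shift, mask, and revision values here are hypothetical stand-ins for the CCE_REVISION_* constants; only the high-nibble comparison mirrors the driver:

#include <stdint.h>
#include <stdio.h>

#define CHIP_REV_MINOR_SHIFT 8          /* hypothetical stand-in */
#define CHIP_REV_MINOR_MASK  0xffull    /* hypothetical stand-in */

static int is_rev_a(uint64_t revision)
{
        uint8_t minor = (revision >> CHIP_REV_MINOR_SHIFT) & CHIP_REV_MINOR_MASK;
        return (minor & 0xf0) == 0;     /* A-step: high nibble is 0 */
}

static int is_rev_b(uint64_t revision)
{
        uint8_t minor = (revision >> CHIP_REV_MINOR_SHIFT) & CHIP_REV_MINOR_MASK;
        return (minor & 0xf0) == 0x10;  /* B-step: high nibble is 1 */
}

int main(void)
{
        printf("%d %d\n", is_rev_a(0x0042), is_rev_b(0x1142)); /* 1 1 */
        return 0;
}
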
-
-/*
- * Append string s to buffer buf. Arguments curp and len are the current
- * position and remaining length, respectively.
- *
- * return 0 on success, 1 on out of room
- */
-static int append_str(char *buf, char **curp, int *lenp, const char *s)
-{
- char *p = *curp;
- int len = *lenp;
- int result = 0; /* success */
- char c;
-
- /* add a comma, unless this is the first string in the buffer */
- if (p != buf) {
- if (len == 0) {
- result = 1; /* out of room */
- goto done;
- }
- *p++ = ',';
- len--;
- }
-
- /* copy the string */
- while ((c = *s++) != 0) {
- if (len == 0) {
- result = 1; /* out of room */
- goto done;
- }
- *p++ = c;
- len--;
- }
-
-done:
- /* write return values */
- *curp = p;
- *lenp = len;
-
- return result;
-}
-
-/*
- * Using the given flag table, print a comma-separated string into
- * the buffer. End in '*' if the buffer is too short.
- */
-static char *flag_string(char *buf, int buf_len, u64 flags,
- struct flag_table *table, int table_size)
-{
- char extra[32];
- char *p = buf;
- int len = buf_len;
- int no_room = 0;
- int i;
-
- /* make sure there are at least 2 bytes so we can form "*" */
- if (len < 2)
- return "";
-
- len--; /* leave room for a nul */
- for (i = 0; i < table_size; i++) {
- if (flags & table[i].flag) {
- no_room = append_str(buf, &p, &len, table[i].str);
- if (no_room)
- break;
- flags &= ~table[i].flag;
- }
- }
-
- /* any undocumented bits left? */
- if (!no_room && flags) {
- snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
- no_room = append_str(buf, &p, &len, extra);
- }
-
- /* add * if ran out of room */
- if (no_room) {
- /* may need to back up to add space for a '*' */
- if (len == 0)
- --p;
- *p++ = '*';
- }
-
- /* add final nul - space already allocated above */
- *p = 0;
- return buf;
-}
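
A self-contained sketch of the flag_string()/append_str() technique above: walk a flag table, comma-join the names of the set bits, and report any leftover undocumented bits. The table contents are hypothetical, and the buffer handling is simplified (snprintf-based, without the driver's '*' truncation marker):

#include <stdint.h>
#include <stdio.h>

struct flag_name { uint64_t flag; const char *str; };

/* hypothetical table; the driver builds these from CSR bit definitions */
static const struct flag_name table[] = {
        { 1ull << 0, "ParityErr" },
        { 1ull << 1, "OverflowErr" },
        { 1ull << 5, "TimeoutErr" },
};

static void decode_flags(char *buf, size_t len, uint64_t flags)
{
        size_t used = 0;
        size_t i;

        buf[0] = '\0';
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (!(flags & table[i].flag))
                        continue;
                used += snprintf(buf + used, len - used, "%s%s",
                                 used ? "," : "", table[i].str);
                if (used >= len)
                        return;                 /* out of room */
                flags &= ~table[i].flag;
        }
        if (flags)                              /* undocumented bits left */
                snprintf(buf + used, len - used, "%sbits 0x%llx",
                         used ? "," : "", (unsigned long long)flags);
}

int main(void)
{
        char buf[64];

        decode_flags(buf, sizeof(buf), (1ull << 0) | (1ull << 5) | (1ull << 9));
        printf("%s\n", buf);    /* ParityErr,TimeoutErr,bits 0x200 */
        return 0;
}
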
-
-/* first 8 CCE error interrupt source names */
-static const char * const cce_misc_names[] = {
- "CceErrInt", /* 0 */
- "RxeErrInt", /* 1 */
- "MiscErrInt", /* 2 */
- "Reserved3", /* 3 */
- "PioErrInt", /* 4 */
- "SDmaErrInt", /* 5 */
- "EgressErrInt", /* 6 */
- "TxeErrInt" /* 7 */
-};
-
-/*
- * Return the miscellaneous error interrupt name.
- */
-static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
-{
- if (source < ARRAY_SIZE(cce_misc_names))
- strncpy(buf, cce_misc_names[source], bsize);
- else
- snprintf(buf, bsize, "Reserved%u",
- source + IS_GENERAL_ERR_START);
-
- return buf;
-}
-
-/*
- * Return the SDMA engine error interrupt name.
- */
-static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
-{
- snprintf(buf, bsize, "SDmaEngErrInt%u", source);
- return buf;
-}
-
-/*
- * Return the send context error interrupt name.
- */
-static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
-{
- snprintf(buf, bsize, "SendCtxtErrInt%u", source);
- return buf;
-}
-
-static const char * const various_names[] = {
- "PbcInt",
- "GpioAssertInt",
- "Qsfp1Int",
- "Qsfp2Int",
- "TCritInt"
-};
-
-/*
- * Return the various interrupt name.
- */
-static char *is_various_name(char *buf, size_t bsize, unsigned int source)
-{
- if (source < ARRAY_SIZE(various_names))
- strncpy(buf, various_names[source], bsize);
- else
- snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
- return buf;
-}
-
-/*
- * Return the DC interrupt name.
- */
-static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
-{
- static const char * const dc_int_names[] = {
- "common",
- "lcb",
- "8051",
- "lbm" /* local block merge */
- };
-
- if (source < ARRAY_SIZE(dc_int_names))
- snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
- else
- snprintf(buf, bsize, "DCInt%u", source);
- return buf;
-}
-
-static const char * const sdma_int_names[] = {
- "SDmaInt",
- "SdmaIdleInt",
- "SdmaProgressInt",
-};
-
-/*
- * Return the SDMA engine interrupt name.
- */
-static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
-{
- /* what interrupt */
- unsigned int what = source / TXE_NUM_SDMA_ENGINES;
- /* which engine */
- unsigned int which = source % TXE_NUM_SDMA_ENGINES;
-
- if (likely(what < 3))
- snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
- else
- snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
- return buf;
-}
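
The decode above packs two indices into one flat interrupt-source number; a tiny sketch of the same divide/modulo split, with TXE_NUM_SDMA_ENGINES stood in by a hypothetical constant:

#include <stdio.h>

#define NUM_ENGINES 16                          /* stand-in */

int main(void)
{
        unsigned source = 35;                   /* flat interrupt source */
        unsigned what  = source / NUM_ENGINES;  /* which interrupt kind */
        unsigned which = source % NUM_ENGINES;  /* which engine */

        printf("source %u -> kind %u, engine %u\n", source, what, which);
        return 0;                               /* kind 2, engine 3 */
}
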
-
-/*
- * Return the receive available interrupt name.
- */
-static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
-{
- snprintf(buf, bsize, "RcvAvailInt%u", source);
- return buf;
-}
-
-/*
- * Return the receive urgent interrupt name.
- */
-static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
-{
- snprintf(buf, bsize, "RcvUrgentInt%u", source);
- return buf;
-}
-
-/*
- * Return the send credit interrupt name.
- */
-static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
-{
- snprintf(buf, bsize, "SendCreditInt%u", source);
- return buf;
-}
-
-/*
- * Return the reserved interrupt name.
- */
-static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
-{
- snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
- return buf;
-}
-
-static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- cce_err_status_flags,
- ARRAY_SIZE(cce_err_status_flags));
-}
-
-static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- rxe_err_status_flags,
- ARRAY_SIZE(rxe_err_status_flags));
-}
-
-static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags, misc_err_status_flags,
- ARRAY_SIZE(misc_err_status_flags));
-}
-
-static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- pio_err_status_flags,
- ARRAY_SIZE(pio_err_status_flags));
-}
-
-static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- sdma_err_status_flags,
- ARRAY_SIZE(sdma_err_status_flags));
-}
-
-static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- egress_err_status_flags,
- ARRAY_SIZE(egress_err_status_flags));
-}
-
-static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- egress_err_info_flags,
- ARRAY_SIZE(egress_err_info_flags));
-}
-
-static char *send_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- send_err_status_flags,
- ARRAY_SIZE(send_err_status_flags));
-}
-
-static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
- int i = 0;
-
- /*
- * For most these errors, there is nothing that can be done except
- * report or record it.
- */
- dd_dev_info(dd, "CCE Error: %s\n",
- cce_err_status_string(buf, sizeof(buf), reg));
-
- if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
- is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
- /* this error requires a manual drop into SPC freeze mode */
- /* then a fix up */
- start_freeze_handling(dd->pport, FREEZE_SELF);
- }
-
- for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i)) {
- incr_cntr64(&dd->cce_err_status_cnt[i]);
- /* maintain a counter over all cce_err_status errors */
- incr_cntr64(&dd->sw_cce_err_status_aggregate);
- }
- }
-}
-
-/*
- * Check counters for receive errors that do not have an interrupt
- * associated with them.
- */
-#define RCVERR_CHECK_TIME 10
-static void update_rcverr_timer(unsigned long opaque)
-{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
- struct hfi1_pportdata *ppd = dd->pport;
- u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
-
- if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
- ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
- dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
- set_link_down_reason(
- ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
- OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
- queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
- }
- dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
-
- mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
-}
-
-static int init_rcverr(struct hfi1_devdata *dd)
-{
- setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
- /* Assume the hardware counter has been reset */
- dd->rcv_ovfl_cnt = 0;
- return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
-}
-
-static void free_rcverr(struct hfi1_devdata *dd)
-{
- if (dd->rcverr_timer.data)
- del_timer_sync(&dd->rcverr_timer);
- dd->rcverr_timer.data = 0;
-}
-
-static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
- int i = 0;
-
- dd_dev_info(dd, "Receive Error: %s\n",
- rxe_err_status_string(buf, sizeof(buf), reg));
-
- if (reg & ALL_RXE_FREEZE_ERR) {
- int flags = 0;
-
- /*
- * Freeze mode recovery is disabled for the errors
- * in RXE_FREEZE_ABORT_MASK
- */
- if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
- flags = FREEZE_ABORT;
-
- start_freeze_handling(dd->pport, flags);
- }
-
- for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i))
- incr_cntr64(&dd->rcv_err_status_cnt[i]);
- }
-}
-
-static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
- int i = 0;
-
- dd_dev_info(dd, "Misc Error: %s",
- misc_err_status_string(buf, sizeof(buf), reg));
- for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i))
- incr_cntr64(&dd->misc_err_status_cnt[i]);
- }
-}
-
-static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
- int i = 0;
-
- dd_dev_info(dd, "PIO Error: %s\n",
- pio_err_status_string(buf, sizeof(buf), reg));
-
- if (reg & ALL_PIO_FREEZE_ERR)
- start_freeze_handling(dd->pport, 0);
-
- for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i))
- incr_cntr64(&dd->send_pio_err_status_cnt[i]);
- }
-}
-
-static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
- int i = 0;
-
- dd_dev_info(dd, "SDMA Error: %s\n",
- sdma_err_status_string(buf, sizeof(buf), reg));
-
- if (reg & ALL_SDMA_FREEZE_ERR)
- start_freeze_handling(dd->pport, 0);
-
- for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i))
- incr_cntr64(&dd->send_dma_err_status_cnt[i]);
- }
-}
-
-static inline void __count_port_discards(struct hfi1_pportdata *ppd)
-{
- incr_cntr64(&ppd->port_xmit_discards);
-}
-
-static void count_port_inactive(struct hfi1_devdata *dd)
-{
- __count_port_discards(dd->pport);
-}
-
-/*
- * We have had a "disallowed packet" error during egress. Determine the
- * integrity check that failed, and update the relevant error counters.
- *
- * Note that the SEND_EGRESS_ERR_INFO register has only a single
- * bit of state per integrity check, and so we can miss the reason for an
- * egress error if more than one packet fails the same integrity check
- * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
- */
-static void handle_send_egress_err_info(struct hfi1_devdata *dd,
- int vl)
-{
- struct hfi1_pportdata *ppd = dd->pport;
- u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
- u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
- char buf[96];
-
- /* clear down all observed info as quickly as possible after read */
- write_csr(dd, SEND_EGRESS_ERR_INFO, info);
-
- dd_dev_info(dd,
- "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
- info, egress_err_info_string(buf, sizeof(buf), info), src);
-
- /* Eventually add other counters for each bit */
- if (info & PORT_DISCARD_EGRESS_ERRS) {
- int weight, i;
-
- /*
- * Count all applicable bits as individual errors and
- * attribute them to the packet that triggered this handler.
- * This may not be completely accurate due to limitations
- * on the available hardware error information. There is
- * a single information register and any number of error
- * packets may have occurred and contributed to it before
- * this routine is called. This means that:
- * a) If multiple packets with the same error occur before
- * this routine is called, earlier packets are missed.
- * There is only a single bit for each error type.
- * b) Errors may not be attributed to the correct VL.
- * The driver is attributing all bits in the info register
- * to the packet that triggered this call, but bits
- * could be an accumulation of different packets with
- * different VLs.
- * c) A single error packet may have multiple counts attached
- * to it. There is no way for the driver to know if
- * multiple bits set in the info register are due to a
- * single packet or multiple packets. The driver assumes
- * multiple packets.
- */
- weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
- for (i = 0; i < weight; i++) {
- __count_port_discards(ppd);
- if (vl >= 0 && vl < TXE_NUM_DATA_VL)
- incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
- else if (vl == 15)
- incr_cntr64(&ppd->port_xmit_discards_vl
- [C_VL_15]);
- }
- }
-}
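
A self-contained sketch of the popcount-then-attribute accounting in the handler above, using __builtin_popcountll as a stand-in for the kernel's hweight64(); the mask value and counter layout are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define DISCARD_MASK 0xffull    /* hypothetical error-bit mask */
#define NUM_VLS 8

static long xmit_discards;
static long xmit_discards_vl[NUM_VLS];

static void count_discards(uint64_t info, int vl)
{
        int weight = __builtin_popcountll(info & DISCARD_MASK);
        int i;

        /* one discard per set bit, attributed to the triggering VL */
        for (i = 0; i < weight; i++) {
                xmit_discards++;
                if (vl >= 0 && vl < NUM_VLS)
                        xmit_discards_vl[vl]++;
        }
}

int main(void)
{
        count_discards(0x15, 2);        /* three bits set, VL 2 */
        printf("total=%ld vl2=%ld\n", xmit_discards, xmit_discards_vl[2]);
        return 0;
}
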
-
-/*
- * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
- * register. Does it represent a 'port inactive' error?
- */
-static inline int port_inactive_err(u64 posn)
-{
- return (posn >= SEES(TX_LINKDOWN) &&
- posn <= SEES(TX_INCORRECT_LINK_STATE));
-}
-
-/*
- * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
- * register. Does it represent a 'disallowed packet' error?
- */
-static inline int disallowed_pkt_err(int posn)
-{
- return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
- posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
-}
-
-/*
- * Input value is a bit position of one of the SDMA engine disallowed
- * packet errors. Return which engine. Use of this must be guarded by
- * disallowed_pkt_err().
- */
-static inline int disallowed_pkt_engine(int posn)
-{
- return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
-}
-
-/*
- * Translate an SDMA engine to a VL. Return -1 if the translation cannot
- * be done.
- */
-static int engine_to_vl(struct hfi1_devdata *dd, int engine)
-{
- struct sdma_vl_map *m;
- int vl;
-
- /* range check */
- if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
- return -1;
-
- rcu_read_lock();
- m = rcu_dereference(dd->sdma_map);
- vl = m->engine_to_vl[engine];
- rcu_read_unlock();
-
- return vl;
-}
-
-/*
- * Translate the send context (software index) into a VL. Return -1 if the
- * translation cannot be done.
- */
-static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
-{
- struct send_context_info *sci;
- struct send_context *sc;
- int i;
-
- sci = &dd->send_contexts[sw_index];
-
- /* there is no information for user (PSM) and ack contexts */
- if (sci->type != SC_KERNEL)
- return -1;
-
- sc = sci->sc;
- if (!sc)
- return -1;
- if (dd->vld[15].sc == sc)
- return 15;
- for (i = 0; i < num_vls; i++)
- if (dd->vld[i].sc == sc)
- return i;
-
- return -1;
-}
-
-static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- u64 reg_copy = reg, handled = 0;
- char buf[96];
- int i = 0;
-
- if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
- start_freeze_handling(dd->pport, 0);
- else if (is_ax(dd) &&
- (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
- (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
- start_freeze_handling(dd->pport, 0);
-
- while (reg_copy) {
- int posn = fls64(reg_copy);
- /* fls64() returns a 1-based offset, we want it zero based */
- int shift = posn - 1;
- u64 mask = 1ULL << shift;
-
- if (port_inactive_err(shift)) {
- count_port_inactive(dd);
- handled |= mask;
- } else if (disallowed_pkt_err(shift)) {
- int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
-
- handle_send_egress_err_info(dd, vl);
- handled |= mask;
- }
- reg_copy &= ~mask;
- }
-
- reg &= ~handled;
-
- if (reg)
- dd_dev_info(dd, "Egress Error: %s\n",
- egress_err_status_string(buf, sizeof(buf), reg));
-
- for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i))
- incr_cntr64(&dd->send_egress_err_status_cnt[i]);
- }
-}
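
The while loop above peels set bits off the register from the most significant end. A runnable sketch of that pattern, with a portable stand-in for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

/* stand-in for fls64(): 1-based index of the highest set bit, 0 if none */
static int fls64_sketch(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
        uint64_t reg = 0x8000000000000005ull;

        while (reg) {
                int posn = fls64_sketch(reg);
                int shift = posn - 1;           /* zero-based bit position */
                uint64_t mask = 1ull << shift;

                printf("handling bit %d\n", shift);
                reg &= ~mask;                   /* clear it and continue */
        }
        return 0;                               /* bits 63, 2, 0 */
}
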
-
-static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
- int i = 0;
-
- dd_dev_info(dd, "Send Error: %s\n",
- send_err_status_string(buf, sizeof(buf), reg));
-
- for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
- if (reg & (1ull << i))
- incr_cntr64(&dd->send_err_status_cnt[i]);
- }
-}
-
-/*
- * The maximum number of times the error clear down will loop before
- * blocking a repeating error. This value is arbitrary.
- */
-#define MAX_CLEAR_COUNT 20
-
-/*
- * Clear and handle an error register. All error interrupts are funneled
- * through here to have a central location to correctly handle single-
- * or multi-shot errors.
- *
- * For non per-context registers, call this routine with a context value
- * of 0 so the per-context offset is zero.
- *
- * If the handler loops too many times, assume that something is wrong
- * and can't be fixed, so mask the error bits.
- */
-static void interrupt_clear_down(struct hfi1_devdata *dd,
- u32 context,
- const struct err_reg_info *eri)
-{
- u64 reg;
- u32 count;
-
- /* read in a loop until no more errors are seen */
- count = 0;
- while (1) {
- reg = read_kctxt_csr(dd, context, eri->status);
- if (reg == 0)
- break;
- write_kctxt_csr(dd, context, eri->clear, reg);
- if (likely(eri->handler))
- eri->handler(dd, context, reg);
- count++;
- if (count > MAX_CLEAR_COUNT) {
- u64 mask;
-
- dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
- eri->desc, reg);
- /*
- * Read-modify-write so any other masked bits
- * remain masked.
- */
- mask = read_kctxt_csr(dd, context, eri->mask);
- mask &= ~reg;
- write_kctxt_csr(dd, context, eri->mask, mask);
- break;
- }
- }
-}
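
A userspace simulation of the bounded clear-down loop above, assuming a stuck status bit that never clears; the read/clear helpers and the mask variable are fakes standing in for the CSR accessors:

#include <stdint.h>
#include <stdio.h>

#define MAX_CLEAR_COUNT 20

static uint64_t fake_status = 0x4;      /* a stuck, repeating error bit */
static uint64_t fake_mask = ~0ull;

static uint64_t read_status(void)
{
        return fake_status & fake_mask;
}

static void clear_status(uint64_t v)
{
        (void)v;        /* real hardware would clear v; ours sticks */
}

int main(void)
{
        uint32_t count = 0;

        for (;;) {
                uint64_t reg = read_status();

                if (!reg)
                        break;
                clear_status(reg);
                if (++count > MAX_CLEAR_COUNT) {
                        /* read-modify-write so other masked bits stay
                         * masked, as the driver does */
                        fake_mask &= ~reg;
                        printf("masked stuck bits 0x%llx\n",
                               (unsigned long long)reg);
                        break;
                }
        }
        return 0;
}
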
-
-/*
- * CCE block "misc" interrupt. Source is < 16.
- */
-static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
-{
- const struct err_reg_info *eri = &misc_errs[source];
-
- if (eri->handler) {
- interrupt_clear_down(dd, 0, eri);
- } else {
- dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
- source);
- }
-}
-
-static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags,
- sc_err_status_flags,
- ARRAY_SIZE(sc_err_status_flags));
-}
-
-/*
- * Send context error interrupt. Source (hw_context) is < 160.
- *
- * All send context errors cause the send context to halt. The normal
- * clear-down mechanism cannot be used because we cannot clear the
- * error bits until several other long-running items are done first.
- * This is OK because with the context halted, nothing else is going
- * to happen on it anyway.
- */
-static void is_sendctxt_err_int(struct hfi1_devdata *dd,
- unsigned int hw_context)
-{
- struct send_context_info *sci;
- struct send_context *sc;
- char flags[96];
- u64 status;
- u32 sw_index;
- int i = 0;
-
- sw_index = dd->hw_to_sw[hw_context];
- if (sw_index >= dd->num_send_contexts) {
- dd_dev_err(dd,
- "out of range sw index %u for send context %u\n",
- sw_index, hw_context);
- return;
- }
- sci = &dd->send_contexts[sw_index];
- sc = sci->sc;
- if (!sc) {
- dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
- sw_index, hw_context);
- return;
- }
-
- /* tell the software that a halt has begun */
- sc_stop(sc, SCF_HALTED);
-
- status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
-
- dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
- send_context_err_status_string(flags, sizeof(flags),
- status));
-
- if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
- handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
-
- /*
- * Automatically restart halted kernel contexts out of interrupt
- * context. User contexts must ask the driver to restart the context.
- */
- if (sc->type != SC_USER)
- queue_work(dd->pport->hfi1_wq, &sc->halt_work);
-
- /*
- * Update the counters for the corresponding status bits.
- * Note that these particular counters are aggregated over all
- * 160 contexts.
- */
- for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
- if (status & (1ull << i))
- incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
- }
-}
-
-static void handle_sdma_eng_err(struct hfi1_devdata *dd,
- unsigned int source, u64 status)
-{
- struct sdma_engine *sde;
- int i = 0;
-
- sde = &dd->per_sdma[source];
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
- slashstrip(__FILE__), __LINE__, __func__);
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
- sde->this_idx, source, (unsigned long long)status);
-#endif
- sde->err_cnt++;
- sdma_engine_error(sde, status);
-
- /*
- * Update the counters for the corresponding status bits.
- * Note that these particular counters are aggregated over
- * all 16 DMA engines.
- */
- for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
- if (status & (1ull << i))
- incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
- }
-}
-
-/*
- * CCE block SDMA error interrupt. Source is < 16.
- */
-static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
-{
-#ifdef CONFIG_SDMA_VERBOSITY
- struct sdma_engine *sde = &dd->per_sdma[source];
-
- dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
- slashstrip(__FILE__), __LINE__, __func__);
- dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
- source);
- sdma_dumpstate(sde);
-#endif
- interrupt_clear_down(dd, source, &sdma_eng_err);
-}
-
-/*
- * CCE block "various" interrupt. Source is < 8.
- */
-static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
-{
- const struct err_reg_info *eri = &various_err[source];
-
- /*
- * TCritInt cannot go through interrupt_clear_down()
- * because it is not a second tier interrupt. The handler
- * should be called directly.
- */
- if (source == TCRIT_INT_SOURCE)
- handle_temp_err(dd);
- else if (eri->handler)
- interrupt_clear_down(dd, 0, eri);
- else
- dd_dev_info(dd,
- "%s: Unimplemented/reserved interrupt %d\n",
- __func__, source);
-}
-
-static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
-{
- /* src_ctx is always zero */
- struct hfi1_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
-
- if (reg & QSFP_HFI0_MODPRST_N) {
- if (!qsfp_mod_present(ppd)) {
- dd_dev_info(dd, "%s: QSFP module removed\n",
- __func__);
-
- ppd->driver_link_ready = 0;
- /*
- * Cable removed, reset all our information about the
- * cache and cable capabilities
- */
-
- spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
- /*
- * We don't set cache_refresh_required here as we expect
- * an interrupt when a cable is inserted
- */
- ppd->qsfp_info.cache_valid = 0;
- ppd->qsfp_info.reset_needed = 0;
- ppd->qsfp_info.limiting_active = 0;
- spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
- flags);
- /* Invert the ModPresent pin now to detect plug-in */
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
- ASIC_QSFP1_INVERT, qsfp_int_mgmt);
-
- if ((ppd->offline_disabled_reason >
- HFI1_ODR_MASK(
- OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
- (ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(
- OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
-
- if (ppd->host_link_state == HLS_DN_POLL) {
- /*
- * The link is still in POLL. This means
- * that the normal link down processing
- * will not happen. We have to do it here
- * before turning the DC off.
- */
- queue_work(ppd->hfi1_wq, &ppd->link_down_work);
- }
- } else {
- dd_dev_info(dd, "%s: QSFP module inserted\n",
- __func__);
-
- spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
- ppd->qsfp_info.cache_valid = 0;
- ppd->qsfp_info.cache_refresh_required = 1;
- spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
- flags);
-
- /*
- * Stop inversion of ModPresent pin to detect
- * removal of the cable
- */
- qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
- ASIC_QSFP1_INVERT, qsfp_int_mgmt);
-
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
- }
- }
-
- if (reg & QSFP_HFI0_INT_N) {
- dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
- __func__);
- spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
- ppd->qsfp_info.check_interrupt_flags = 1;
- spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
- }
-
- /* Schedule the QSFP work only if there is a cable attached. */
- if (qsfp_mod_present(ppd))
- queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
-}
-
-static int request_host_lcb_access(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = do_8051_command(dd, HCMD_MISC,
- (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
- LOAD_DATA_FIELD_ID_SHIFT, NULL);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd, "%s: command failed with error %d\n",
- __func__, ret);
- }
- return ret == HCMD_SUCCESS ? 0 : -EBUSY;
-}
-
-static int request_8051_lcb_access(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = do_8051_command(dd, HCMD_MISC,
- (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
- LOAD_DATA_FIELD_ID_SHIFT, NULL);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd, "%s: command failed with error %d\n",
- __func__, ret);
- }
- return ret == HCMD_SUCCESS ? 0 : -EBUSY;
-}
-
-/*
- * Set the LCB selector - allow host access. The DCC selector always
- * points to the host.
- */
-static inline void set_host_lcb_access(struct hfi1_devdata *dd)
-{
- write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
- DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
- DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
-}
-
-/*
- * Clear the LCB selector - allow 8051 access. The DCC selector always
- * points to the host.
- */
-static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
-{
- write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
- DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
-}
-
-/*
- * Acquire LCB access from the 8051. If the host already has access,
- * just increment a counter. Otherwise, inform the 8051 that the
- * host is taking access.
- *
- * Returns:
- * 0 on success
- * -EBUSY if the 8051 has control and cannot be disturbed
- * -errno if unable to acquire access from the 8051
- */
-int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
-{
- struct hfi1_pportdata *ppd = dd->pport;
- int ret = 0;
-
- /*
- * Use the host link state lock so the operation of this routine
- * { link state check, selector change, count increment } can occur
- * as a unit against a link state change. Otherwise there is a
- * race between the state change and the count increment.
- */
- if (sleep_ok) {
- mutex_lock(&ppd->hls_lock);
- } else {
- while (!mutex_trylock(&ppd->hls_lock))
- udelay(1);
- }
-
- /* this access is valid only when the link is up */
- if ((ppd->host_link_state & HLS_UP) == 0) {
- dd_dev_info(dd, "%s: link state %s not up\n",
- __func__, link_state_name(ppd->host_link_state));
- ret = -EBUSY;
- goto done;
- }
-
- if (dd->lcb_access_count == 0) {
- ret = request_host_lcb_access(dd);
- if (ret) {
- dd_dev_err(dd,
- "%s: unable to acquire LCB access, err %d\n",
- __func__, ret);
- goto done;
- }
- set_host_lcb_access(dd);
- }
- dd->lcb_access_count++;
-done:
- mutex_unlock(&ppd->hls_lock);
- return ret;
-}
-
-/*
- * Release LCB access by decrementing the use count. If the count is moving
- * from 1 to 0, inform 8051 that it has control back.
- *
- * Returns:
- * 0 on success
- * -errno if unable to release access to the 8051
- */
-int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
-{
- int ret = 0;
-
- /*
- * Use the host link state lock because the acquire needed it.
- * Here, we only need to keep { selector change, count decrement }
- * as a unit.
- */
- if (sleep_ok) {
- mutex_lock(&dd->pport->hls_lock);
- } else {
- while (!mutex_trylock(&dd->pport->hls_lock))
- udelay(1);
- }
-
- if (dd->lcb_access_count == 0) {
- dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
- __func__);
- goto done;
- }
-
- if (dd->lcb_access_count == 1) {
- set_8051_lcb_access(dd);
- ret = request_8051_lcb_access(dd);
- if (ret) {
- dd_dev_err(dd,
- "%s: unable to release LCB access, err %d\n",
- __func__, ret);
- /* restore host access if the grant didn't work */
- set_host_lcb_access(dd);
- goto done;
- }
- }
- dd->lcb_access_count--;
-done:
- mutex_unlock(&dd->pport->hls_lock);
- return ret;
-}
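
A sketch of the counted-access pattern that acquire_lcb_access()/release_lcb_access() implement: the first user takes hardware access, the last one hands it back, and the whole { check, change, count } step is serialized under one lock. The printfs stand in for the 8051 handshake commands:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int access_count;

static void acquire(void)
{
        pthread_mutex_lock(&lock);
        if (access_count == 0)
                printf("taking hardware access\n");       /* first user */
        access_count++;
        pthread_mutex_unlock(&lock);
}

static void release(void)
{
        pthread_mutex_lock(&lock);
        if (access_count == 0) {
                printf("count already zero, skipping\n");
        } else {
                if (access_count == 1)
                        printf("handing hardware access back\n"); /* last user */
                access_count--;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        acquire();
        acquire();
        release();
        release();
        release();      /* underflow is caught and skipped */
        return 0;
}
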
-
-/*
- * Initialize LCB access variables and state. Called during driver load,
- * after most of the initialization is finished.
- *
- * The DC default is LCB access on for the host. The driver defaults to
- * leaving access to the 8051. Assign access now - this constrains the call
- * to this routine to be after all LCB set-up is done. In particular, after
- * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
- */
-static void init_lcb_access(struct hfi1_devdata *dd)
-{
- dd->lcb_access_count = 0;
-}
-
-/*
- * Write a response back to a 8051 request.
- */
-static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
-{
- write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
- DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
- (u64)return_code <<
- DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
- (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
-}
-
-/*
- * Handle host requests from the 8051.
- *
- * This is a work-queue function outside of the interrupt.
- */
-void handle_8051_request(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- dc_host_req_work);
- struct hfi1_devdata *dd = ppd->dd;
- u64 reg;
- u16 data = 0;
- u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
- u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
-
- reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
- if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
- return; /* no request */
-
- /* zero out COMPLETED so the response is seen */
- write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
-
- /* extract request details */
- type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
- & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
- data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
- & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
-
- switch (type) {
- case HREQ_LOAD_CONFIG:
- case HREQ_SAVE_CONFIG:
- case HREQ_READ_CONFIG:
- case HREQ_SET_TX_EQ_ABS:
- case HREQ_SET_TX_EQ_REL:
- dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
- type);
- hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
- break;
-
- case HREQ_ENABLE:
- lanes = data & 0xF;
- for (i = 0; lanes; lanes >>= 1, i++) {
- if (!(lanes & 1))
- continue;
- if (data & 0x200) {
- /* enable TX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x80)
- cdr_ctrl_byte |= (1 << (i + 4));
- } else {
- /* disable TX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x80)
- cdr_ctrl_byte &= ~(1 << (i + 4));
- }
-
- if (data & 0x800) {
- /* enable RX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x40)
- cdr_ctrl_byte |= (1 << i);
- } else {
- /* disable RX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x40)
- cdr_ctrl_byte &= ~(1 << i);
- }
- }
- one_qsfp_write(ppd, dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
- &cdr_ctrl_byte, 1);
- hreq_response(dd, HREQ_SUCCESS, data);
- refresh_qsfp_cache(ppd, &ppd->qsfp_info);
- break;
-
- case HREQ_CONFIG_DONE:
- hreq_response(dd, HREQ_SUCCESS, 0);
- break;
-
- case HREQ_INTERFACE_TEST:
- hreq_response(dd, HREQ_SUCCESS, data);
- break;
-
- default:
- dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
- hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
- break;
- }
-}
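
The HREQ_ENABLE case above flips one TX CDR bit (lane + 4) and one RX CDR bit (lane) per lane in the QSFP CDR control byte. A minimal sketch of that bit layout; the nibble assignment matches the loop above, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

/* TX CDR enables live in bits 7..4, RX CDR enables in bits 3..0,
 * one bit per lane */
static uint8_t set_cdr(uint8_t ctrl, unsigned lane, int tx_on, int rx_on)
{
        if (tx_on)
                ctrl |= 1u << (lane + 4);
        else
                ctrl &= ~(1u << (lane + 4));
        if (rx_on)
                ctrl |= 1u << lane;
        else
                ctrl &= ~(1u << lane);
        return ctrl;
}

int main(void)
{
        uint8_t ctrl = 0;

        ctrl = set_cdr(ctrl, 0, 1, 1);  /* lane 0: TX+RX on */
        ctrl = set_cdr(ctrl, 2, 1, 0);  /* lane 2: TX on, RX off */
        printf("cdr ctrl byte: 0x%02x\n", ctrl);        /* 0x51 */
        return 0;
}
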
-
-static void write_global_credit(struct hfi1_devdata *dd,
- u8 vau, u16 total, u16 shared)
-{
- write_csr(dd, SEND_CM_GLOBAL_CREDIT,
- ((u64)total <<
- SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
- ((u64)shared <<
- SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
- ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
-}
-
-/*
- * Set up initial VL15 credits of the remote. Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset.
- */
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
-{
- /* leave shared count at zero for both global and VL15 */
- write_global_credit(dd, vau, vl15buf, 0);
-
- /* We may need some credits for another VL when sending packets
- * with the snoop interface. Dividing it down the middle for VL15
- * and VL0 should suffice.
- */
- if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
- << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
- } else {
- write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
- << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
- }
-}
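
write_global_credit() composes a single 64-bit CSR value from shifted fields. A sketch of the same packing, with hypothetical shift constants in place of the SEND_CM_GLOBAL_CREDIT_* definitions:

#include <stdint.h>
#include <stdio.h>

#define TOTAL_SHIFT   0         /* hypothetical field offsets */
#define SHARED_SHIFT 16
#define AU_SHIFT     32

static uint64_t pack_global_credit(uint8_t vau, uint16_t total, uint16_t shared)
{
        return ((uint64_t)total << TOTAL_SHIFT) |
               ((uint64_t)shared << SHARED_SHIFT) |
               ((uint64_t)vau << AU_SHIFT);
}

int main(void)
{
        printf("0x%016llx\n",
               (unsigned long long)pack_global_credit(3, 0x1000, 0));
        return 0;       /* 0x0000000300001000 */
}
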
-
-/*
- * Zero all credit details from the previous connection and
- * reset the CM manager's internal counters.
- */
-void reset_link_credits(struct hfi1_devdata *dd)
-{
- int i;
-
- /* remove all previous VL credit limits */
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
- write_csr(dd, SEND_CM_CREDIT_VL15, 0);
- write_global_credit(dd, 0, 0, 0);
- /* reset the CM block */
- pio_send_control(dd, PSC_CM_RESET);
-}
-
-/* convert a vCU to a CU */
-static u32 vcu_to_cu(u8 vcu)
-{
- return 1 << vcu;
-}
-
-/* convert a CU to a vCU */
-static u8 cu_to_vcu(u32 cu)
-{
- return ilog2(cu);
-}
-
-/* convert a vAU to an AU */
-static u32 vau_to_au(u8 vau)
-{
- return 8 * (1 << vau);
-}
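
The three converters above encode power-of-two sizes in a few bits: cu = 2^vcu, au = 8 * 2^vau, and ilog2() inverts the encoding. A runnable equivalent, using __builtin_clz as a stand-in for the kernel's ilog2():

#include <stdint.h>
#include <stdio.h>

static uint32_t vcu_to_cu(uint8_t vcu)  { return 1u << vcu; }
static uint8_t  cu_to_vcu(uint32_t cu)  { return 31 - __builtin_clz(cu); }
static uint32_t vau_to_au(uint8_t vau)  { return 8 * (1u << vau); }

int main(void)
{
        printf("vcu 3 -> cu %u, back to vcu %u, vau 2 -> au %u\n",
               vcu_to_cu(3), cu_to_vcu(vcu_to_cu(3)), vau_to_au(2));
        return 0;       /* cu 8, vcu 3, au 32 */
}
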
-
-static void set_linkup_defaults(struct hfi1_pportdata *ppd)
-{
- ppd->sm_trap_qp = 0x0;
- ppd->sa_qp = 0x1;
-}
-
-/*
- * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
- */
-static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
-{
- u64 reg;
-
- /* clear lcb run: LCB_CFG_RUN.EN = 0 */
- write_csr(dd, DC_LCB_CFG_RUN, 0);
- /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
- write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
- 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
- /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
- dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
- reg = read_csr(dd, DCC_CFG_RESET);
- write_csr(dd, DCC_CFG_RESET, reg |
- (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
- (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
- (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
- if (!abort) {
- udelay(1); /* must hold for the longer of 16cclks or 20ns */
- write_csr(dd, DCC_CFG_RESET, reg);
- write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
- }
-}
-
-/*
- * This routine should be called after the link has been transitioned to
- * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
- * reset).
- *
- * The expectation is that the caller of this routine would have taken
- * care of properly transitioning the link into the correct state.
- */
-static void dc_shutdown(struct hfi1_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->dc8051_lock, flags);
- if (dd->dc_shutdown) {
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
- return;
- }
- dd->dc_shutdown = 1;
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
- /* Shutdown the LCB */
- lcb_shutdown(dd, 1);
- /*
- * Going to OFFLINE would have caused the 8051 to put the
- * SerDes into reset already. Just need to shut down the
- * 8051 itself.
- */
- write_csr(dd, DC_DC8051_CFG_RST, 0x1);
-}
-
-/*
- * Calling this after the DC has been brought out of reset should not
- * do any damage.
- */
-static void dc_start(struct hfi1_devdata *dd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dd->dc8051_lock, flags);
- if (!dd->dc_shutdown)
- goto done;
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
- /* Take the 8051 out of reset */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
- /* Wait until 8051 is ready */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) {
- dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
- __func__);
- }
- /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
- write_csr(dd, DCC_CFG_RESET, 0x10);
- /* lcb_shutdown() with abort=1 does not restore these */
- write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
- spin_lock_irqsave(&dd->dc8051_lock, flags);
- dd->dc_shutdown = 0;
-done:
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
-}
-
-/*
- * These LCB adjustments are for the Aurora SerDes core in the FPGA.
- */
-static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
-{
- u64 rx_radr, tx_radr;
- u32 version;
-
- if (dd->icode != ICODE_FPGA_EMULATION)
- return;
-
- /*
- * These LCB defaults on emulator _s are good, nothing to do here:
- * LCB_CFG_TX_FIFOS_RADR
- * LCB_CFG_RX_FIFOS_RADR
- * LCB_CFG_LN_DCLK
- * LCB_CFG_IGNORE_LOST_RCLK
- */
- if (is_emulator_s(dd))
- return;
- /* else this is _p */
-
- version = emulator_rev(dd);
- if (!is_ax(dd))
- version = 0x2d; /* all B0 use 0x2d or higher settings */
-
- if (version <= 0x12) {
- /* release 0x12 and below */
-
- /*
- * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
- * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
- * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
- */
- rx_radr =
- 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
- | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
- | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
- /*
- * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
- * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
- */
- tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
- } else if (version <= 0x18) {
- /* release 0x13 up to 0x18 */
- /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
- rx_radr =
- 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
- | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
- | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
- tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
- } else if (version == 0x19) {
- /* release 0x19 */
- /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
- rx_radr =
- 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
- | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
- | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
- tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
- } else if (version == 0x1a) {
- /* release 0x1a */
- /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
- rx_radr =
- 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
- | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
- | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
- tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
- write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
- } else {
- /* release 0x1b and higher */
- /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
- rx_radr =
- 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
- | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
- | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
- tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
- }
-
- write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
- /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
- write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
- DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
- write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
-}
-
-/*
- * Handle a SMA idle message
- *
- * This is a work-queue function outside of the interrupt.
- */
-void handle_sma_message(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- sma_message_work);
- struct hfi1_devdata *dd = ppd->dd;
- u64 msg;
- int ret;
-
- /*
- * msg is bytes 1-4 of the 40-bit idle message - the command code
- * is stripped off
- */
- ret = read_idle_sma(dd, &msg);
- if (ret)
- return;
- dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
- /*
- * React to the SMA message. Byte[1] (0 for us) is the command.
- */
- switch (msg & 0xff) {
- case SMA_IDLE_ARM:
- /*
- * See OPAv1 table 9-14 - HFI and External Switch Ports Key
- * State Transitions
- *
- * Only expected in INIT or ARMED, discard otherwise.
- */
- if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
- ppd->neighbor_normal = 1;
- break;
- case SMA_IDLE_ACTIVE:
- /*
- * See OPAv1 table 9-14 - HFI and External Switch Ports Key
- * State Transitions
- *
- * Can activate the node. Discard otherwise.
- */
- if (ppd->host_link_state == HLS_UP_ARMED &&
- ppd->is_active_optimize_enabled) {
- ppd->neighbor_normal = 1;
- ret = set_link_state(ppd, HLS_UP_ACTIVE);
- if (ret)
- dd_dev_err(
- dd,
- "%s: received Active SMA idle message, couldn't set link to Active\n",
- __func__);
- }
- break;
- default:
- dd_dev_err(dd,
- "%s: received unexpected SMA idle message 0x%llx\n",
- __func__, msg);
- break;
- }
-}
-
-static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
-{
- u64 rcvctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->rcvctrl_lock, flags);
- rcvctrl = read_csr(dd, RCV_CTRL);
- rcvctrl |= add;
- rcvctrl &= ~clear;
- write_csr(dd, RCV_CTRL, rcvctrl);
- spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
-}
-
-static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
-{
- adjust_rcvctrl(dd, add, 0);
-}
-
-static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
-{
- adjust_rcvctrl(dd, 0, clear);
-}
-
-/*
- * Called from all interrupt handlers to start handling an SPC freeze.
- */
-void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
-{
- struct hfi1_devdata *dd = ppd->dd;
- struct send_context *sc;
- int i;
-
- if (flags & FREEZE_SELF)
- write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
-
- /* enter frozen mode */
- dd->flags |= HFI1_FROZEN;
-
- /* notify all SDMA engines that they are going into a freeze */
- sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
-
- /* do halt pre-handling on all enabled send contexts */
- for (i = 0; i < dd->num_send_contexts; i++) {
- sc = dd->send_contexts[i].sc;
- if (sc && (sc->flags & SCF_ENABLED))
- sc_stop(sc, SCF_FROZEN | SCF_HALTED);
- }
-
- /* Send context are frozen. Notify user space */
- hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
-
- if (flags & FREEZE_ABORT) {
- dd_dev_err(dd,
- "Aborted freeze recovery. Please REBOOT system\n");
- return;
- }
- /* queue non-interrupt handler */
- queue_work(ppd->hfi1_wq, &ppd->freeze_work);
-}
-
-/*
- * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
- * depending on the "freeze" parameter.
- *
- * No need to return an error if it times out, our only option
- * is to proceed anyway.
- */
-static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
-{
- unsigned long timeout;
- u64 reg;
-
- timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
- while (1) {
- reg = read_csr(dd, CCE_STATUS);
- if (freeze) {
- /* waiting until all indicators are set */
- if ((reg & ALL_FROZE) == ALL_FROZE)
- return; /* all done */
- } else {
- /* waiting until all indicators are clear */
- if ((reg & ALL_FROZE) == 0)
- return; /* all done */
- }
-
- if (time_after(jiffies, timeout)) {
- dd_dev_err(dd,
- "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
- freeze ? "" : "un", reg & ALL_FROZE,
- freeze ? ALL_FROZE : 0ull);
- return;
- }
- usleep_range(80, 120);
- }
-}
-
-/*
- * Do all freeze handling for the RXE block.
- */
-static void rxe_freeze(struct hfi1_devdata *dd)
-{
- int i;
-
- /* disable port */
- clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
-
- /* disable all receive contexts */
- for (i = 0; i < dd->num_rcv_contexts; i++)
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
-}
-
-/*
- * Unfreeze handling for the RXE block - kernel contexts only.
- * This will also enable the port. User contexts will do unfreeze
- * handling on a per-context basis as they call into the driver.
- */
-static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
-{
- u32 rcvmask;
- int i;
-
- /* enable all kernel contexts */
- for (i = 0; i < dd->n_krcv_queues; i++) {
- rcvmask = HFI1_RCVCTRL_CTXT_ENB;
- /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
- rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
- HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
- hfi1_rcvctrl(dd, rcvmask, i);
- }
-
- /* enable port */
- add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
-}
-
-/*
- * Non-interrupt SPC freeze handling.
- *
- * This is a work-queue function outside of the triggering interrupt.
- */
-void handle_freeze(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- freeze_work);
- struct hfi1_devdata *dd = ppd->dd;
-
- /* wait for freeze indicators on all affected blocks */
- wait_for_freeze_status(dd, 1);
-
- /* SPC is now frozen */
-
- /* do send PIO freeze steps */
- pio_freeze(dd);
-
- /* do send DMA freeze steps */
- sdma_freeze(dd);
-
- /* do send egress freeze steps - nothing to do */
-
- /* do receive freeze steps */
- rxe_freeze(dd);
-
- /*
- * Unfreeze the hardware - clear the freeze, wait for each
- * block's frozen bit to clear, then clear the frozen flag.
- */
- write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
- wait_for_freeze_status(dd, 0);
-
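- /* on A-step parts (is_ax), run one extra freeze/unfreeze cycle */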
- if (is_ax(dd)) {
- write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
- wait_for_freeze_status(dd, 1);
- write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
- wait_for_freeze_status(dd, 0);
- }
-
- /* do send PIO unfreeze steps for kernel contexts */
- pio_kernel_unfreeze(dd);
-
- /* do send DMA unfreeze steps */
- sdma_unfreeze(dd);
-
- /* do send egress unfreeze steps - nothing to do */
-
- /* do receive unfreeze steps for kernel contexts */
- rxe_kernel_unfreeze(dd);
-
- /*
- * The unfreeze procedure touches global device registers when
- * it disables and re-enables RXE. Mark the device unfrozen
- * after all that is done so other parts of the driver waiting
- * for the device to unfreeze don't do things out of order.
- *
- * The above implies that the meaning of HFI1_FROZEN flag is
- * "Device has gone into freeze mode and freeze mode handling
- * is still in progress."
- *
- * The flag will be removed when freeze mode processing has
- * completed.
- */
- dd->flags &= ~HFI1_FROZEN;
- wake_up(&dd->event_queue);
-
- /* no longer frozen */
-}
-
-/*
- * Handle a link up interrupt from the 8051.
- *
- * This is a work-queue function outside of the interrupt.
- */
-void handle_link_up(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_up_work);
- set_link_state(ppd, HLS_UP_INIT);
-
- /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
- read_ltp_rtt(ppd->dd);
- /*
- * OPA specifies that certain counters are cleared on a transition
- * to link up, so do that.
- */
- clear_linkup_counters(ppd->dd);
- /*
- * And (re)set link up default values.
- */
- set_linkup_defaults(ppd);
-
- /* enforce link speed enabled */
- if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
- /* oops - current speed is not enabled, bounce */
- dd_dev_err(ppd->dd,
- "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
- ppd->link_speed_active, ppd->link_speed_enabled);
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
- OPA_LINKDOWN_REASON_SPEED_POLICY);
- set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
- start_link(ppd);
- }
-}
-
-/*
- * Several pieces of LNI information were cached for SMA in ppd.
- * Reset these on link down.
- */
-static void reset_neighbor_info(struct hfi1_pportdata *ppd)
-{
- ppd->neighbor_guid = 0;
- ppd->neighbor_port_number = 0;
- ppd->neighbor_type = 0;
- ppd->neighbor_fm_security = 0;
-}
-
-/*
- * Handle a link down interrupt from the 8051.
- *
- * This is a work-queue function outside of the interrupt.
- */
-void handle_link_down(struct work_struct *work)
-{
- u8 lcl_reason, neigh_reason = 0;
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_down_work);
-
- if ((ppd->host_link_state &
- (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
- ppd->port_type == PORT_TYPE_FIXED)
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
-
- /* Go offline first, then deal with reading/writing through 8051 */
- set_link_state(ppd, HLS_DN_OFFLINE);
-
- lcl_reason = 0;
- read_planned_down_reason_code(ppd->dd, &neigh_reason);
-
- /*
- * If no reason, assume peer-initiated but missed
- * LinkGoingDown idle flits.
- */
- if (neigh_reason == 0)
- lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
-
- set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
-
- reset_neighbor_info(ppd);
-
- /* disable the port */
- clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
-
- /*
- * If there is no cable attached, turn the DC off. Otherwise,
- * start the link bring up.
- */
- if (!qsfp_mod_present(ppd)) {
- dc_shutdown(ppd->dd);
- } else {
- tune_serdes(ppd);
- start_link(ppd);
- }
-}
-
-void handle_link_bounce(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_bounce_work);
-
- /*
- * Only do something if the link is currently up.
- */
- if (ppd->host_link_state & HLS_UP) {
- set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
- start_link(ppd);
- } else {
- dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
- __func__, link_state_name(ppd->host_link_state));
- }
-}
-
-/*
- * Mask conversion: Capability exchange to Port LTP. The capability
- * exchange has an implicit 16b CRC that is mandatory.
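- * For example, a capability mask of (CAP_CRC_14B | CAP_CRC_48B)
- * converts to PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 |
- * PORT_LTP_CRC_MODE_48.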
- */
-static int cap_to_port_ltp(int cap)
-{
- int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
-
- if (cap & CAP_CRC_14B)
- port_ltp |= PORT_LTP_CRC_MODE_14;
- if (cap & CAP_CRC_48B)
- port_ltp |= PORT_LTP_CRC_MODE_48;
- if (cap & CAP_CRC_12B_16B_PER_LANE)
- port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
-
- return port_ltp;
-}
-
-/*
- * Convert an OPA Port LTP mask to capability mask
- */
-int port_ltp_to_cap(int port_ltp)
-{
- int cap_mask = 0;
-
- if (port_ltp & PORT_LTP_CRC_MODE_14)
- cap_mask |= CAP_CRC_14B;
- if (port_ltp & PORT_LTP_CRC_MODE_48)
- cap_mask |= CAP_CRC_48B;
- if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
- cap_mask |= CAP_CRC_12B_16B_PER_LANE;
-
- return cap_mask;
-}
-
-/*
- * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
- */
-static int lcb_to_port_ltp(int lcb_crc)
-{
- int port_ltp = 0;
-
- if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
- port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
- else if (lcb_crc == LCB_CRC_48B)
- port_ltp = PORT_LTP_CRC_MODE_48;
- else if (lcb_crc == LCB_CRC_14B)
- port_ltp = PORT_LTP_CRC_MODE_14;
- else
- port_ltp = PORT_LTP_CRC_MODE_16;
-
- return port_ltp;
-}
-
-/*
- * Our neighbor has indicated that we are allowed to act as a fabric
- * manager, so place the full management partition key in the second
- * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
- * that we should already have the limited management partition key in
- * array element 1, and also that the port is not yet up when
- * add_full_mgmt_pkey() is invoked.
- */
-static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
- if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
- dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
- __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
- ppd->pkeys[2] = FULL_MGMT_P_KEY;
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-}
-
-/*
- * Convert the given link width to the OPA link width bitmask.
- */
-static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
-{
- switch (width) {
- case 0:
- /*
- * Simulator and quick linkup do not set the width.
- * Just set it to 4x without complaint.
- */
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
- return OPA_LINK_WIDTH_4X;
- return 0; /* no lanes up */
- case 1: return OPA_LINK_WIDTH_1X;
- case 2: return OPA_LINK_WIDTH_2X;
- case 3: return OPA_LINK_WIDTH_3X;
- default:
- dd_dev_info(dd, "%s: invalid width %d, using 4\n",
- __func__, width);
- /* fall through */
- case 4: return OPA_LINK_WIDTH_4X;
- }
-}
-
-/*
- * Do a population count on the bottom nibble.
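- * For example, nibble 0xb (binary 1011) yields a count of 3.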
- */
-static const u8 bit_counts[16] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
-};
-
-static inline u8 nibble_to_count(u8 nibble)
-{
- return bit_counts[nibble & 0xf];
-}
-
-/*
- * Read the active lane information from the 8051 registers and return
- * their widths.
- *
- * Active lane information is found in these 8051 registers:
- * enable_lane_tx
- * enable_lane_rx
- */
-static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
- u16 *rx_width)
-{
- u16 tx, rx;
- u8 enable_lane_rx;
- u8 enable_lane_tx;
- u8 tx_polarity_inversion;
- u8 rx_polarity_inversion;
- u8 max_rate;
-
- /* read the active lanes */
- read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
- &rx_polarity_inversion, &max_rate);
- read_local_lni(dd, &enable_lane_rx);
-
- /* convert to counts */
- tx = nibble_to_count(enable_lane_tx);
- rx = nibble_to_count(enable_lane_rx);
-
- /*
- * Set link_speed_active here, overriding what was set in
- * handle_verify_cap(). The ASIC 8051 firmware does not correctly
- * set the max_rate field in handle_verify_cap until v0.19.
- */
- if ((dd->icode == ICODE_RTL_SILICON) &&
- (dd->dc8051_ver < dc8051_ver(0, 19))) {
- /* max_rate: 0 = 12.5G, 1 = 25G */
- switch (max_rate) {
- case 0:
- dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
- break;
- default:
- dd_dev_err(dd,
- "%s: unexpected max rate %d, using 25Gb\n",
- __func__, (int)max_rate);
- /* fall through */
- case 1:
- dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
- break;
- }
- }
-
- dd_dev_info(dd,
- "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
- enable_lane_tx, tx, enable_lane_rx, rx);
- *tx_width = link_width_to_bits(dd, tx);
- *rx_width = link_width_to_bits(dd, rx);
-}
-
-/*
- * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
- * Valid after the end of VerifyCap and during LinkUp. Does not change
- * after link up. I.e. look elsewhere for downgrade information.
- *
- * Bits are:
- * + bits [7:4] contain the number of active transmitters
- * + bits [3:0] contain the number of active receivers
- * These are numbers 1 through 4 and can be different values if the
- * link is asymmetric.
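- * For example, a value of 0x44 in this field decodes to four
- * active transmitters and four active receivers (symmetric 4X).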
- *
- * verify_cap_local_fm_link_width[0] retains its original value.
- */
-static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
- u16 *rx_width)
-{
- u16 widths, tx, rx;
- u8 misc_bits, local_flags;
- u16 active_tx, active_rx;
-
- read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
- tx = widths >> 12;
- rx = (widths >> 8) & 0xf;
-
- *tx_width = link_width_to_bits(dd, tx);
- *rx_width = link_width_to_bits(dd, rx);
-
- /* print the active widths */
- get_link_widths(dd, &active_tx, &active_rx);
-}
-
-/*
- * Set ppd->link_width_active and ppd->link_width_downgrade_active using
- * hardware information when the link first comes up.
- *
- * The link width is not available until after VerifyCap.AllFramesReceived
- * (the trigger for handle_verify_cap), so this is outside that routine
- * and should be called when the 8051 signals linkup.
- */
-void get_linkup_link_widths(struct hfi1_pportdata *ppd)
-{
- u16 tx_width, rx_width;
-
- /* get end-of-LNI link widths */
- get_linkup_widths(ppd->dd, &tx_width, &rx_width);
-
- /* use tx_width as the link is supposed to be symmetric on link up */
- ppd->link_width_active = tx_width;
- /* link width downgrade active (LWD.A) starts out matching LW.A */
- ppd->link_width_downgrade_tx_active = ppd->link_width_active;
- ppd->link_width_downgrade_rx_active = ppd->link_width_active;
- /* per OPA spec, on link up LWD.E resets to LWD.S */
- ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
- /* cache the active egress rate (units [10^6 bits/sec]) */
- ppd->current_egress_rate = active_egress_rate(ppd);
-}
-
-/*
- * Handle a verify capabilities interrupt from the 8051.
- *
- * This is a work-queue function outside of the interrupt.
- */
-void handle_verify_cap(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_vc_work);
- struct hfi1_devdata *dd = ppd->dd;
- u64 reg;
- u8 power_management;
- u8 continuous;
- u8 vcu;
- u8 vau;
- u8 z;
- u16 vl15buf;
- u16 link_widths;
- u16 crc_mask;
- u16 crc_val;
- u16 device_id;
- u16 active_tx, active_rx;
- u8 partner_supported_crc;
- u8 remote_tx_rate;
- u8 device_rev;
-
- set_link_state(ppd, HLS_VERIFY_CAP);
-
- lcb_shutdown(dd, 0);
- adjust_lcb_for_fpga_serdes(dd);
-
- /*
- * These are now valid:
- * remote VerifyCap fields in the general LNI config
- * CSR DC8051_STS_REMOTE_GUID
- * CSR DC8051_STS_REMOTE_NODE_TYPE
- * CSR DC8051_STS_REMOTE_FM_SECURITY
- * CSR DC8051_STS_REMOTE_PORT_NO
- */
-
- read_vc_remote_phy(dd, &power_management, &continuous);
- read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
- &partner_supported_crc);
- read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
- read_remote_device_id(dd, &device_id, &device_rev);
- /*
- * And the 'MgmtAllowed' information, which is exchanged during
- * LNI, is also available at this point.
- */
- read_mgmt_allowed(dd, &ppd->mgmt_allowed);
- /* print the active widths */
- get_link_widths(dd, &active_tx, &active_rx);
- dd_dev_info(dd,
- "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
- (int)power_management, (int)continuous);
- dd_dev_info(dd,
- "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
- (int)vau, (int)z, (int)vcu, (int)vl15buf,
- (int)partner_supported_crc);
- dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
- (u32)remote_tx_rate, (u32)link_widths);
- dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
- (u32)device_id, (u32)device_rev);
- /*
- * The peer vAU value just read is the peer receiver value. HFI does
- * not support a transmit vAU of 0 (AU == 8). We advertised that
- * with Z=1 in the fabric capabilities sent to the peer. The peer
- * will see our Z=1, and, if it advertised a vAU of 0, will move its
- * receive to vAU of 1 (AU == 16). Do the same here. We do not care
- * about the peer Z value - our sent vAU is 3 (hardwired) and is not
- * subject to the Z value exception.
- */
- if (vau == 0)
- vau = 1;
- set_up_vl15(dd, vau, vl15buf);
-
- /* set up the LCB CRC mode */
- crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
-
- /* order is important: use the lowest bit in common */
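- /* e.g. if both sides enable 14b and 48b CRC, 14b is selected */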
- if (crc_mask & CAP_CRC_14B)
- crc_val = LCB_CRC_14B;
- else if (crc_mask & CAP_CRC_48B)
- crc_val = LCB_CRC_48B;
- else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
- crc_val = LCB_CRC_12B_16B_PER_LANE;
- else
- crc_val = LCB_CRC_16B;
-
- dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
- write_csr(dd, DC_LCB_CFG_CRC_MODE,
- (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
-
- /* set (14b only) or clear sideband credit */
- reg = read_csr(dd, SEND_CM_CTRL);
- if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
- write_csr(dd, SEND_CM_CTRL,
- reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
- } else {
- write_csr(dd, SEND_CM_CTRL,
- reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
- }
-
- ppd->link_speed_active = 0; /* invalid value */
- if (dd->dc8051_ver < dc8051_ver(0, 20)) {
- /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
- switch (remote_tx_rate) {
- case 0:
- ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
- break;
- case 1:
- ppd->link_speed_active = OPA_LINK_SPEED_25G;
- break;
- }
- } else {
- /* actual rate is highest bit of the ANDed rates */
- u8 rate = remote_tx_rate & ppd->local_tx_rate;
-
- if (rate & 2)
- ppd->link_speed_active = OPA_LINK_SPEED_25G;
- else if (rate & 1)
- ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
- }
- if (ppd->link_speed_active == 0) {
- dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
- __func__, (int)remote_tx_rate);
- ppd->link_speed_active = OPA_LINK_SPEED_25G;
- }
-
- /*
- * Cache the values of the supported, enabled, and active
- * LTP CRC modes to return in 'portinfo' queries. But the bit
- * flags that are returned in the portinfo query differ from
- * what's in the link_crc_mask, crc_sizes, and crc_val
- * variables. Convert these here.
- */
- /* supported crc modes */
- ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
- /* enabled crc modes */
- ppd->port_ltp_crc_mode |=
- cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
- /* active crc mode */
- ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
-
- /* set up the remote credit return table */
- assign_remote_cm_au_table(dd, vcu);
-
- /*
- * The LCB is reset on entry to handle_verify_cap(), so this must
- * be applied on every link up.
- *
- * Adjust LCB error kill enable to kill the link if
- * these RBUF errors are seen:
- * REPLAY_BUF_MBE_SMASK
- * FLIT_INPUT_BUF_MBE_SMASK
- */
- if (is_ax(dd)) { /* fixed in B0 */
- reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
- reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
- | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
- write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
- }
-
- /* pull LCB fifos out of reset - all fifo clocks must be stable */
- write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
-
- /* give 8051 access to the LCB CSRs */
- write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
- set_8051_lcb_access(dd);
-
- ppd->neighbor_guid =
- read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
- ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
- DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
- ppd->neighbor_type =
- read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
- DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
- ppd->neighbor_fm_security =
- read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
- DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
- dd_dev_info(dd,
- "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
- ppd->neighbor_guid, ppd->neighbor_type,
- ppd->mgmt_allowed, ppd->neighbor_fm_security);
- if (ppd->mgmt_allowed)
- add_full_mgmt_pkey(ppd);
-
- /* tell the 8051 to go to LinkUp */
- set_link_state(ppd, HLS_GOING_UP);
-}
-
-/*
- * Apply the link width downgrade enabled policy against the current active
- * link widths.
- *
- * Called when the enabled policy changes or the active link widths change.
- */
-void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
-{
- int do_bounce = 0;
- int tries;
- u16 lwde;
- u16 tx, rx;
-
- /* use the hls lock to avoid a race with actual link up */
- tries = 0;
-retry:
- mutex_lock(&ppd->hls_lock);
- /* only apply if the link is up */
- if (!(ppd->host_link_state & HLS_UP)) {
- /* still going up... wait and retry */
- if (ppd->host_link_state & HLS_GOING_UP) {
- if (++tries < 1000) {
- mutex_unlock(&ppd->hls_lock);
- usleep_range(100, 120); /* arbitrary */
- goto retry;
- }
- dd_dev_err(ppd->dd,
- "%s: giving up waiting for link state change\n",
- __func__);
- }
- goto done;
- }
-
- lwde = ppd->link_width_downgrade_enabled;
-
- if (refresh_widths) {
- get_link_widths(ppd->dd, &tx, &rx);
- ppd->link_width_downgrade_tx_active = tx;
- ppd->link_width_downgrade_rx_active = rx;
- }
-
- if (lwde == 0) {
- /* downgrade is disabled */
-
- /* bounce if not at starting active width */
- if ((ppd->link_width_active !=
- ppd->link_width_downgrade_tx_active) ||
- (ppd->link_width_active !=
- ppd->link_width_downgrade_rx_active)) {
- dd_dev_err(ppd->dd,
- "Link downgrade is disabled and link has downgraded, downing link\n");
- dd_dev_err(ppd->dd,
- " original 0x%x, tx active 0x%x, rx active 0x%x\n",
- ppd->link_width_active,
- ppd->link_width_downgrade_tx_active,
- ppd->link_width_downgrade_rx_active);
- do_bounce = 1;
- }
- } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
- (lwde & ppd->link_width_downgrade_rx_active) == 0) {
- /* Tx or Rx is outside the enabled policy */
- dd_dev_err(ppd->dd,
- "Link is outside of downgrade allowed, downing link\n");
- dd_dev_err(ppd->dd,
- " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
- lwde, ppd->link_width_downgrade_tx_active,
- ppd->link_width_downgrade_rx_active);
- do_bounce = 1;
- }
-
-done:
- mutex_unlock(&ppd->hls_lock);
-
- if (do_bounce) {
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
- OPA_LINKDOWN_REASON_WIDTH_POLICY);
- set_link_state(ppd, HLS_DN_OFFLINE);
- tune_serdes(ppd);
- start_link(ppd);
- }
-}
-
-/*
- * Handle a link downgrade interrupt from the 8051.
- *
- * This is a work-queue function outside of the interrupt.
- */
-void handle_link_downgrade(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_downgrade_work);
-
- dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
- apply_link_downgrade_policy(ppd, 1);
-}
-
-static char *dcc_err_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags, dcc_err_flags,
- ARRAY_SIZE(dcc_err_flags));
-}
-
-static char *lcb_err_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags, lcb_err_flags,
- ARRAY_SIZE(lcb_err_flags));
-}
-
-static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags, dc8051_err_flags,
- ARRAY_SIZE(dc8051_err_flags));
-}
-
-static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
- ARRAY_SIZE(dc8051_info_err_flags));
-}
-
-static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
-{
- return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
- ARRAY_SIZE(dc8051_info_host_msg_flags));
-}
-
-static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- struct hfi1_pportdata *ppd = dd->pport;
- u64 info, err, host_msg;
- int queue_link_down = 0;
- char buf[96];
-
- /* look at the flags */
- if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
- /* 8051 information set by firmware */
- /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
- info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
- err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
- & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
- host_msg = (info >>
- DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
- & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
-
- /*
- * Handle error flags.
- */
- if (err & FAILED_LNI) {
- /*
- * LNI error indications are cleared by the 8051
- * only when starting polling. Only pay attention
- * to them when in the states that occur during
- * LNI.
- */
- if (ppd->host_link_state
- & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
- queue_link_down = 1;
- dd_dev_info(dd, "Link error: %s\n",
- dc8051_info_err_string(buf,
- sizeof(buf),
- err &
- FAILED_LNI));
- }
- err &= ~(u64)FAILED_LNI;
- }
- /* unknown frames can happen during LNI, just count */
- if (err & UNKNOWN_FRAME) {
- ppd->unknown_frame_count++;
- err &= ~(u64)UNKNOWN_FRAME;
- }
- if (err) {
- /* report remaining errors, but do not do anything */
- dd_dev_err(dd, "8051 info error: %s\n",
- dc8051_info_err_string(buf, sizeof(buf),
- err));
- }
-
- /*
- * Handle host message flags.
- */
- if (host_msg & HOST_REQ_DONE) {
- /*
- * Presently, the driver does a busy wait for
- * host requests to complete. This is only an
- * informational message.
- * NOTE: The 8051 clears the host message
- * information *on the next 8051 command*.
- * Therefore, when linkup is achieved,
- * this flag will still be set.
- */
- host_msg &= ~(u64)HOST_REQ_DONE;
- }
- if (host_msg & BC_SMA_MSG) {
- queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
- host_msg &= ~(u64)BC_SMA_MSG;
- }
- if (host_msg & LINKUP_ACHIEVED) {
- dd_dev_info(dd, "8051: Link up\n");
- queue_work(ppd->hfi1_wq, &ppd->link_up_work);
- host_msg &= ~(u64)LINKUP_ACHIEVED;
- }
- if (host_msg & EXT_DEVICE_CFG_REQ) {
- queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
- host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
- }
- if (host_msg & VERIFY_CAP_FRAME) {
- queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
- host_msg &= ~(u64)VERIFY_CAP_FRAME;
- }
- if (host_msg & LINK_GOING_DOWN) {
- const char *extra = "";
-
- /* no downgrade action needed if going down */
- if (host_msg & LINK_WIDTH_DOWNGRADED) {
- host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
- extra = " (ignoring downgrade)";
- }
- dd_dev_info(dd, "8051: Link down%s\n", extra);
- queue_link_down = 1;
- host_msg &= ~(u64)LINK_GOING_DOWN;
- }
- if (host_msg & LINK_WIDTH_DOWNGRADED) {
- queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
- host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
- }
- if (host_msg) {
- /* report remaining messages, but do not do anything */
- dd_dev_info(dd, "8051 info host message: %s\n",
- dc8051_info_host_msg_string(buf,
- sizeof(buf),
- host_msg));
- }
-
- reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
- }
- if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
- /*
- * Lost the 8051 heartbeat. If this happens, we
- * receive constant interrupts about it. Disable
- * the interrupt after the first.
- */
- dd_dev_err(dd, "Lost 8051 heartbeat\n");
- write_csr(dd, DC_DC8051_ERR_EN,
- read_csr(dd, DC_DC8051_ERR_EN) &
- ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
-
- reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
- }
- if (reg) {
- /* report the error, but do not do anything */
- dd_dev_err(dd, "8051 error: %s\n",
- dc8051_err_string(buf, sizeof(buf), reg));
- }
-
- if (queue_link_down) {
- /*
- * if the link is already going down or disabled, do not
- * queue another
- */
- if ((ppd->host_link_state &
- (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
- ppd->link_enabled == 0) {
- dd_dev_info(dd, "%s: not queuing link down\n",
- __func__);
- } else {
- queue_work(ppd->hfi1_wq, &ppd->link_down_work);
- }
- }
-}
-
-static const char * const fm_config_txt[] = {
-[0] =
- "BadHeadDist: Distance violation between two head flits",
-[1] =
- "BadTailDist: Distance violation between two tail flits",
-[2] =
- "BadCtrlDist: Distance violation between two credit control flits",
-[3] =
- "BadCrdAck: Credits return for unsupported VL",
-[4] =
- "UnsupportedVLMarker: Received VL Marker",
-[5] =
- "BadPreempt: Exceeded the preemption nesting level",
-[6] =
- "BadControlFlit: Received unsupported control flit",
-/* no 7 */
-[8] =
- "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
-};
-
-static const char * const port_rcv_txt[] = {
-[1] =
- "BadPktLen: Illegal PktLen",
-[2] =
- "PktLenTooLong: Packet longer than PktLen",
-[3] =
- "PktLenTooShort: Packet shorter than PktLen",
-[4] =
- "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
-[5] =
- "BadDLID: Illegal DLID (0, doesn't match HFI)",
-[6] =
- "BadL2: Illegal L2 opcode",
-[7] =
- "BadSC: Unsupported SC",
-[9] =
- "BadRC: Illegal RC",
-[11] =
- "PreemptError: Preempting with same VL",
-[12] =
- "PreemptVL15: Preempting a VL15 packet",
-};
-
-#define OPA_LDR_FMCONFIG_OFFSET 16
-#define OPA_LDR_PORTRCV_OFFSET 0
-static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- u64 info, hdr0, hdr1;
- const char *extra;
- char buf[96];
- struct hfi1_pportdata *ppd = dd->pport;
- u8 lcl_reason = 0;
- int do_bounce = 0;
-
- if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
- if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
- info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
- dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
- /* set status bit */
- dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
- }
- reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
- }
-
- if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
- struct hfi1_pportdata *ppd = dd->pport;
- /* this counter saturates at (2^32) - 1 */
- if (ppd->link_downed < (u32)UINT_MAX)
- ppd->link_downed++;
- reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
- }
-
- if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
- u8 reason_valid = 1;
-
- info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
- if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
- dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
- /* set status bit */
- dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
- }
- switch (info) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- extra = fm_config_txt[info];
- break;
- case 8:
- extra = fm_config_txt[info];
- if (ppd->port_error_action &
- OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
- do_bounce = 1;
- /*
- * lcl_reason cannot be derived from info
- * for this error
- */
- lcl_reason =
- OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
- }
- break;
- default:
- reason_valid = 0;
- snprintf(buf, sizeof(buf), "reserved%lld", info);
- extra = buf;
- break;
- }
-
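- /*
- * For example, fmconfig error 2 (BadCtrlDist) tests bit
- * OPA_LDR_FMCONFIG_OFFSET + 2 = 18 of port_error_action.
- */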
- if (reason_valid && !do_bounce) {
- do_bounce = ppd->port_error_action &
- (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
- lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
- }
-
- /* just report this */
- dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
- reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
- }
-
- if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
- u8 reason_valid = 1;
-
- info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
- hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
- hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
- if (!(dd->err_info_rcvport.status_and_code &
- OPA_EI_STATUS_SMASK)) {
- dd->err_info_rcvport.status_and_code =
- info & OPA_EI_CODE_SMASK;
- /* set status bit */
- dd->err_info_rcvport.status_and_code |=
- OPA_EI_STATUS_SMASK;
- /*
- * save first 2 flits in the packet that caused
- * the error
- */
- dd->err_info_rcvport.packet_flit1 = hdr0;
- dd->err_info_rcvport.packet_flit2 = hdr1;
- }
- switch (info) {
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 6:
- case 7:
- case 9:
- case 11:
- case 12:
- extra = port_rcv_txt[info];
- break;
- default:
- reason_valid = 0;
- snprintf(buf, sizeof(buf), "reserved%lld", info);
- extra = buf;
- break;
- }
-
- if (reason_valid && !do_bounce) {
- do_bounce = ppd->port_error_action &
- (1 << (OPA_LDR_PORTRCV_OFFSET + info));
- lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
- }
-
- /* just report this */
- dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
- dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
- hdr0, hdr1);
-
- reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
- }
-
- if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
- /* informative only */
- dd_dev_info(dd, "8051 access to LCB blocked\n");
- reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
- }
- if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
- /* informative only */
- dd_dev_info(dd, "host access to LCB blocked\n");
- reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
- }
-
- /* report any remaining errors */
- if (reg)
- dd_dev_info(dd, "DCC Error: %s\n",
- dcc_err_string(buf, sizeof(buf), reg));
-
- if (lcl_reason == 0)
- lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
-
- if (do_bounce) {
- dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
- set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
- queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
- }
-}
-
-static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
-{
- char buf[96];
-
- dd_dev_info(dd, "LCB Error: %s\n",
- lcb_err_string(buf, sizeof(buf), reg));
-}
-
-/*
- * CCE block DC interrupt. Source is < 8.
- */
-static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
-{
- const struct err_reg_info *eri = &dc_errs[source];
-
- if (eri->handler) {
- interrupt_clear_down(dd, 0, eri);
- } else if (source == 3 /* dc_lbm_int */) {
- /*
- * This indicates that a parity error has occurred on the
- * address/control lines presented to the LBM. The error
- * is a single pulse, there is no associated error flag,
- * and it is non-maskable. This is because if a parity
- * error occurs on the request, the request is dropped.
- * This should never occur, but it is nice to know if it
- * ever does.
- */
- dd_dev_err(dd, "Parity error in DC LBM block\n");
- } else {
- dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
- }
-}
-
-/*
- * TX block send credit interrupt. Source is < 160.
- */
-static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
-{
- sc_group_release_update(dd, source);
-}
-
-/*
- * TX block SDMA interrupt. Source is < 48.
- *
- * SDMA interrupts are grouped by type:
- *
- * 0 - N-1 = SDma
- * N - 2N-1 = SDmaProgress
- * 2N - 3N-1 = SDmaIdle
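- *
- * For example, with N = 16 engines, source 21 decodes to
- * what = 1 (SDmaProgress) on engine 21 % 16 = 5.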
- */
-static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
-{
- /* what interrupt */
- unsigned int what = source / TXE_NUM_SDMA_ENGINES;
- /* which engine */
- unsigned int which = source % TXE_NUM_SDMA_ENGINES;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
- slashstrip(__FILE__), __LINE__, __func__);
- sdma_dumpstate(&dd->per_sdma[which]);
-#endif
-
- if (likely(what < 3 && which < dd->num_sdma)) {
- sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
- } else {
- /* should not happen */
- dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
- }
-}
-
-/*
- * RX block receive available interrupt. Source is < 160.
- */
-static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
-{
- struct hfi1_ctxtdata *rcd;
- char *err_detail;
-
- if (likely(source < dd->num_rcv_contexts)) {
- rcd = dd->rcd[source];
- if (rcd) {
- if (source < dd->first_user_ctxt)
- rcd->do_interrupt(rcd, 0);
- else
- handle_user_interrupt(rcd);
- return; /* OK */
- }
- /* received an interrupt, but no rcd */
- err_detail = "dataless";
- } else {
- /* received an interrupt, but are not using that context */
- err_detail = "out of range";
- }
- dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
- err_detail, source);
-}
-
-/*
- * RX block receive urgent interrupt. Source is < 160.
- */
-static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
-{
- struct hfi1_ctxtdata *rcd;
- char *err_detail;
-
- if (likely(source < dd->num_rcv_contexts)) {
- rcd = dd->rcd[source];
- if (rcd) {
- /* only pay attention to user urgent interrupts */
- if (source >= dd->first_user_ctxt)
- handle_user_interrupt(rcd);
- return; /* OK */
- }
- /* received an interrupt, but no rcd */
- err_detail = "dataless";
- } else {
- /* received an interrupt, but are not using that context */
- err_detail = "out of range";
- }
- dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
- err_detail, source);
-}
-
-/*
- * Reserved range interrupt. Should not be called in normal operation.
- */
-static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
-{
- char name[64];
-
- dd_dev_err(dd, "unexpected %s interrupt\n",
- is_reserved_name(name, sizeof(name), source));
-}
-
-static const struct is_table is_table[] = {
-/*
- * start end
- * name func interrupt func
- */
-{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
- is_misc_err_name, is_misc_err_int },
-{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
- is_sdma_eng_err_name, is_sdma_eng_err_int },
-{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
- is_sendctxt_err_name, is_sendctxt_err_int },
-{ IS_SDMA_START, IS_SDMA_END,
- is_sdma_eng_name, is_sdma_eng_int },
-{ IS_VARIOUS_START, IS_VARIOUS_END,
- is_various_name, is_various_int },
-{ IS_DC_START, IS_DC_END,
- is_dc_name, is_dc_int },
-{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
- is_rcv_avail_name, is_rcv_avail_int },
-{ IS_RCVURGENT_START, IS_RCVURGENT_END,
- is_rcv_urgent_name, is_rcv_urgent_int },
-{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
- is_send_credit_name, is_send_credit_int },
-{ IS_RESERVED_START, IS_RESERVED_END,
- is_reserved_name, is_reserved_int },
-};
-
-/*
- * Interrupt source interrupt - called when the given source has an interrupt.
- * Source is a bit index into an array of 64-bit integers.
- */
-static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
-{
- const struct is_table *entry;
-
- /* avoids a double compare by walking the table in-order */
- for (entry = &is_table[0]; entry->is_name; entry++) {
- if (source < entry->end) {
- trace_hfi1_interrupt(dd, entry, source);
- entry->is_int(dd, source - entry->start);
- return;
- }
- }
- /* fell off the end */
- dd_dev_err(dd, "invalid interrupt source %u\n", source);
-}
-
-/*
- * General interrupt handler. This is able to correctly handle
- * all interrupts in case INTx is used.
- */
-static irqreturn_t general_interrupt(int irq, void *data)
-{
- struct hfi1_devdata *dd = data;
- u64 regs[CCE_NUM_INT_CSRS];
- u32 bit;
- int i;
-
- this_cpu_inc(*dd->int_counter);
-
- /* phase 1: scan and clear all handled interrupts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
- if (dd->gi_mask[i] == 0) {
- regs[i] = 0; /* used later */
- continue;
- }
- regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
- dd->gi_mask[i];
- /* only clear if anything is set */
- if (regs[i])
- write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
- }
-
- /* phase 2: call the appropriate handler */
- for_each_set_bit(bit, (unsigned long *)&regs[0],
- CCE_NUM_INT_CSRS * 64) {
- is_interrupt(dd, bit);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t sdma_interrupt(int irq, void *data)
-{
- struct sdma_engine *sde = data;
- struct hfi1_devdata *dd = sde->dd;
- u64 status;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
- slashstrip(__FILE__), __LINE__, __func__);
- sdma_dumpstate(sde);
-#endif
-
- this_cpu_inc(*dd->int_counter);
-
- /* This read_csr is really bad in the hot path */
- status = read_csr(dd,
- CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
- & sde->imask;
- if (likely(status)) {
- /* clear the interrupt(s) */
- write_csr(dd,
- CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
- status);
-
- /* handle the interrupt(s) */
- sdma_engine_interrupt(sde, status);
- } else {
- dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
- sde->this_idx);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * Clear the receive interrupt. Use a read of the interrupt clear CSR
- * to insure that the write completed. This does NOT guarantee that
- * queued DMA writes to memory from the chip are pushed.
- */
-static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
-
- mmiowb(); /* make sure everything before is written */
- write_csr(dd, addr, rcd->imask);
- /* force the above write on the chip and get a value back */
- (void)read_csr(dd, addr);
-}
-
-/* force the receive interrupt */
-void force_recv_intr(struct hfi1_ctxtdata *rcd)
-{
- write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
-}
-
-/*
- * Return non-zero if a packet is present.
- *
- * This routine is called when rechecking for packets after the RcvAvail
- * interrupt has been cleared down. First, do a quick check of memory for
- * a packet present. If not found, use an expensive CSR read of the context
- * tail to determine the actual tail. The CSR read is necessary because there
- * is no method to push pending DMAs to memory other than an interrupt and we
- * are trying to determine if we need to force an interrupt.
- */
-static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
-{
- u32 tail;
- int present;
-
- if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
- present = (rcd->seq_cnt ==
- rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
- else /* is RDMA rtail */
- present = (rcd->head != get_rcvhdrtail(rcd));
-
- if (present)
- return 1;
-
- /* fall back to a CSR read, correct independent of DMA_RTAIL */
- tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
- return rcd->head != tail;
-}
-
-/*
- * Receive packet IRQ handler. This routine expects to be on its own IRQ.
- * This routine will try to handle packets immediately (latency), but if
- * it finds too many, it will invoke the thread handler (bandwidth). The
- * chip receive interrupt is *not* cleared down until this or the thread (if
- * invoked) is finished. The intent is to avoid extra interrupts while we
- * are processing packets anyway.
- */
-static irqreturn_t receive_context_interrupt(int irq, void *data)
-{
- struct hfi1_ctxtdata *rcd = data;
- struct hfi1_devdata *dd = rcd->dd;
- int disposition;
- int present;
-
- trace_hfi1_receive_interrupt(dd, rcd->ctxt);
- this_cpu_inc(*dd->int_counter);
- aspm_ctx_disable(rcd);
-
- /* receive interrupt remains blocked while processing packets */
- disposition = rcd->do_interrupt(rcd, 0);
-
- /*
- * Too many packets were seen while processing packets in this
- * IRQ handler. Invoke the handler thread. The receive interrupt
- * remains blocked.
- */
- if (disposition == RCV_PKT_LIMIT)
- return IRQ_WAKE_THREAD;
-
- /*
- * The packet processor detected no more packets. Clear the receive
- * interrupt and recheck for a packet that may have arrived
- * after the previous check and interrupt clear. If a packet arrived,
- * force another interrupt.
- */
- clear_recv_intr(rcd);
- present = check_packet_present(rcd);
- if (present)
- force_recv_intr(rcd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Receive packet thread handler. This expects to be invoked with the
- * receive interrupt still blocked.
- */
-static irqreturn_t receive_context_thread(int irq, void *data)
-{
- struct hfi1_ctxtdata *rcd = data;
- int present;
-
- /* receive interrupt is still blocked from the IRQ handler */
- (void)rcd->do_interrupt(rcd, 1);
-
- /*
- * The packet processor will only return if it detected no more
- * packets. Hold IRQs here so we can safely clear the interrupt and
- * recheck for a packet that may have arrived after the previous
- * check and the interrupt clear. If a packet arrived, force another
- * interrupt.
- */
- local_irq_disable();
- clear_recv_intr(rcd);
- present = check_packet_present(rcd);
- if (present)
- force_recv_intr(rcd);
- local_irq_enable();
-
- return IRQ_HANDLED;
-}
-
-/* ========================================================================= */
-
-u32 read_physical_state(struct hfi1_devdata *dd)
-{
- u64 reg;
-
- reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
- return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
- & DC_DC8051_STS_CUR_STATE_PORT_MASK;
-}
-
-u32 read_logical_state(struct hfi1_devdata *dd)
-{
- u64 reg;
-
- reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
- return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
- & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
-}
-
-static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
-{
- u64 reg;
-
- reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
- /* clear current state, set new state */
- reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
- reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
- write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
-}
-
-/*
- * Use the 8051 to read a LCB CSR.
- */
-static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
-{
- u32 regno;
- int ret;
-
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- if (acquire_lcb_access(dd, 0) == 0) {
- *data = read_csr(dd, addr);
- release_lcb_access(dd, 0);
- return 0;
- }
- return -EBUSY;
- }
-
- /* register is an index of LCB registers: (offset - base) / 8 */
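- /* e.g. DC_LCB_CFG_RUN itself maps to index 0 */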
- regno = (addr - DC_LCB_CFG_RUN) >> 3;
- ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
- if (ret != HCMD_SUCCESS)
- return -EBUSY;
- return 0;
-}
-
-/*
- * Read an LCB CSR. Access may not be in host control, so check.
- * Return 0 on success, -EBUSY on failure.
- */
-int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
-{
- struct hfi1_pportdata *ppd = dd->pport;
-
- /* if up, go through the 8051 for the value */
- if (ppd->host_link_state & HLS_UP)
- return read_lcb_via_8051(dd, addr, data);
- /* if going up or down, no access */
- if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
- return -EBUSY;
- /* otherwise, host has access */
- *data = read_csr(dd, addr);
- return 0;
-}
-
-/*
- * Use the 8051 to write a LCB CSR.
- */
-static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
-{
- u32 regno;
- int ret;
-
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
- (dd->dc8051_ver < dc8051_ver(0, 20))) {
- if (acquire_lcb_access(dd, 0) == 0) {
- write_csr(dd, addr, data);
- release_lcb_access(dd, 0);
- return 0;
- }
- return -EBUSY;
- }
-
- /* register is an index of LCB registers: (offset - base) / 8 */
- regno = (addr - DC_LCB_CFG_RUN) >> 3;
- ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
- if (ret != HCMD_SUCCESS)
- return -EBUSY;
- return 0;
-}
-
-/*
- * Write an LCB CSR. Access may not be in host control, so check.
- * Return 0 on success, -EBUSY on failure.
- */
-int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
-{
- struct hfi1_pportdata *ppd = dd->pport;
-
- /* if up, go through the 8051 for the value */
- if (ppd->host_link_state & HLS_UP)
- return write_lcb_via_8051(dd, addr, data);
- /* if going up or down, no access */
- if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
- return -EBUSY;
- /* otherwise, host has access */
- write_csr(dd, addr, data);
- return 0;
-}
-
-/*
- * Returns:
- * < 0 = Linux error, not able to get access
- * > 0 = 8051 command RETURN_CODE
- */
-static int do_8051_command(
- struct hfi1_devdata *dd,
- u32 type,
- u64 in_data,
- u64 *out_data)
-{
- u64 reg, completed;
- int return_code;
- unsigned long flags;
- unsigned long timeout;
-
- hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
-
- /*
- * We hold the lock for the duration of the command. An
- * alternative would be to keep the busy wait but have other
- * users bounce off rather than block.
- */
- spin_lock_irqsave(&dd->dc8051_lock, flags);
-
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
- /*
- * If an 8051 host command timed out previously, then the 8051 is
- * stuck.
- *
- * On first timeout, attempt to reset and restart the entire DC
- * block (including 8051). (Is this too big of a hammer?)
- *
- * If the 8051 times out a second time, the reset did not bring it
- * back to healthy life. In that case, fail any subsequent commands.
- */
- if (dd->dc8051_timed_out) {
- if (dd->dc8051_timed_out > 1) {
- dd_dev_err(dd,
- "Previous 8051 host command timed out, skipping command %u\n",
- type);
- return_code = -ENXIO;
- goto fail;
- }
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
- dc_shutdown(dd);
- dc_start(dd);
- spin_lock_irqsave(&dd->dc8051_lock, flags);
- }
-
- /*
- * If there is no timeout, then the 8051 command interface is
- * waiting for a command.
- */
-
- /*
- * When writing an LCB CSR, out_data contains the full value
- * to be written, while in_data contains the relative LCB
- * address in 7:0. Do the work here, rather than the caller,
- * of distributing the write data to where it needs to go:
- *
- * Write data
- * 39:00 -> in_data[47:8]
- * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
- * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
- */
- if (type == HCMD_WRITE_LCB_CSR) {
- in_data |= ((*out_data) & 0xffffffffffull) << 8;
- reg = ((((*out_data) >> 40) & 0xff) <<
- DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
- | ((((*out_data) >> 48) & 0xffff) <<
- DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
- write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
- }
-
- /*
- * Do two writes: the first to stabilize the type and req_data, the
- * second to activate.
- */
- reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
- << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
- | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
- << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
- write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
- reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
- write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
-
- /* wait for completion, alternate: interrupt */
- timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
- while (1) {
- reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
- completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
- if (completed)
- break;
- if (time_after(jiffies, timeout)) {
- dd->dc8051_timed_out++;
- dd_dev_err(dd, "8051 host command %u timeout\n", type);
- if (out_data)
- *out_data = 0;
- return_code = -ETIMEDOUT;
- goto fail;
- }
- udelay(2);
- }
-
- if (out_data) {
- *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
- & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
- if (type == HCMD_READ_LCB_CSR) {
- /* top 16 bits are in a different register */
- *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
- & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
- << (48
- - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
- }
- }
- return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
- & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
- dd->dc8051_timed_out = 0;
- /*
- * Clear command for next user.
- */
- write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
-
-fail:
- spin_unlock_irqrestore(&dd->dc8051_lock, flags);
-
- return return_code;
-}
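
A minimal, self-contained sketch of the HCMD_WRITE_LCB_CSR data split implemented above: bits 39:0 of the write value ride in the command's in_data above the 8-bit LCB address, and bits 47:40/63:48 are staged in EXT_DEV_0. The masks mirror the code; the EXT_DEV_0 field shifts (0 and 8) and the helper name split_lcb_write() are assumptions for illustration, not the driver's API.

#include <assert.h>
#include <stdint.h>

static void split_lcb_write(uint8_t lcb_addr, uint64_t val,
			    uint64_t *req_data, uint64_t *ext_dev_0)
{
	/* relative LCB address in 7:0, write data 39:0 in 47:8 */
	*req_data = (uint64_t)lcb_addr | ((val & 0xffffffffffull) << 8);
	/* bits 47:40 and 63:48 go to the EXT_DEV_0 staging register */
	*ext_dev_0 = ((val >> 40) & 0xff) | (((val >> 48) & 0xffff) << 8);
}

int main(void)
{
	uint64_t req, ext;

	split_lcb_write(0x20, 0xABCD00000000F00Dull, &req, &ext);
	assert((req & 0xff) == 0x20);			/* LCB address */
	assert(((req >> 8) & 0xffffffffffull) == 0xF00D); /* data 39:0 */
	assert((ext & 0xff) == 0x00);			/* data 47:40 */
	assert(((ext >> 8) & 0xffff) == 0xABCD);	/* data 63:48 */
	return 0;
}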
-
-static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
-{
- return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
-}
-
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
-{
- u64 data;
- int ret;
-
- data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
- | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
- | (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "load 8051 config: field id %d, lane %d, err %d\n",
- (int)field_id, (int)lane_id, ret);
- }
- return ret;
-}
-
-/*
- * Read the 8051 firmware "registers". Use the RAM directly. Always
- * set the result, even on error.
- * Return 0 on success, -errno on failure
- */
-int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
- u32 *result)
-{
- u64 big_data;
- u32 addr;
- int ret;
-
- /* address start depends on the lane_id */
- if (lane_id < 4)
- addr = (4 * NUM_GENERAL_FIELDS)
- + (lane_id * 4 * NUM_LANE_FIELDS);
- else
- addr = 0;
- addr += field_id * 4;
-
- /* read is in 8-byte chunks, hardware will truncate the address down */
- ret = read_8051_data(dd, addr, 8, &big_data);
-
- if (ret == 0) {
- /* extract the 4 bytes we want */
- if (addr & 0x4)
- *result = (u32)(big_data >> 32);
- else
- *result = (u32)big_data;
- } else {
- *result = 0;
- dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
- __func__, lane_id, field_id);
- }
-
- return ret;
-}
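
A standalone sketch of the 8051 "register" addressing above: general fields sit at the start of the RAM view, per-lane fields follow, reads come back in 8-byte chunks, and bit 2 of the address selects the dword. NUM_GENERAL_FIELDS and NUM_LANE_FIELDS are real identifiers but their values here are assumed for illustration.

#include <assert.h>
#include <stdint.h>

#define NUM_GENERAL_FIELDS 16	/* assumed value, for illustration */
#define NUM_LANE_FIELDS    8	/* assumed value, for illustration */

static uint32_t extract_8051_field(uint8_t field_id, uint8_t lane_id,
				   uint64_t big_data)
{
	uint32_t addr;

	if (lane_id < 4)	/* per-lane fields follow the general block */
		addr = 4 * NUM_GENERAL_FIELDS + lane_id * 4 * NUM_LANE_FIELDS;
	else			/* GENERAL_CONFIG (lane_id >= 4) */
		addr = 0;
	addr += field_id * 4;

	/* hardware truncates the read to an 8-byte boundary, so the
	 * wanted dword is the high or low half of the chunk */
	return (addr & 0x4) ? (uint32_t)(big_data >> 32) : (uint32_t)big_data;
}

int main(void)
{
	uint64_t chunk = 0x1111111122222222ull;

	/* general field 0 sits at addr 0 -> low dword */
	assert(extract_8051_field(0, 4, chunk) == 0x22222222);
	/* general field 1 sits at addr 4 -> high dword of the same chunk */
	assert(extract_8051_field(1, 4, chunk) == 0x11111111);
	return 0;
}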
-
-static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
- u8 continuous)
-{
- u32 frame;
-
- frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
- | power_management << POWER_MANAGEMENT_SHIFT;
- return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
- GENERAL_CONFIG, frame);
-}
-
-static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
- u16 vl15buf, u8 crc_sizes)
-{
- u32 frame;
-
- frame = (u32)vau << VAU_SHIFT
- | (u32)z << Z_SHIFT
- | (u32)vcu << VCU_SHIFT
- | (u32)vl15buf << VL15BUF_SHIFT
- | (u32)crc_sizes << CRC_SIZES_SHIFT;
- return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
- GENERAL_CONFIG, frame);
-}
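
A sketch of the shift/mask "frame" convention shared by the write_vc_local_* and read_vc_remote_* pairs above. The shift and mask values are invented stand-ins for the real chip.h constants, and pack_vc_local_fabric() is a hypothetical helper; only the packing pattern itself is taken from the code.

#include <assert.h>
#include <stdint.h>

#define VAU_SHIFT	0	/* all values below are stand-ins */
#define VAU_MASK	0x7
#define Z_SHIFT		3
#define VCU_SHIFT	4
#define VL15BUF_SHIFT	8
#define VL15BUF_MASK	0xfff
#define CRC_SIZES_SHIFT	20

static uint32_t pack_vc_local_fabric(uint8_t vau, uint8_t z, uint8_t vcu,
				     uint16_t vl15buf, uint8_t crc_sizes)
{
	return (uint32_t)vau << VAU_SHIFT
	     | (uint32_t)z << Z_SHIFT
	     | (uint32_t)vcu << VCU_SHIFT
	     | (uint32_t)vl15buf << VL15BUF_SHIFT
	     | (uint32_t)crc_sizes << CRC_SIZES_SHIFT;
}

int main(void)
{
	uint32_t frame = pack_vc_local_fabric(3, 1, 2, 0x120, 0x5);

	/* the read side recovers each field with the same shift/mask */
	assert(((frame >> VAU_SHIFT) & VAU_MASK) == 3);
	assert(((frame >> VL15BUF_SHIFT) & VL15BUF_MASK) == 0x120);
	return 0;
}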
-
-static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
- u8 *flag_bits, u16 *link_widths)
-{
- u32 frame;
-
- read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
- &frame);
- *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
- *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
- *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
-}
-
-static int write_vc_local_link_width(struct hfi1_devdata *dd,
- u8 misc_bits,
- u8 flag_bits,
- u16 link_widths)
-{
- u32 frame;
-
- frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
- | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
- | (u32)link_widths << LINK_WIDTH_SHIFT;
- return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
- frame);
-}
-
-static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
- u8 device_rev)
-{
- u32 frame;
-
- frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
- | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
- return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
-}
-
-static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
- u8 *device_rev)
-{
- u32 frame;
-
- read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
- *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
- *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
- & REMOTE_DEVICE_REV_MASK;
-}
-
-void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
-{
- u32 frame;
-
- read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
- *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
- *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
-}
-
-static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
- u8 *continuous)
-{
- u32 frame;
-
- read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
- *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
- & POWER_MANAGEMENT_MASK;
- *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
- & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
-}
-
-static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
- u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
-{
- u32 frame;
-
- read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
- *vau = (frame >> VAU_SHIFT) & VAU_MASK;
- *z = (frame >> Z_SHIFT) & Z_MASK;
- *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
- *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
- *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
-}
-
-static void read_vc_remote_link_width(struct hfi1_devdata *dd,
- u8 *remote_tx_rate,
- u16 *link_widths)
-{
- u32 frame;
-
- read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
- &frame);
- *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
- & REMOTE_TX_RATE_MASK;
- *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
-}
-
-static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
-{
- u32 frame;
-
- read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
- *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
-}
-
-static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
-{
- u32 frame;
-
- read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
- *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
-}
-
-static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
-{
- read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
-}
-
-static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
-{
- read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
-}
-
-void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
-{
- u32 frame;
- int ret;
-
- *link_quality = 0;
- if (dd->pport->host_link_state & HLS_UP) {
- ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
- &frame);
- if (ret == 0)
- *link_quality = (frame >> LINK_QUALITY_SHIFT)
- & LINK_QUALITY_MASK;
- }
-}
-
-static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
-{
- u32 frame;
-
- read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
- *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
-}
-
-static int read_tx_settings(struct hfi1_devdata *dd,
- u8 *enable_lane_tx,
- u8 *tx_polarity_inversion,
- u8 *rx_polarity_inversion,
- u8 *max_rate)
-{
- u32 frame;
- int ret;
-
- ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
- *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
- & ENABLE_LANE_TX_MASK;
- *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
- & TX_POLARITY_INVERSION_MASK;
- *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
- & RX_POLARITY_INVERSION_MASK;
- *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
- return ret;
-}
-
-static int write_tx_settings(struct hfi1_devdata *dd,
- u8 enable_lane_tx,
- u8 tx_polarity_inversion,
- u8 rx_polarity_inversion,
- u8 max_rate)
-{
- u32 frame;
-
- /* no need to mask, all variable sizes match field widths */
- frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
- | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
- | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
- | max_rate << MAX_RATE_SHIFT;
- return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
-}
-
-static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
-{
- u32 frame, version, prod_id;
- int ret, lane;
-
- /* 4 lanes */
- for (lane = 0; lane < 4; lane++) {
- ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
- if (ret) {
- dd_dev_err(dd,
- "Unable to read lane %d firmware details\n",
- lane);
- continue;
- }
- version = (frame >> SPICO_ROM_VERSION_SHIFT)
- & SPICO_ROM_VERSION_MASK;
- prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
- & SPICO_ROM_PROD_ID_MASK;
- dd_dev_info(dd,
- "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
- lane, version, prod_id);
- }
-}
-
-/*
- * Read an idle LCB message.
- *
- * Returns 0 on success, -EINVAL on error
- */
-static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
-{
- int ret;
-
- ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd, "read idle message: type %d, err %d\n",
- (u32)type, ret);
- return -EINVAL;
- }
- dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
- /* return only the payload as we already know the type */
- *data_out >>= IDLE_PAYLOAD_SHIFT;
- return 0;
-}
-
-/*
- * Read an idle SMA message. To be done in response to a notification from
- * the 8051.
- *
- * Returns 0 on success, -EINVAL on error
- */
-static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
-{
- return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
- data);
-}
-
-/*
- * Send an idle LCB message.
- *
- * Returns 0 on success, -EINVAL on error
- */
-static int send_idle_message(struct hfi1_devdata *dd, u64 data)
-{
- int ret;
-
- dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
- ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
- data, ret);
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * Send an idle SMA message.
- *
- * Returns 0 on success, -EINVAL on error
- */
-int send_idle_sma(struct hfi1_devdata *dd, u64 message)
-{
- u64 data;
-
- data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
- ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
- return send_idle_message(dd, data);
-}
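
A standalone illustration of the idle-message framing used by send_idle_sma() and read_idle_sma(): the message type occupies the low bits and the payload sits above it, which is why the reader simply shifts the payload down. The shift positions and the IDLE_SMA type code are assumed values for illustration.

#include <assert.h>
#include <stdint.h>

#define IDLE_MSG_TYPE_SHIFT	0	/* assumed */
#define IDLE_PAYLOAD_SHIFT	8	/* assumed */
#define IDLE_PAYLOAD_MASK	0xffffffffffffffull
#define IDLE_SMA		1	/* assumed type code */

static uint64_t build_idle_sma(uint64_t payload)
{
	return ((payload & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
	       ((uint64_t)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
}

int main(void)
{
	uint64_t msg = build_idle_sma(0xdead);

	/* the reader strips the known type and keeps only the payload */
	assert((msg >> IDLE_PAYLOAD_SHIFT) == 0xdead);
	assert((msg & 0xff) == IDLE_SMA);
	return 0;
}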
-
-/*
- * Initialize the LCB then do a quick link up. This may or may not be
- * in loopback.
- *
- * return 0 on success, -errno on error
- */
-static int do_quick_linkup(struct hfi1_devdata *dd)
-{
- u64 reg;
- unsigned long timeout;
- int ret;
-
- lcb_shutdown(dd, 0);
-
- if (loopback) {
- /* LCB_CFG_LOOPBACK.VAL = 2 */
- /* LCB_CFG_LANE_WIDTH.VAL = 0 */
- write_csr(dd, DC_LCB_CFG_LOOPBACK,
- IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
- write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
- }
-
- /* start the LCBs */
- /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
- write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
-
-	/* simulator-only loopback steps */
- if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- /* LCB_CFG_RUN.EN = 1 */
- write_csr(dd, DC_LCB_CFG_RUN,
- 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
-
- /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
- timeout = jiffies + msecs_to_jiffies(10);
- while (1) {
- reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
- if (reg)
- break;
- if (time_after(jiffies, timeout)) {
- dd_dev_err(dd,
- "timeout waiting for LINK_TRANSFER_ACTIVE\n");
- return -ETIMEDOUT;
- }
- udelay(2);
- }
-
- write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
- 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
- }
-
- if (!loopback) {
- /*
- * When doing quick linkup and not in loopback, both
- * sides must be done with LCB set-up before either
- * starts the quick linkup. Put a delay here so that
- * both sides can be started and have a chance to be
- * done with LCB set up before resuming.
- */
- dd_dev_err(dd,
- "Pausing for peer to be finished with LCB set up\n");
- msleep(5000);
- dd_dev_err(dd, "Continuing with quick linkup\n");
- }
-
- write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
- set_8051_lcb_access(dd);
-
- /*
- * State "quick" LinkUp request sets the physical link state to
- * LinkUp without a verify capability sequence.
- * This state is in simulator v37 and later.
- */
- ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "%s: set physical link state to quick LinkUp failed with return %d\n",
- __func__, ret);
-
- set_host_lcb_access(dd);
- write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
-
- if (ret >= 0)
- ret = -EINVAL;
- return ret;
- }
-
- return 0; /* success */
-}
-
-/*
- * Set the SerDes to internal loopback mode.
- * Returns 0 on success, -errno on error.
- */
-static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
- if (ret == HCMD_SUCCESS)
- return 0;
- dd_dev_err(dd,
- "Set physical link state to SerDes Loopback failed with return %d\n",
- ret);
- if (ret >= 0)
- ret = -EINVAL;
- return ret;
-}
-
-/*
- * Do all special steps to set up loopback.
- */
-static int init_loopback(struct hfi1_devdata *dd)
-{
- dd_dev_info(dd, "Entering loopback mode\n");
-
- /* all loopbacks should disable self GUID check */
- write_csr(dd, DC_DC8051_CFG_MODE,
- (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
-
- /*
- * The simulator has only one loopback option - LCB. Switch
- * to that option, which includes quick link up.
- *
- * Accept all valid loopback values.
- */
- if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
- (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
- loopback == LOOPBACK_CABLE)) {
- loopback = LOOPBACK_LCB;
- quick_linkup = 1;
- return 0;
- }
-
- /* handle serdes loopback */
- if (loopback == LOOPBACK_SERDES) {
-		/* internal serdes loopback needs quick linkup on RTL */
- if (dd->icode == ICODE_RTL_SILICON)
- quick_linkup = 1;
- return set_serdes_loopback_mode(dd);
- }
-
- /* LCB loopback - handled at poll time */
- if (loopback == LOOPBACK_LCB) {
- quick_linkup = 1; /* LCB is always quick linkup */
-
- /* not supported in emulation due to emulation RTL changes */
- if (dd->icode == ICODE_FPGA_EMULATION) {
- dd_dev_err(dd,
- "LCB loopback not supported in emulation\n");
- return -EINVAL;
- }
- return 0;
- }
-
- /* external cable loopback requires no extra steps */
- if (loopback == LOOPBACK_CABLE)
- return 0;
-
- dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
- return -EINVAL;
-}
-
-/*
- * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
- * used in the Verify Capability link width attribute.
- */
-static u16 opa_to_vc_link_widths(u16 opa_widths)
-{
- int i;
- u16 result = 0;
-
- static const struct link_bits {
- u16 from;
- u16 to;
- } opa_link_xlate[] = {
- { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
- { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
- { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
- { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
- };
-
- for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
- if (opa_widths & opa_link_xlate[i].from)
- result |= opa_link_xlate[i].to;
- }
- return result;
-}
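
A worked instance of opa_to_vc_link_widths(): an FM request of 1X|4X maps to VC bits (1 << 0) | (1 << 3). The one-hot OPA_LINK_WIDTH_* values below are assumptions for illustration.

#include <assert.h>
#include <stdint.h>

#define OPA_LINK_WIDTH_1X 0x1	/* assumed one-hot encodings */
#define OPA_LINK_WIDTH_4X 0x8

int main(void)
{
	uint16_t opa = OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X;
	uint16_t vc = 0;

	/* the VC attribute wants bit (N - 1) set for an NX width */
	if (opa & OPA_LINK_WIDTH_1X)
		vc |= 1 << (1 - 1);
	if (opa & OPA_LINK_WIDTH_4X)
		vc |= 1 << (4 - 1);
	assert(vc == 0x9);
	return 0;
}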
-
-/*
- * Set link attributes before moving to polling.
- */
-static int set_local_link_attributes(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u8 enable_lane_tx;
- u8 tx_polarity_inversion;
- u8 rx_polarity_inversion;
- int ret;
-
- /* reset our fabric serdes to clear any lingering problems */
- fabric_serdes_reset(dd);
-
- /* set the local tx rate - need to read-modify-write */
- ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
- &rx_polarity_inversion, &ppd->local_tx_rate);
- if (ret)
- goto set_local_link_attributes_fail;
-
- if (dd->dc8051_ver < dc8051_ver(0, 20)) {
- /* set the tx rate to the fastest enabled */
- if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
- ppd->local_tx_rate = 1;
- else
- ppd->local_tx_rate = 0;
- } else {
- /* set the tx rate to all enabled */
- ppd->local_tx_rate = 0;
- if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
- ppd->local_tx_rate |= 2;
- if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
- ppd->local_tx_rate |= 1;
- }
-
- enable_lane_tx = 0xF; /* enable all four lanes */
- ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
- rx_polarity_inversion, ppd->local_tx_rate);
- if (ret != HCMD_SUCCESS)
- goto set_local_link_attributes_fail;
-
- /*
- * DC supports continuous updates.
- */
- ret = write_vc_local_phy(dd,
- 0 /* no power management */,
- 1 /* continuous updates */);
- if (ret != HCMD_SUCCESS)
- goto set_local_link_attributes_fail;
-
- /* z=1 in the next call: AU of 0 is not supported by the hardware */
- ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
- ppd->port_crc_mode_enabled);
- if (ret != HCMD_SUCCESS)
- goto set_local_link_attributes_fail;
-
- ret = write_vc_local_link_width(dd, 0, 0,
- opa_to_vc_link_widths(
- ppd->link_width_enabled));
- if (ret != HCMD_SUCCESS)
- goto set_local_link_attributes_fail;
-
- /* let peer know who we are */
- ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
- if (ret == HCMD_SUCCESS)
- return 0;
-
-set_local_link_attributes_fail:
- dd_dev_err(dd,
- "Failed to set local link attributes, return 0x%x\n",
- ret);
- return ret;
-}
-
-/*
- * Call this to start the link. Schedule a retry if the cable is not
- * present or if unable to start polling. Do not do anything if the
- * link is disabled. Returns 0 if link is disabled or moved to polling.
- */
-int start_link(struct hfi1_pportdata *ppd)
-{
- if (!ppd->link_enabled) {
- dd_dev_info(ppd->dd,
- "%s: stopping link start because link is disabled\n",
- __func__);
- return 0;
- }
- if (!ppd->driver_link_ready) {
- dd_dev_info(ppd->dd,
- "%s: stopping link start because driver is not ready\n",
- __func__);
- return 0;
- }
-
- if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
- loopback == LOOPBACK_LCB ||
- ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
- return set_link_state(ppd, HLS_DN_POLL);
-
- dd_dev_info(ppd->dd,
- "%s: stopping link start because no cable is present\n",
- __func__);
- return -EAGAIN;
-}
-
-static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 mask;
- unsigned long timeout;
-
- /*
- * Check for QSFP interrupt for t_init (SFF 8679)
- */
- timeout = jiffies + msecs_to_jiffies(2000);
- while (1) {
- mask = read_csr(dd, dd->hfi1_id ?
- ASIC_QSFP2_IN : ASIC_QSFP1_IN);
- if (!(mask & QSFP_HFI0_INT_N)) {
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
- ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
- break;
- }
- if (time_after(jiffies, timeout)) {
- dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
- __func__);
- break;
- }
- udelay(2);
- }
-}
-
-static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 mask;
-
- mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
- if (enable)
- mask |= (u64)QSFP_HFI0_INT_N;
- else
- mask &= ~(u64)QSFP_HFI0_INT_N;
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
-}
-
-void reset_qsfp(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 mask, qsfp_mask;
-
- /* Disable INT_N from triggering QSFP interrupts */
- set_qsfp_int_n(ppd, 0);
-
- /* Reset the QSFP */
- mask = (u64)QSFP_HFI0_RESET_N;
- qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
- qsfp_mask |= mask;
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
-
- qsfp_mask = read_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
- qsfp_mask &= ~mask;
- write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
-
- udelay(10);
-
- qsfp_mask |= mask;
- write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
-
- wait_for_qsfp_init(ppd);
-
- /*
- * Allow INT_N to trigger the QSFP interrupt to watch
- * for alarms and warnings
- */
- set_qsfp_int_n(ppd, 1);
-}
-
-static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
- u8 *qsfp_interrupt_status)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
- (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
- dd_dev_info(dd, "%s: QSFP cable on fire\n",
- __func__);
-
- if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
- (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
- dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
- __func__);
-
- if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
- (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
- dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
- (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
- dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
- __func__);
-
- /* Byte 2 is vendor specific */
-
- if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
- __func__);
-
- if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
- __func__);
-
- if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
- (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
- (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
- __func__);
-
- if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
- (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
- (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
- __func__);
-
- if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
- __func__);
-
- if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
- __func__);
-
- if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
- __func__);
-
- /* Bytes 9-10 and 11-12 are reserved */
- /* Bytes 13-15 are vendor specific */
-
- return 0;
-}
-
-/* This routine will only be scheduled if the QSFP module is present */
-void qsfp_event(struct work_struct *work)
-{
- struct qsfp_data *qd;
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd;
-
- qd = container_of(work, struct qsfp_data, qsfp_work);
- ppd = qd->ppd;
- dd = ppd->dd;
-
- /* Sanity check */
- if (!qsfp_mod_present(ppd))
- return;
-
- /*
-	 * Turn DC back on after the cable has been
- * re-inserted. Up until now, the DC has been in
- * reset to save power.
- */
- dc_start(dd);
-
- if (qd->cache_refresh_required) {
- set_qsfp_int_n(ppd, 0);
-
- wait_for_qsfp_init(ppd);
-
- /*
- * Allow INT_N to trigger the QSFP interrupt to watch
- * for alarms and warnings
- */
- set_qsfp_int_n(ppd, 1);
-
- tune_serdes(ppd);
-
- start_link(ppd);
- }
-
- if (qd->check_interrupt_flags) {
- u8 qsfp_interrupt_status[16] = {0,};
-
- if (one_qsfp_read(ppd, dd->hfi1_id, 6,
- &qsfp_interrupt_status[0], 16) != 16) {
- dd_dev_info(dd,
- "%s: Failed to read status of QSFP module\n",
- __func__);
- } else {
- unsigned long flags;
-
- handle_qsfp_error_conditions(
- ppd, qsfp_interrupt_status);
- spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
- ppd->qsfp_info.check_interrupt_flags = 0;
- spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
- flags);
- }
- }
-}
-
-static void init_qsfp_int(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd = dd->pport;
- u64 qsfp_mask, cce_int_mask;
- const int qsfp1_int_smask = QSFP1_INT % 64;
- const int qsfp2_int_smask = QSFP2_INT % 64;
-
- /*
- * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
- * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
- * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
- * the index of the appropriate CSR in the CCEIntMask CSR array
- */
- cce_int_mask = read_csr(dd, CCE_INT_MASK +
- (8 * (QSFP1_INT / 64)));
- if (dd->hfi1_id) {
- cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
- cce_int_mask);
- } else {
- cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
- cce_int_mask);
- }
-
- qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
- /* Clear current status to avoid spurious interrupts */
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
- qsfp_mask);
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
- qsfp_mask);
-
- set_qsfp_int_n(ppd, 0);
-
- /* Handle active low nature of INT_N and MODPRST_N pins */
- if (qsfp_mod_present(ppd))
- qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
- write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
- qsfp_mask);
-}
-
-/*
- * Do a one-time initialize of the LCB block.
- */
-static void init_lcb(struct hfi1_devdata *dd)
-{
- /* simulator does not correctly handle LCB cclk loopback, skip */
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
- return;
-
- /* the DC has been reset earlier in the driver load */
-
- /* set LCB for cclk loopback on the port */
- write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
- write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
- write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
- write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
- write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
- write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
- write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
-}
-
-int bringup_serdes(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 guid;
- int ret;
-
- if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
- add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
-
- guid = ppd->guid;
- if (!guid) {
- if (dd->base_guid)
- guid = dd->base_guid + ppd->port - 1;
- ppd->guid = guid;
- }
-
- /* Set linkinit_reason on power up per OPA spec */
- ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
-
- /* one-time init of the LCB */
- init_lcb(dd);
-
- if (loopback) {
- ret = init_loopback(dd);
- if (ret < 0)
- return ret;
- }
-
- /* tune the SERDES to a ballpark setting for
- * optimal signal and bit error rate
- * Needs to be done before starting the link
- */
- tune_serdes(ppd);
-
- return start_link(ppd);
-}
-
-void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- /*
-	 * Shut down the link and keep it down. First clear the flag
-	 * that says the driver wants to allow the link to be up
-	 * (driver_link_ready). Then make sure the link is not
-	 * automatically restarted (link_enabled), cancel any pending
-	 * restart, and finally go offline.
- */
- ppd->driver_link_ready = 0;
- ppd->link_enabled = 0;
-
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
- OPA_LINKDOWN_REASON_SMA_DISABLED);
- set_link_state(ppd, HLS_DN_OFFLINE);
-
- /* disable the port */
- clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
-}
-
-static inline int init_cpu_counters(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd;
- int i;
-
- ppd = (struct hfi1_pportdata *)(dd + 1);
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- ppd->ibport_data.rvp.rc_acks = NULL;
- ppd->ibport_data.rvp.rc_qacks = NULL;
- ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
- ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
- ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
- if (!ppd->ibport_data.rvp.rc_acks ||
- !ppd->ibport_data.rvp.rc_delayed_comp ||
- !ppd->ibport_data.rvp.rc_qacks)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static const char * const pt_names[] = {
- "expected",
- "eager",
- "invalid"
-};
-
-static const char *pt_name(u32 type)
-{
- return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
-}
-
-/*
- * index is the index into the receive array
- */
-void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
- u32 type, unsigned long pa, u16 order)
-{
- u64 reg;
- void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
- (dd->kregbase + RCV_ARRAY));
-
- if (!(dd->flags & HFI1_PRESENT))
- goto done;
-
- if (type == PT_INVALID) {
- pa = 0;
- } else if (type > PT_INVALID) {
- dd_dev_err(dd,
- "unexpected receive array type %u for index %u, not handled\n",
- type, index);
- goto done;
- }
-
- hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
- pt_name(type), index, pa, (unsigned long)order);
-
-#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
- reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
- | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
- | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
- << RCV_ARRAY_RT_ADDR_SHIFT;
- writeq(reg, base + (index * 8));
-
- if (type == PT_EAGER)
- /*
- * Eager entries are written one-by-one so we have to push them
- * after we write the entry.
- */
- flush_wc();
-done:
- return;
-}
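
A sketch of the RcvArray entry layout written by hfi1_put_tid(): a write-enable bit, the buffer-size order, and the physical address shifted down to a 4KB boundary. RT_ADDR_SHIFT of 12 matches the code; the other field positions are stand-ins for the RCV_ARRAY_RT_* constants.

#include <assert.h>
#include <stdint.h>

#define RT_WRITE_ENABLE		(1ull << 63)	/* assumed bit position */
#define RT_BUF_SIZE_SHIFT	48		/* assumed */
#define RT_ADDR_MASK		0xffffffffffull	/* assumed */
#define RT_ADDR_SHIFT		12	/* 4KB boundary, as in the code */

static uint64_t encode_rcvarray(uint64_t pa, uint16_t order)
{
	return RT_WRITE_ENABLE
	     | (uint64_t)order << RT_BUF_SIZE_SHIFT
	     | ((pa >> RT_ADDR_SHIFT) & RT_ADDR_MASK);
}

int main(void)
{
	/* a 4KB-aligned buffer at 0x12345000 with order 1 */
	uint64_t reg = encode_rcvarray(0x12345000ull, 1);

	assert(reg & RT_WRITE_ENABLE);
	assert((reg & RT_ADDR_MASK) == 0x12345);	/* pa >> 12 */
	return 0;
}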
-
-void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- u32 i;
-
- /* this could be optimized */
- for (i = rcd->eager_base; i < rcd->eager_base +
- rcd->egrbufs.alloced; i++)
- hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
-
- for (i = rcd->expected_base;
- i < rcd->expected_base + rcd->expected_count; i++)
- hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
-}
-
-int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
- struct hfi1_ctxt_info *kinfo)
-{
- kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
- HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
- return 0;
-}
-
-struct hfi1_message_header *hfi1_get_msgheader(
- struct hfi1_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
-
- return (struct hfi1_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-static const char * const ib_cfg_name_strings[] = {
- "HFI1_IB_CFG_LIDLMC",
- "HFI1_IB_CFG_LWID_DG_ENB",
- "HFI1_IB_CFG_LWID_ENB",
- "HFI1_IB_CFG_LWID",
- "HFI1_IB_CFG_SPD_ENB",
- "HFI1_IB_CFG_SPD",
- "HFI1_IB_CFG_RXPOL_ENB",
- "HFI1_IB_CFG_LREV_ENB",
- "HFI1_IB_CFG_LINKLATENCY",
- "HFI1_IB_CFG_HRTBT",
- "HFI1_IB_CFG_OP_VLS",
- "HFI1_IB_CFG_VL_HIGH_CAP",
- "HFI1_IB_CFG_VL_LOW_CAP",
- "HFI1_IB_CFG_OVERRUN_THRESH",
- "HFI1_IB_CFG_PHYERR_THRESH",
- "HFI1_IB_CFG_LINKDEFAULT",
- "HFI1_IB_CFG_PKEYS",
- "HFI1_IB_CFG_MTU",
- "HFI1_IB_CFG_LSTATE",
- "HFI1_IB_CFG_VL_HIGH_LIMIT",
- "HFI1_IB_CFG_PMA_TICKS",
- "HFI1_IB_CFG_PORT"
-};
-
-static const char *ib_cfg_name(int which)
-{
- if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
- return "invalid";
- return ib_cfg_name_strings[which];
-}
-
-int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
-{
- struct hfi1_devdata *dd = ppd->dd;
- int val = 0;
-
- switch (which) {
- case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
- val = ppd->link_width_enabled;
- break;
- case HFI1_IB_CFG_LWID: /* currently active Link-width */
- val = ppd->link_width_active;
- break;
- case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
- val = ppd->link_speed_enabled;
- break;
- case HFI1_IB_CFG_SPD: /* current Link speed */
- val = ppd->link_speed_active;
- break;
-
- case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
- case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
- case HFI1_IB_CFG_LINKLATENCY:
- goto unimplemented;
-
- case HFI1_IB_CFG_OP_VLS:
- val = ppd->vls_operational;
- break;
- case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
- val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
- break;
- case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
- val = VL_ARB_LOW_PRIO_TABLE_SIZE;
- break;
- case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- val = ppd->overrun_threshold;
- break;
- case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- val = ppd->phy_error_threshold;
- break;
- case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- val = dd->link_default;
- break;
-
- case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
- case HFI1_IB_CFG_PMA_TICKS:
- default:
-unimplemented:
- if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
- dd_dev_info(
- dd,
- "%s: which %s: not implemented\n",
- __func__,
- ib_cfg_name(which));
- break;
- }
-
- return val;
-}
-
-/*
- * The largest MAD packet size.
- */
-#define MAX_MAD_PACKET 2048
-
-/*
- * Return the maximum header bytes that can go on the _wire_
- * for this device. This count includes the ICRC, which is
- * not part of the packet held in memory but is appended
- * by the HW.
- * This is dependent on the device's receive header entry size.
- * HFI allows this to be set per-receive context, but the
- * driver presently enforces a global value.
- */
-u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
-{
- /*
- * The maximum non-payload (MTU) bytes in LRH.PktLen are
- * the Receive Header Entry Size minus the PBC (or RHF) size
- * plus one DW for the ICRC appended by HW.
- *
- * dd->rcd[0].rcvhdrqentsize is in DW.
-	 * We use rcd[0] as all contexts will have the same value. Also,
- * the first kernel context would have been allocated by now so
- * we are guaranteed a valid value.
- */
- return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
-}
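
A worked instance of the arithmetic above, assuming a hypothetical rcvhdrqentsize of 32 DW:

#include <assert.h>

int main(void)
{
	int rcvhdrqentsize = 32;	/* DW; hypothetical configuration */
	/* drop the PBC/RHF (2 DW), add one DW for the HW-appended ICRC,
	 * then convert DW to bytes */
	int max_hdr_bytes = (rcvhdrqentsize - 2 + 1) << 2;

	assert(max_hdr_bytes == 124);
	return 0;
}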
-
-/*
- * Set Send Length
- * @ppd - per port data
- *
- * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
- * registers compare against LRH.PktLen, so use the max bytes included
- * in the LRH.
- *
- * This routine changes all VL values except VL15, which it maintains at
- * the same value.
- */
-static void set_send_length(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
- u32 maxvlmtu = dd->vld[15].mtu;
- u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
- & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
- SEND_LEN_CHECK1_LEN_VL15_SHIFT;
- int i;
-
- for (i = 0; i < ppd->vls_supported; i++) {
- if (dd->vld[i].mtu > maxvlmtu)
- maxvlmtu = dd->vld[i].mtu;
- if (i <= 3)
- len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
- & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
- ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
- else
- len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
- & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
- ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
- }
- write_csr(dd, SEND_LEN_CHECK0, len1);
- write_csr(dd, SEND_LEN_CHECK1, len2);
- /* adjust kernel credit return thresholds based on new MTUs */
- /* all kernel receive contexts have the same hdrqentsize */
- for (i = 0; i < ppd->vls_supported; i++) {
- sc_set_cr_threshold(dd->vld[i].sc,
- sc_mtu_to_threshold(dd->vld[i].sc,
- dd->vld[i].mtu,
- dd->rcd[0]->
- rcvhdrqentsize));
- }
- sc_set_cr_threshold(dd->vld[15].sc,
- sc_mtu_to_threshold(dd->vld[15].sc,
- dd->vld[15].mtu,
- dd->rcd[0]->rcvhdrqentsize));
-
- /* Adjust maximum MTU for the port in DC */
- dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
- (ilog2(maxvlmtu >> 8) + 1);
- len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
- len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
- len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
- DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
- write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
-}
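
A worked instance of the DC MTU-cap encoding above for MTUs below 10240 (which uses its own cap constant); ilog2_u32() is a minimal stand-in for the kernel's ilog2() helper.

#include <assert.h>

static int ilog2_u32(unsigned int v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* hypothetical max VL MTU of 4096 bytes:
	 * ilog2(4096 >> 8) + 1 = ilog2(16) + 1 = 5 */
	assert(ilog2_u32(4096 >> 8) + 1 == 5);
	/* an 8192-byte MTU would encode as 6 */
	assert(ilog2_u32(8192 >> 8) + 1 == 6);
	return 0;
}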
-
-static void set_lidlmc(struct hfi1_pportdata *ppd)
-{
- int i;
- u64 sreg = 0;
- struct hfi1_devdata *dd = ppd->dd;
- u32 mask = ~((1U << ppd->lmc) - 1);
- u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
-
- if (dd->hfi1_snoop.mode_flag)
- dd_dev_info(dd, "Set lid/lmc while snooping");
-
- c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
- | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
- c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
- << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
- ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
- << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
- write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
-
- /*
- * Iterate over all the send contexts and set their SLID check
- */
- sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
- SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
- (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
- SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
-
- for (i = 0; i < dd->chip_send_contexts; i++) {
- hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
- i, (u32)sreg);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
- }
-
- /* Now we have to do the same thing for the sdma engines */
- sdma_update_lmc(dd, mask, ppd->lid);
-}
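
A standalone illustration of the LMC masking used by set_lidlmc(): the SLID check ignores the low LMC bits, so every LID in the port's window matches. The LID and LMC values are hypothetical.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical port: base LID 0x1234 with LMC 2 (4 LIDs) */
	uint32_t lid = 0x1234, lmc = 2;
	uint32_t mask = ~((1U << lmc) - 1);	/* as in set_lidlmc() */

	/* 0x1234..0x1237 all match the programmed base LID */
	assert((0x1237u & mask) == (lid & mask));
	/* 0x1238 falls outside the window */
	assert((0x1238u & mask) != (lid & mask));
	return 0;
}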
-
-static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
-{
- unsigned long timeout;
- u32 curr_state;
-
- timeout = jiffies + msecs_to_jiffies(msecs);
- while (1) {
- curr_state = read_physical_state(dd);
- if (curr_state == state)
- break;
- if (time_after(jiffies, timeout)) {
- dd_dev_err(dd,
- "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
- state, curr_state);
- return -ETIMEDOUT;
- }
- usleep_range(1950, 2050); /* sleep 2ms-ish */
- }
-
- return 0;
-}
-
-/*
- * Helper for set_link_state(). Do not call except from that routine.
- * Expects ppd->hls_mutex to be held.
- *
- * @rem_reason value to be sent to the neighbor
- *
- * LinkDownReasons only set if transition succeeds.
- */
-static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u32 pstate, previous_state;
- u32 last_local_state;
- u32 last_remote_state;
- int ret;
- int do_transition;
- int do_wait;
-
- previous_state = ppd->host_link_state;
- ppd->host_link_state = HLS_GOING_OFFLINE;
- pstate = read_physical_state(dd);
- if (pstate == PLS_OFFLINE) {
- do_transition = 0; /* in right state */
- do_wait = 0; /* ...no need to wait */
- } else if ((pstate & 0xff) == PLS_OFFLINE) {
- do_transition = 0; /* in an offline transient state */
- do_wait = 1; /* ...wait for it to settle */
- } else {
- do_transition = 1; /* need to move to offline */
- do_wait = 1; /* ...will need to wait */
- }
-
- if (do_transition) {
- ret = set_physical_link_state(dd,
- (rem_reason << 8) | PLS_OFFLINE);
-
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to transition to Offline link state, return %d\n",
- ret);
- return -EINVAL;
- }
- if (ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
- }
-
- if (do_wait) {
- /* it can take a while for the link to go down */
- ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
- if (ret < 0)
- return ret;
- }
-
- /* make sure the logical state is also down */
- wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
-
- /*
- * Now in charge of LCB - must be after the physical state is
- * offline.quiet and before host_link_state is changed.
- */
- set_host_lcb_access(dd);
- write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
- ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
-
- if (ppd->port_type == PORT_TYPE_QSFP &&
- ppd->qsfp_info.limiting_active &&
- qsfp_mod_present(ppd)) {
- int ret;
-
- ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
- if (ret == 0) {
- set_qsfp_tx(ppd, 0);
- release_chip_resource(dd, qsfp_resource(dd));
- } else {
- /* not fatal, but should warn */
- dd_dev_err(dd,
- "Unable to acquire lock to turn off QSFP TX\n");
- }
- }
-
- /*
- * The LNI has a mandatory wait time after the physical state
- * moves to Offline.Quiet. The wait time may be different
- * depending on how the link went down. The 8051 firmware
- * will observe the needed wait time and only move to ready
- * when that is completed. The largest of the quiet timeouts
- * is 6s, so wait that long and then at least 0.5s more for
- * other transitions, and another 0.5s for a buffer.
- */
- ret = wait_fm_ready(dd, 7000);
- if (ret) {
- dd_dev_err(dd,
- "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
- /* state is really offline, so make it so */
- ppd->host_link_state = HLS_DN_OFFLINE;
- return ret;
- }
-
- /*
- * The state is now offline and the 8051 is ready to accept host
- * requests.
- * - change our state
- * - notify others if we were previously in a linkup state
- */
- ppd->host_link_state = HLS_DN_OFFLINE;
- if (previous_state & HLS_UP) {
- /* went down while link was up */
- handle_linkup_change(dd, 0);
- } else if (previous_state
- & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
- /* went down while attempting link up */
- /* byte 1 of last_*_state is the failure reason */
- read_last_local_state(dd, &last_local_state);
- read_last_remote_state(dd, &last_remote_state);
- dd_dev_err(dd,
- "LNI failure last states: local 0x%08x, remote 0x%08x\n",
- last_local_state, last_remote_state);
- }
-
- /* the active link width (downgrade) is 0 on link down */
- ppd->link_width_active = 0;
- ppd->link_width_downgrade_tx_active = 0;
- ppd->link_width_downgrade_rx_active = 0;
- ppd->current_egress_rate = 0;
- return 0;
-}
-
-/* return the link state name */
-static const char *link_state_name(u32 state)
-{
- const char *name;
- int n = ilog2(state);
- static const char * const names[] = {
- [__HLS_UP_INIT_BP] = "INIT",
- [__HLS_UP_ARMED_BP] = "ARMED",
- [__HLS_UP_ACTIVE_BP] = "ACTIVE",
- [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
- [__HLS_DN_POLL_BP] = "POLL",
- [__HLS_DN_DISABLE_BP] = "DISABLE",
- [__HLS_DN_OFFLINE_BP] = "OFFLINE",
- [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
- [__HLS_GOING_UP_BP] = "GOING_UP",
- [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
- [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
- };
-
- name = n < ARRAY_SIZE(names) ? names[n] : NULL;
- return name ? name : "unknown";
-}
-
-/* return the link state reason name */
-static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
-{
- if (state == HLS_UP_INIT) {
- switch (ppd->linkinit_reason) {
- case OPA_LINKINIT_REASON_LINKUP:
- return "(LINKUP)";
- case OPA_LINKINIT_REASON_FLAPPING:
- return "(FLAPPING)";
- case OPA_LINKINIT_OUTSIDE_POLICY:
- return "(OUTSIDE_POLICY)";
- case OPA_LINKINIT_QUARANTINED:
- return "(QUARANTINED)";
- case OPA_LINKINIT_INSUFIC_CAPABILITY:
- return "(INSUFIC_CAPABILITY)";
- default:
- break;
- }
- }
- return "";
-}
-
-/*
- * driver_physical_state - convert the driver's notion of a port's
- * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
- * Return -1 (converted to a u32) to indicate error.
- */
-u32 driver_physical_state(struct hfi1_pportdata *ppd)
-{
- switch (ppd->host_link_state) {
- case HLS_UP_INIT:
- case HLS_UP_ARMED:
- case HLS_UP_ACTIVE:
- return IB_PORTPHYSSTATE_LINKUP;
- case HLS_DN_POLL:
- return IB_PORTPHYSSTATE_POLLING;
- case HLS_DN_DISABLE:
- return IB_PORTPHYSSTATE_DISABLED;
- case HLS_DN_OFFLINE:
- return OPA_PORTPHYSSTATE_OFFLINE;
- case HLS_VERIFY_CAP:
- return IB_PORTPHYSSTATE_POLLING;
- case HLS_GOING_UP:
- return IB_PORTPHYSSTATE_POLLING;
- case HLS_GOING_OFFLINE:
- return OPA_PORTPHYSSTATE_OFFLINE;
- case HLS_LINK_COOLDOWN:
- return OPA_PORTPHYSSTATE_OFFLINE;
- case HLS_DN_DOWNDEF:
- default:
- dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
- ppd->host_link_state);
- return -1;
- }
-}
-
-/*
- * driver_logical_state - convert the driver's notion of a port's
- * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
- * (converted to a u32) to indicate error.
- */
-u32 driver_logical_state(struct hfi1_pportdata *ppd)
-{
- if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
- return IB_PORT_DOWN;
-
- switch (ppd->host_link_state & HLS_UP) {
- case HLS_UP_INIT:
- return IB_PORT_INIT;
- case HLS_UP_ARMED:
- return IB_PORT_ARMED;
- case HLS_UP_ACTIVE:
- return IB_PORT_ACTIVE;
- default:
- dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
- ppd->host_link_state);
- return -1;
- }
-}
-
-void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
- u8 neigh_reason, u8 rem_reason)
-{
- if (ppd->local_link_down_reason.latest == 0 &&
- ppd->neigh_link_down_reason.latest == 0) {
- ppd->local_link_down_reason.latest = lcl_reason;
- ppd->neigh_link_down_reason.latest = neigh_reason;
- ppd->remote_link_down_reason = rem_reason;
- }
-}
-
-/*
- * Change the physical and/or logical link state.
- *
- * Do not call this routine while inside an interrupt. It contains
- * calls to routines that can take multiple seconds to finish.
- *
- * Returns 0 on success, -errno on failure.
- */
-int set_link_state(struct hfi1_pportdata *ppd, u32 state)
-{
- struct hfi1_devdata *dd = ppd->dd;
- struct ib_event event = {.device = NULL};
- int ret1, ret = 0;
- int was_up, is_down;
- int orig_new_state, poll_bounce;
-
- mutex_lock(&ppd->hls_lock);
-
- orig_new_state = state;
- if (state == HLS_DN_DOWNDEF)
- state = dd->link_default;
-
- /* interpret poll -> poll as a link bounce */
- poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
- state == HLS_DN_POLL;
-
- dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
- link_state_name(ppd->host_link_state),
- link_state_name(orig_new_state),
- poll_bounce ? "(bounce) " : "",
- link_state_reason_name(ppd, state));
-
- was_up = !!(ppd->host_link_state & HLS_UP);
-
- /*
- * If we're going to a (HLS_*) link state that implies the logical
- * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
- * reset is_sm_config_started to 0.
- */
- if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
- ppd->is_sm_config_started = 0;
-
- /*
-	 * Do nothing if the states match. Let a poll-to-poll link bounce
- * go through.
- */
- if (ppd->host_link_state == state && !poll_bounce)
- goto done;
-
- switch (state) {
- case HLS_UP_INIT:
- if (ppd->host_link_state == HLS_DN_POLL &&
- (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
- /*
- * Quick link up jumps from polling to here.
- *
- * Whether in normal or loopback mode, the
- * simulator jumps from polling to link up.
- * Accept that here.
- */
- /* OK */
- } else if (ppd->host_link_state != HLS_GOING_UP) {
- goto unexpected;
- }
-
- ppd->host_link_state = HLS_UP_INIT;
- ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
- if (ret) {
- /* logical state didn't change, stay at going_up */
- ppd->host_link_state = HLS_GOING_UP;
- dd_dev_err(dd,
- "%s: logical state did not change to INIT\n",
- __func__);
- } else {
- /* clear old transient LINKINIT_REASON code */
- if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
- ppd->linkinit_reason =
- OPA_LINKINIT_REASON_LINKUP;
-
- /* enable the port */
- add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
-
- handle_linkup_change(dd, 1);
- }
- break;
- case HLS_UP_ARMED:
- if (ppd->host_link_state != HLS_UP_INIT)
- goto unexpected;
-
- ppd->host_link_state = HLS_UP_ARMED;
- set_logical_state(dd, LSTATE_ARMED);
- ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
- if (ret) {
- /* logical state didn't change, stay at init */
- ppd->host_link_state = HLS_UP_INIT;
- dd_dev_err(dd,
- "%s: logical state did not change to ARMED\n",
- __func__);
- }
- /*
- * The simulator does not currently implement SMA messages,
- * so neighbor_normal is not set. Set it here when we first
- * move to Armed.
- */
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
- ppd->neighbor_normal = 1;
- break;
- case HLS_UP_ACTIVE:
- if (ppd->host_link_state != HLS_UP_ARMED)
- goto unexpected;
-
- ppd->host_link_state = HLS_UP_ACTIVE;
- set_logical_state(dd, LSTATE_ACTIVE);
- ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
- if (ret) {
- /* logical state didn't change, stay at armed */
- ppd->host_link_state = HLS_UP_ARMED;
- dd_dev_err(dd,
- "%s: logical state did not change to ACTIVE\n",
- __func__);
- } else {
- /* tell all engines to go running */
- sdma_all_running(dd);
-
-			/* Signal the IB layer that the port has gone active */
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = ppd->port;
- event.event = IB_EVENT_PORT_ACTIVE;
- }
- break;
- case HLS_DN_POLL:
- if ((ppd->host_link_state == HLS_DN_DISABLE ||
- ppd->host_link_state == HLS_DN_OFFLINE) &&
- dd->dc_shutdown)
- dc_start(dd);
- /* Hand LED control to the DC */
- write_csr(dd, DCC_CFG_LED_CNTRL, 0);
-
- if (ppd->host_link_state != HLS_DN_OFFLINE) {
- u8 tmp = ppd->link_enabled;
-
- ret = goto_offline(ppd, ppd->remote_link_down_reason);
- if (ret) {
- ppd->link_enabled = tmp;
- break;
- }
- ppd->remote_link_down_reason = 0;
-
- if (ppd->driver_link_ready)
- ppd->link_enabled = 1;
- }
-
- set_all_slowpath(ppd->dd);
- ret = set_local_link_attributes(ppd);
- if (ret)
- break;
-
- ppd->port_error_action = 0;
- ppd->host_link_state = HLS_DN_POLL;
-
- if (quick_linkup) {
- /* quick linkup does not go into polling */
- ret = do_quick_linkup(dd);
- } else {
- ret1 = set_physical_link_state(dd, PLS_POLLING);
- if (ret1 != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to transition to Polling link state, return 0x%x\n",
- ret1);
- ret = -EINVAL;
- }
- }
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
- /*
- * If an error occurred above, go back to offline. The
- * caller may reschedule another attempt.
- */
- if (ret)
- goto_offline(ppd, 0);
- break;
- case HLS_DN_DISABLE:
- /* link is disabled */
- ppd->link_enabled = 0;
-
- /* allow any state to transition to disabled */
-
- /* must transition to offline first */
- if (ppd->host_link_state != HLS_DN_OFFLINE) {
- ret = goto_offline(ppd, ppd->remote_link_down_reason);
- if (ret)
- break;
- ppd->remote_link_down_reason = 0;
- }
-
- ret1 = set_physical_link_state(dd, PLS_DISABLED);
- if (ret1 != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to transition to Disabled link state, return 0x%x\n",
- ret1);
- ret = -EINVAL;
- break;
- }
- ppd->host_link_state = HLS_DN_DISABLE;
- dc_shutdown(dd);
- break;
- case HLS_DN_OFFLINE:
- if (ppd->host_link_state == HLS_DN_DISABLE)
- dc_start(dd);
-
- /* allow any state to transition to offline */
- ret = goto_offline(ppd, ppd->remote_link_down_reason);
- if (!ret)
- ppd->remote_link_down_reason = 0;
- break;
- case HLS_VERIFY_CAP:
- if (ppd->host_link_state != HLS_DN_POLL)
- goto unexpected;
- ppd->host_link_state = HLS_VERIFY_CAP;
- break;
- case HLS_GOING_UP:
- if (ppd->host_link_state != HLS_VERIFY_CAP)
- goto unexpected;
-
- ret1 = set_physical_link_state(dd, PLS_LINKUP);
- if (ret1 != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to transition to link up state, return 0x%x\n",
- ret1);
- ret = -EINVAL;
- break;
- }
- ppd->host_link_state = HLS_GOING_UP;
- break;
-
- case HLS_GOING_OFFLINE: /* transient within goto_offline() */
- case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
- default:
- dd_dev_info(dd, "%s: state 0x%x: not supported\n",
- __func__, state);
- ret = -EINVAL;
- break;
- }
-
- is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
- HLS_DN_DISABLE | HLS_DN_OFFLINE));
-
- if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
- ppd->neigh_link_down_reason.sma == 0) {
- ppd->local_link_down_reason.sma =
- ppd->local_link_down_reason.latest;
- ppd->neigh_link_down_reason.sma =
- ppd->neigh_link_down_reason.latest;
- }
-
- goto done;
-
-unexpected:
- dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
- __func__, link_state_name(ppd->host_link_state),
- link_state_name(state));
- ret = -EINVAL;
-
-done:
- mutex_unlock(&ppd->hls_lock);
-
- if (event.device)
- ib_dispatch_event(&event);
-
- return ret;
-}
-
-int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
-{
- u64 reg;
- int ret = 0;
-
- switch (which) {
- case HFI1_IB_CFG_LIDLMC:
- set_lidlmc(ppd);
- break;
- case HFI1_IB_CFG_VL_HIGH_LIMIT:
- /*
- * The VL Arbitrator high limit is sent in units of 4k
- * bytes, while HFI stores it in units of 64 bytes.
- */
- val *= 4096 / 64;
- reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
- << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
- write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
- break;
- case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* HFI only supports POLL as the default link down state */
- if (val != HLS_DN_POLL)
- ret = -EINVAL;
- break;
- case HFI1_IB_CFG_OP_VLS:
- if (ppd->vls_operational != val) {
- ppd->vls_operational = val;
- if (!ppd->port)
- ret = -EINVAL;
- }
- break;
- /*
- * For link width, link width downgrade, and speed enable, always AND
- * the setting with what is actually supported. This has two benefits.
- * First, enabled can't have unsupported values, no matter what the
- * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
- * "fill in with your supported value" have all the bits in the
- * field set, so simply ANDing with supported has the desired result.
- */
- case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
- ppd->link_width_enabled = val & ppd->link_width_supported;
- break;
- case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
- ppd->link_width_downgrade_enabled =
- val & ppd->link_width_downgrade_supported;
- break;
- case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
- ppd->link_speed_enabled = val & ppd->link_speed_supported;
- break;
- case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- /*
-		 * HFI does not follow IB specs; save this value
-		 * so we can report it if asked.
- */
- ppd->overrun_threshold = val;
- break;
- case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- /*
-		 * HFI does not follow IB specs; save this value
-		 * so we can report it if asked.
- */
- ppd->phy_error_threshold = val;
- break;
-
- case HFI1_IB_CFG_MTU:
- set_send_length(ppd);
- break;
-
- case HFI1_IB_CFG_PKEYS:
- if (HFI1_CAP_IS_KSET(PKEY_CHECK))
- set_partition_keys(ppd);
- break;
-
- default:
- if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
- dd_dev_info(ppd->dd,
- "%s: which %s, val 0x%x: not implemented\n",
- __func__, ib_cfg_name(which), val);
- break;
- }
- return ret;
-}
-
-/* begin functions related to vl arbitration table caching */
-static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
-{
- int i;
-
- BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
- VL_ARB_LOW_PRIO_TABLE_SIZE);
- BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
- VL_ARB_HIGH_PRIO_TABLE_SIZE);
-
- /*
- * Note that we always return values directly from the
- * 'vl_arb_cache' (and do no CSR reads) in response to a
- * 'Get(VLArbTable)'. This is obviously correct after a
- * 'Set(VLArbTable)', since the cache will then be up to
- * date. But it's also correct prior to any 'Set(VLArbTable)'
- * since then both the cache, and the relevant h/w registers
- * will be zeroed.
- */
-
- for (i = 0; i < MAX_PRIO_TABLE; i++)
- spin_lock_init(&ppd->vl_arb_cache[i].lock);
-}
-
-/*
- * vl_arb_lock_cache
- *
- * All other vl_arb_* functions should be called only after locking
- * the cache.
- */
-static inline struct vl_arb_cache *
-vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
-{
- if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
- return NULL;
- spin_lock(&ppd->vl_arb_cache[idx].lock);
- return &ppd->vl_arb_cache[idx];
-}
-
-static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
-{
- spin_unlock(&ppd->vl_arb_cache[idx].lock);
-}
-
-static void vl_arb_get_cache(struct vl_arb_cache *cache,
- struct ib_vl_weight_elem *vl)
-{
- memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
-}
-
-static void vl_arb_set_cache(struct vl_arb_cache *cache,
- struct ib_vl_weight_elem *vl)
-{
- memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
-}
-
-static int vl_arb_match_cache(struct vl_arb_cache *cache,
- struct ib_vl_weight_elem *vl)
-{
- return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
-}
-
-/* end functions related to vl arbitration table caching */
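
A sketch of the lock/operate/unlock pattern the vl_arb_* helpers above expect from their callers, modeled in userspace with a pthread spinlock standing in for the per-table spin_lock; cache_snapshot() is a hypothetical wrapper, not the driver's API.

#include <pthread.h>
#include <string.h>

#define VL_ARB_TABLE_SIZE 16

struct vl_elem { unsigned char vl, weight; };

struct vl_cache {
	pthread_spinlock_t lock;
	struct vl_elem table[VL_ARB_TABLE_SIZE];
};

static void cache_snapshot(struct vl_cache *c, struct vl_elem *out)
{
	pthread_spin_lock(&c->lock);		/* vl_arb_lock_cache() */
	memcpy(out, c->table, sizeof(c->table)); /* vl_arb_get_cache() */
	pthread_spin_unlock(&c->lock);		/* vl_arb_unlock_cache() */
}

int main(void)
{
	struct vl_cache c;
	struct vl_elem snap[VL_ARB_TABLE_SIZE];

	pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
	memset(c.table, 0, sizeof(c.table));
	cache_snapshot(&c, snap);
	pthread_spin_destroy(&c.lock);
	return 0;
}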
-
-static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
- u32 size, struct ib_vl_weight_elem *vl)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 reg;
- unsigned int i, is_up = 0;
- int drain, ret = 0;
-
- mutex_lock(&ppd->hls_lock);
-
- if (ppd->host_link_state & HLS_UP)
- is_up = 1;
-
- drain = !is_ax(dd) && is_up;
-
- if (drain)
- /*
- * Before adjusting VL arbitration weights, empty per-VL
- * FIFOs, otherwise a packet whose VL weight is being
- * set to 0 could get stuck in a FIFO with no chance to
- * egress.
- */
- ret = stop_drain_data_vls(dd);
-
- if (ret) {
- dd_dev_err(
- dd,
- "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
- __func__);
- goto err;
- }
-
- for (i = 0; i < size; i++, vl++) {
- /*
- * NOTE: The low priority shift and mask are used here, but
- * they are the same for both the low and high registers.
- */
- reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
- << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
- | (((u64)vl->weight
- & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
- << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
- write_csr(dd, target + (i * 8), reg);
- }
- pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
-
- if (drain)
- open_fill_data_vls(dd); /* reopen all VLs */
-
-err:
- mutex_unlock(&ppd->hls_lock);
-
- return ret;
-}
-
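-/*
- * Worked example of the packing above (illustrative values; the real
- * shifts and masks come from the SEND_LOW_PRIORITY_LIST definitions,
- * abbreviated here): an entry with vl = 3 and weight = 32 becomes
- *
- *	reg = ((u64)3 & VL_MASK) << VL_SHIFT |
- *	      ((u64)32 & WEIGHT_MASK) << WEIGHT_SHIFT;
- *
- * and entry i of the table lands at CSR address target + (i * 8),
- * since each list register is 8 bytes wide.
- */
-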
-/*
- * Read one credit merge VL register.
- */
-static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
- struct vl_limit *vll)
-{
- u64 reg = read_csr(dd, csr);
-
- vll->dedicated = cpu_to_be16(
- (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
- & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
- vll->shared = cpu_to_be16(
- (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
- & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
-}
-
-/*
- * Read the current credit merge limits.
- */
-static int get_buffer_control(struct hfi1_devdata *dd,
- struct buffer_control *bc, u16 *overall_limit)
-{
- u64 reg;
- int i;
-
- /* not all entries are filled in */
- memset(bc, 0, sizeof(*bc));
-
- /* OPA and HFI have a 1-1 mapping */
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
-
- /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
- read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
-
- reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
- bc->overall_shared_limit = cpu_to_be16(
- (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
- & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
- if (overall_limit)
- *overall_limit = (reg
- >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
- & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
- return sizeof(struct buffer_control);
-}
-
-static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
-{
- u64 reg;
- int i;
-
- /* each register contains 16 SC->VLnt mappings, 4 bits each */
- reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
- for (i = 0; i < sizeof(u64); i++) {
- u8 byte = *(((u8 *)&reg) + i);
-
- dp->vlnt[2 * i] = byte & 0xf;
- dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
- }
-
- reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
- for (i = 0; i < sizeof(u64); i++) {
- u8 byte = *(((u8 *)&reg) + i);
-
- dp->vlnt[16 + (2 * i)] = byte & 0xf;
- dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
- }
- return sizeof(struct sc2vlnt);
-}
-
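-/*
- * Equivalent view of the unpacking above (a sketch, not the driver's
- * code): each 64-bit register packs entries 0..15 at 4 bits apiece,
- * so entry j could also be read directly as
- *
- *	dp->vlnt[base + j] = (reg >> (4 * j)) & 0xf;
- *
- * The byte-wise loop is the same computation, two entries at a time.
- */
-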
-static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
- struct ib_vl_weight_elem *vl)
-{
- unsigned int i;
-
- for (i = 0; i < nelems; i++, vl++) {
- vl->vl = 0xf;
- vl->weight = 0;
- }
-}
-
-static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
-{
- write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
- DC_SC_VL_VAL(15_0,
- 0, dp->vlnt[0] & 0xf,
- 1, dp->vlnt[1] & 0xf,
- 2, dp->vlnt[2] & 0xf,
- 3, dp->vlnt[3] & 0xf,
- 4, dp->vlnt[4] & 0xf,
- 5, dp->vlnt[5] & 0xf,
- 6, dp->vlnt[6] & 0xf,
- 7, dp->vlnt[7] & 0xf,
- 8, dp->vlnt[8] & 0xf,
- 9, dp->vlnt[9] & 0xf,
- 10, dp->vlnt[10] & 0xf,
- 11, dp->vlnt[11] & 0xf,
- 12, dp->vlnt[12] & 0xf,
- 13, dp->vlnt[13] & 0xf,
- 14, dp->vlnt[14] & 0xf,
- 15, dp->vlnt[15] & 0xf));
- write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
- DC_SC_VL_VAL(31_16,
- 16, dp->vlnt[16] & 0xf,
- 17, dp->vlnt[17] & 0xf,
- 18, dp->vlnt[18] & 0xf,
- 19, dp->vlnt[19] & 0xf,
- 20, dp->vlnt[20] & 0xf,
- 21, dp->vlnt[21] & 0xf,
- 22, dp->vlnt[22] & 0xf,
- 23, dp->vlnt[23] & 0xf,
- 24, dp->vlnt[24] & 0xf,
- 25, dp->vlnt[25] & 0xf,
- 26, dp->vlnt[26] & 0xf,
- 27, dp->vlnt[27] & 0xf,
- 28, dp->vlnt[28] & 0xf,
- 29, dp->vlnt[29] & 0xf,
- 30, dp->vlnt[30] & 0xf,
- 31, dp->vlnt[31] & 0xf));
-}
-
-static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
- u16 limit)
-{
- if (limit != 0)
- dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
- what, (int)limit, idx);
-}
-
-/* change only the shared limit portion of SendCmGlobalCredit */
-static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
-{
- u64 reg;
-
- reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
- reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
- reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
- write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
-}
-
-/* change only the total credit limit portion of SendCmGlobalCredit */
-static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
-{
- u64 reg;
-
- reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
- reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
- reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
- write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
-}
-
-/* set the given per-VL shared limit */
-static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
-{
- u64 reg;
- u32 addr;
-
- if (vl < TXE_NUM_DATA_VL)
- addr = SEND_CM_CREDIT_VL + (8 * vl);
- else
- addr = SEND_CM_CREDIT_VL15;
-
- reg = read_csr(dd, addr);
- reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
- reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
- write_csr(dd, addr, reg);
-}
-
-/* set the given per-VL dedicated limit */
-static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
-{
- u64 reg;
- u32 addr;
-
- if (vl < TXE_NUM_DATA_VL)
- addr = SEND_CM_CREDIT_VL + (8 * vl);
- else
- addr = SEND_CM_CREDIT_VL15;
-
- reg = read_csr(dd, addr);
- reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
- reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
- write_csr(dd, addr, reg);
-}
-
-/* spin until the given per-VL status mask bits clear */
-static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
- const char *which)
-{
- unsigned long timeout;
- u64 reg;
-
- timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
- while (1) {
- reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
-
- if (reg == 0)
- return; /* success */
- if (time_after(jiffies, timeout))
- break; /* timed out */
- udelay(1);
- }
-
- dd_dev_err(dd,
- "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
- which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
- /*
- * If this occurs, it is likely there was a credit loss on the link.
- * The only recovery from that is a link bounce.
- */
- dd_dev_err(dd,
- "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
-}
-
-/*
- * The number of credits on the VLs may be changed while everything
- * is "live", but the following algorithm must be followed due to
- * how the hardware is actually implemented. In particular,
- * Return_Credit_Status[] is the only correct status check.
- *
- * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
- * set Global_Shared_Credit_Limit = 0
- * use_all_vl = 1
- * mask0 = all VLs that are changing either dedicated or shared limits
- * set Shared_Limit[mask0] = 0
- * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
- * if (changing any dedicated limit)
- * mask1 = all VLs that are lowering dedicated limits
- * lower Dedicated_Limit[mask1]
- * spin until Return_Credit_Status[mask1] == 0
- * raise Dedicated_Limits
- * raise Shared_Limits
- * raise Global_Shared_Credit_Limit
- *
- * lower = if the new limit is lower, set the limit to the new value
- * raise = if the new limit is higher than the current value (may have been
- *	 changed earlier in the algorithm), set the limit to the new value
- */
-int set_buffer_control(struct hfi1_pportdata *ppd,
- struct buffer_control *new_bc)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 changing_mask, ld_mask, stat_mask;
- int change_count;
- int i, use_all_mask;
- int this_shared_changing;
- int vl_count = 0, ret;
- /*
-	 * A0: the variable any_shared_limit_changing below is needed in
-	 * addition to the algorithm above.  If A0 support is removed, it
-	 * can be removed as well.
- */
- int any_shared_limit_changing;
- struct buffer_control cur_bc;
- u8 changing[OPA_MAX_VLS];
- u8 lowering_dedicated[OPA_MAX_VLS];
- u16 cur_total;
- u32 new_total = 0;
- const u64 all_mask =
- SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
- | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
-
-#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
-#define NUM_USABLE_VLS 16 /* look at VL15 and less */
-
- /* find the new total credits, do sanity check on unused VLs */
- for (i = 0; i < OPA_MAX_VLS; i++) {
- if (valid_vl(i)) {
- new_total += be16_to_cpu(new_bc->vl[i].dedicated);
- continue;
- }
- nonzero_msg(dd, i, "dedicated",
- be16_to_cpu(new_bc->vl[i].dedicated));
- nonzero_msg(dd, i, "shared",
- be16_to_cpu(new_bc->vl[i].shared));
- new_bc->vl[i].dedicated = 0;
- new_bc->vl[i].shared = 0;
- }
- new_total += be16_to_cpu(new_bc->overall_shared_limit);
-
- /* fetch the current values */
- get_buffer_control(dd, &cur_bc, &cur_total);
-
- /*
- * Create the masks we will use.
- */
- memset(changing, 0, sizeof(changing));
- memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
- /*
- * NOTE: Assumes that the individual VL bits are adjacent and in
- * increasing order
- */
- stat_mask =
- SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
- changing_mask = 0;
- ld_mask = 0;
- change_count = 0;
- any_shared_limit_changing = 0;
- for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
- if (!valid_vl(i))
- continue;
- this_shared_changing = new_bc->vl[i].shared
- != cur_bc.vl[i].shared;
- if (this_shared_changing)
- any_shared_limit_changing = 1;
- if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
- this_shared_changing) {
- changing[i] = 1;
- changing_mask |= stat_mask;
- change_count++;
- }
- if (be16_to_cpu(new_bc->vl[i].dedicated) <
- be16_to_cpu(cur_bc.vl[i].dedicated)) {
- lowering_dedicated[i] = 1;
- ld_mask |= stat_mask;
- }
- }
-
- /* bracket the credit change with a total adjustment */
- if (new_total > cur_total)
- set_global_limit(dd, new_total);
-
- /*
- * Start the credit change algorithm.
- */
- use_all_mask = 0;
- if ((be16_to_cpu(new_bc->overall_shared_limit) <
- be16_to_cpu(cur_bc.overall_shared_limit)) ||
- (is_ax(dd) && any_shared_limit_changing)) {
- set_global_shared(dd, 0);
- cur_bc.overall_shared_limit = 0;
- use_all_mask = 1;
- }
-
- for (i = 0; i < NUM_USABLE_VLS; i++) {
- if (!valid_vl(i))
- continue;
-
- if (changing[i]) {
- set_vl_shared(dd, i, 0);
- cur_bc.vl[i].shared = 0;
- }
- }
-
- wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
- "shared");
-
- if (change_count > 0) {
- for (i = 0; i < NUM_USABLE_VLS; i++) {
- if (!valid_vl(i))
- continue;
-
- if (lowering_dedicated[i]) {
- set_vl_dedicated(dd, i,
- be16_to_cpu(new_bc->
- vl[i].dedicated));
- cur_bc.vl[i].dedicated =
- new_bc->vl[i].dedicated;
- }
- }
-
- wait_for_vl_status_clear(dd, ld_mask, "dedicated");
-
- /* now raise all dedicated that are going up */
- for (i = 0; i < NUM_USABLE_VLS; i++) {
- if (!valid_vl(i))
- continue;
-
- if (be16_to_cpu(new_bc->vl[i].dedicated) >
- be16_to_cpu(cur_bc.vl[i].dedicated))
- set_vl_dedicated(dd, i,
- be16_to_cpu(new_bc->
- vl[i].dedicated));
- }
- }
-
- /* next raise all shared that are going up */
- for (i = 0; i < NUM_USABLE_VLS; i++) {
- if (!valid_vl(i))
- continue;
-
- if (be16_to_cpu(new_bc->vl[i].shared) >
- be16_to_cpu(cur_bc.vl[i].shared))
- set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
- }
-
- /* finally raise the global shared */
- if (be16_to_cpu(new_bc->overall_shared_limit) >
- be16_to_cpu(cur_bc.overall_shared_limit))
- set_global_shared(dd,
- be16_to_cpu(new_bc->overall_shared_limit));
-
- /* bracket the credit change with a total adjustment */
- if (new_total < cur_total)
- set_global_limit(dd, new_total);
-
- /*
-	 * Determine the actual number of operational VLs using the number of
- * dedicated and shared credits for each VL.
- */
- if (change_count > 0) {
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
- be16_to_cpu(new_bc->vl[i].shared) > 0)
- vl_count++;
- ppd->actual_vls_operational = vl_count;
- ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
- ppd->actual_vls_operational :
- ppd->vls_operational,
- NULL);
- if (ret == 0)
- ret = pio_map_init(dd, ppd->port - 1, vl_count ?
- ppd->actual_vls_operational :
- ppd->vls_operational, NULL);
- if (ret)
- return ret;
- }
- return 0;
-}
-
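-/*
- * Worked example of the algorithm above (illustrative numbers only):
- * suppose VL0's dedicated limit drops 100 -> 60 while its shared
- * limit rises 10 -> 20, with the overall shared limit unchanged.
- * set_buffer_control() then:
- *	1. zeroes Shared_Limit[VL0] and spins until
- *	   Return_Credit_Status[VL0] clears,
- *	2. lowers Dedicated_Limit[VL0] to 60 and spins again,
- *	3. raises Shared_Limit[VL0] to 20,
- * with the global total raised before these steps when growing, or
- * lowered after them when shrinking.
- */
-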
-/*
- * Read the given fabric manager table. Return the size of the
- * table (in bytes) on success, and a negative error code on
- * failure.
- */
-int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
-
-{
- int size;
- struct vl_arb_cache *vlc;
-
- switch (which) {
- case FM_TBL_VL_HIGH_ARB:
- size = 256;
- /*
- * OPA specifies 128 elements (of 2 bytes each), though
- * HFI supports only 16 elements in h/w.
- */
- vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
- vl_arb_get_cache(vlc, t);
- vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
- break;
- case FM_TBL_VL_LOW_ARB:
- size = 256;
- /*
- * OPA specifies 128 elements (of 2 bytes each), though
- * HFI supports only 16 elements in h/w.
- */
- vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
- vl_arb_get_cache(vlc, t);
- vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
- break;
- case FM_TBL_BUFFER_CONTROL:
- size = get_buffer_control(ppd->dd, t, NULL);
- break;
- case FM_TBL_SC2VLNT:
- size = get_sc2vlnt(ppd->dd, t);
- break;
- case FM_TBL_VL_PREEMPT_ELEMS:
- size = 256;
- /* OPA specifies 128 elements, of 2 bytes each */
- get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
- break;
- case FM_TBL_VL_PREEMPT_MATRIX:
- size = 256;
- /*
- * OPA specifies that this is the same size as the VL
- * arbitration tables (i.e., 256 bytes).
- */
- break;
- default:
- return -EINVAL;
- }
- return size;
-}
-
-/*
- * Write the given fabric manager table.
- */
-int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
-{
- int ret = 0;
- struct vl_arb_cache *vlc;
-
- switch (which) {
- case FM_TBL_VL_HIGH_ARB:
- vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
- if (vl_arb_match_cache(vlc, t)) {
- vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
- break;
- }
- vl_arb_set_cache(vlc, t);
- vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
- ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
- VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
- break;
- case FM_TBL_VL_LOW_ARB:
- vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
- if (vl_arb_match_cache(vlc, t)) {
- vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
- break;
- }
- vl_arb_set_cache(vlc, t);
- vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
- ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
- VL_ARB_LOW_PRIO_TABLE_SIZE, t);
- break;
- case FM_TBL_BUFFER_CONTROL:
- ret = set_buffer_control(ppd, t);
- break;
- case FM_TBL_SC2VLNT:
- set_sc2vlnt(ppd->dd, t);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-/*
- * Disable all data VLs.
- *
- * Return 0 if disabled, non-zero if the VLs cannot be disabled.
- */
-static int disable_data_vls(struct hfi1_devdata *dd)
-{
- if (is_ax(dd))
- return 1;
-
- pio_send_control(dd, PSC_DATA_VL_DISABLE);
-
- return 0;
-}
-
-/*
- * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
- * Just re-enables all data VLs (the "fill" part happens
- * automatically - the name was chosen for symmetry with
- * stop_drain_data_vls()).
- *
- * Return 0 if successful, non-zero if the VLs cannot be enabled.
- */
-int open_fill_data_vls(struct hfi1_devdata *dd)
-{
- if (is_ax(dd))
- return 1;
-
- pio_send_control(dd, PSC_DATA_VL_ENABLE);
-
- return 0;
-}
-
-/*
- * drain_data_vls() - assumes that disable_data_vls() has been called;
- * waits for the occupancy of the per-VL FIFOs, for all contexts and
- * SDMA engines, to drop to 0.
- */
-static void drain_data_vls(struct hfi1_devdata *dd)
-{
- sc_wait(dd);
- sdma_wait(dd);
- pause_for_credit_return(dd);
-}
-
-/*
- * stop_drain_data_vls() - disable, then drain all per-VL fifos.
- *
- * Use open_fill_data_vls() to resume using data VLs. This pair is
- * meant to be used like this:
- *
- * stop_drain_data_vls(dd);
- * // do things with per-VL resources
- * open_fill_data_vls(dd);
- */
-int stop_drain_data_vls(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = disable_data_vls(dd);
- if (ret == 0)
- drain_data_vls(dd);
-
- return ret;
-}
-
-/*
- * Convert a nanosecond time to a cclock count. No matter how slow
- * the cclock, a non-zero ns will always have a non-zero result.
- */
-u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
-{
- u32 cclocks;
-
- if (dd->icode == ICODE_FPGA_EMULATION)
- cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
- else /* simulation pretends to be ASIC */
- cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
- if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
- cclocks = 1;
- return cclocks;
-}
-
-/*
- * Convert a cclock count to nanoseconds. No matter how slow
- * the cclock, a non-zero cclock count will always yield a non-zero result.
- */
-u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
-{
- u32 ns;
-
- if (dd->icode == ICODE_FPGA_EMULATION)
- ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
- else /* simulation pretends to be ASIC */
- ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
- if (cclocks && !ns)
- ns = 1;
- return ns;
-}
-
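-/*
- * Illustration only (the real period comes from ASIC_CCLOCK_PS or
- * FPGA_CCLOCK_PS): with a hypothetical 1250 ps cclock,
- *
- *	ns_to_cclock(dd, 1) = (1 * 1000) / 1250 = 0, forced up to 1
- *	cclock_to_ns(dd, 1) = (1 * 1250) / 1000 = 1
- *
- * so a non-zero request never silently truncates to zero.
- */
-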
-/*
- * Dynamically adjust the receive interrupt timeout for a context based on
- * incoming packet rate.
- *
- * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
- */
-static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
-{
- struct hfi1_devdata *dd = rcd->dd;
- u32 timeout = rcd->rcvavail_timeout;
-
- /*
-	 * This algorithm doubles or halves the timeout depending on whether
-	 * the number of packets received in this interrupt was less than,
-	 * or greater than or equal to, the interrupt count.
-	 *
-	 * The calculations below do not allow a steady state to be achieved.
-	 * Only at the endpoints is it possible to have an unchanging
-	 * timeout.
- */
- if (npkts < rcv_intr_count) {
- /*
- * Not enough packets arrived before the timeout, adjust
- * timeout downward.
- */
- if (timeout < 2) /* already at minimum? */
- return;
- timeout >>= 1;
- } else {
- /*
- * More than enough packets arrived before the timeout, adjust
- * timeout upward.
- */
- if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
- return;
- timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
- }
-
- rcd->rcvavail_timeout = timeout;
- /*
- * timeout cannot be larger than rcv_intr_timeout_csr which has already
- * been verified to be in range
- */
- write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
- (u64)timeout <<
- RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
-}
-
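-/*
- * Worked example (hypothetical values): with rcv_intr_count = 16 and
- * a current timeout of 64, an interrupt that handled only 3 packets
- * halves the timeout to 32, while one that handled 40 packets doubles
- * it to 128, capped at dd->rcv_intr_timeout_csr.
- */
-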
-void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
- u32 intr_adjust, u32 npkts)
-{
- struct hfi1_devdata *dd = rcd->dd;
- u64 reg;
- u32 ctxt = rcd->ctxt;
-
- /*
- * Need to write timeout register before updating RcvHdrHead to ensure
- * that a new value is used when the HW decides to restart counting.
- */
- if (intr_adjust)
- adjust_rcv_timeout(rcd, npkts);
- if (updegr) {
- reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
- << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
- write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
- }
- mmiowb();
- reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
- (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
- << RCV_HDR_HEAD_HEAD_SHIFT);
- write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
- mmiowb();
-}
-
-u32 hdrqempty(struct hfi1_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
- & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
-
- if (rcd->rcvhdrtail_kvaddr)
- tail = get_rcvhdrtail(rcd);
- else
- tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
-
- return head == tail;
-}
-
-/*
- * Context Control and Receive Array encoding for buffer size:
- * 0x0 invalid
- * 0x1 4 KB
- * 0x2 8 KB
- * 0x3 16 KB
- * 0x4 32 KB
- * 0x5 64 KB
- * 0x6 128 KB
- * 0x7 256 KB
- * 0x8 512 KB (Receive Array only)
- * 0x9 1 MB (Receive Array only)
- * 0xa 2 MB (Receive Array only)
- *
- *	0xb-0xf	reserved (Receive Array only)
- *
- * This routine assumes that the value has already been sanity checked.
- */
-static u32 encoded_size(u32 size)
-{
- switch (size) {
- case 4 * 1024: return 0x1;
- case 8 * 1024: return 0x2;
- case 16 * 1024: return 0x3;
- case 32 * 1024: return 0x4;
- case 64 * 1024: return 0x5;
- case 128 * 1024: return 0x6;
- case 256 * 1024: return 0x7;
- case 512 * 1024: return 0x8;
- case 1 * 1024 * 1024: return 0x9;
- case 2 * 1024 * 1024: return 0xa;
- }
- return 0x1; /* if invalid, go with the minimum size */
-}
-
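-/*
- * Equivalent closed form (a sketch, not the driver's code): the valid
- * sizes are the powers of two from 4 KB (2^12) to 2 MB (2^21), so the
- * encoding is simply ilog2(size) - 11.  With ilog2() and
- * is_power_of_2() from <linux/log2.h>, a hypothetical helper could be:
- *
- *	static u32 encoded_size_alt(u32 size)
- *	{
- *		if (!is_power_of_2(size) ||
- *		    size < 4096 || size > 2 * 1024 * 1024)
- *			return 0x1;
- *		return ilog2(size) - 11;
- *	}
- *
- * returning the minimum encoding for invalid sizes, as above.
- */
-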
-void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
-{
- struct hfi1_ctxtdata *rcd;
- u64 rcvctrl, reg;
- int did_enable = 0;
-
- rcd = dd->rcd[ctxt];
- if (!rcd)
- return;
-
- hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
-
- rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
-	/* if the context is already enabled, don't do the extra steps */
- if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
- !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
- /* reset the tail and hdr addresses, and sequence count */
- write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
- rcd->rcvhdrq_phys);
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
- write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- rcd->rcvhdrqtailaddr_phys);
- rcd->seq_cnt = 1;
-
- /* reset the cached receive header queue head value */
- rcd->head = 0;
-
- /*
- * Zero the receive header queue so we don't get false
- * positives when checking the sequence number. The
- * sequence numbers could land exactly on the same spot.
-		 * E.g., an rcd restart before the receive header queue wrapped.
- */
- memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
-
- /* starting timeout */
- rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
-
- /* enable the context */
- rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
-
- /* clean the egr buffer size first */
- rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
- rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
- & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
- << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
-
- /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
- write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
- did_enable = 1;
-
- /* zero RcvEgrIndexHead */
- write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
-
- /* set eager count and base index */
- reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
- & RCV_EGR_CTRL_EGR_CNT_MASK)
- << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
- (((rcd->eager_base >> RCV_SHIFT)
- & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
- << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
- write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
-
- /*
- * Set TID (expected) count and base index.
- * rcd->expected_count is set to individual RcvArray entries,
- * not pairs, and the CSR takes a pair-count in groups of
- * four, so divide by 8.
- */
- reg = (((rcd->expected_count >> RCV_SHIFT)
- & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
- << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
- (((rcd->expected_base >> RCV_SHIFT)
- & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
- << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
- write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
- if (ctxt == HFI1_CTRL_CTXT)
- write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
- }
- if (op & HFI1_RCVCTRL_CTXT_DIS) {
- write_csr(dd, RCV_VL15, 0);
- /*
-		 * When a receive context is being disabled, turn on tail
-		 * update with a dummy tail address, and then disable the
-		 * receive context.
- */
- if (dd->rcvhdrtail_dummy_physaddr) {
- write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
- /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
- rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
- }
-
- rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
- }
- if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
- rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
- rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
- rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
- /* See comment on RcvCtxtCtrl.TailUpd above */
- if (!(op & HFI1_RCVCTRL_CTXT_DIS))
- rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
- }
- if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
- rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
- if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
- rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
- if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
- /*
- * In one-packet-per-eager mode, the size comes from
- * the RcvArray entry.
- */
- rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
- rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
- }
- if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
- rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
- if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
- rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
- if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
- rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
- if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
- rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
- if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
- rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
- rcd->rcvctrl = rcvctrl;
- hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
- write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
-
- /* work around sticky RcvCtxtStatus.BlockedRHQFull */
- if (did_enable &&
- (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
- reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
- if (reg != 0) {
- dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
- ctxt, reg);
- read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
- write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
- write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
- read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
- reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
- dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
- ctxt, reg, reg == 0 ? "not" : "still");
- }
- }
-
- if (did_enable) {
- /*
- * The interrupt timeout and count must be set after
- * the context is enabled to take effect.
- */
- /* set interrupt timeout */
- write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
- (u64)rcd->rcvavail_timeout <<
- RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
-
- /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
- reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
- write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
- }
-
- if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
- /*
- * If the context has been disabled and the Tail Update has
- * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
- * so it doesn't contain an address that is invalid.
- */
- write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
-}
-
-u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
-{
- int ret;
- u64 val = 0;
-
- if (namep) {
- ret = dd->cntrnameslen;
- *namep = dd->cntrnames;
- } else {
- const struct cntr_entry *entry;
- int i, j;
-
- ret = (dd->ndevcntrs) * sizeof(u64);
-
- /* Get the start of the block of counters */
- *cntrp = dd->cntrs;
-
- /*
- * Now go and fill in each counter in the block.
- */
- for (i = 0; i < DEV_CNTR_LAST; i++) {
- entry = &dev_cntrs[i];
- hfi1_cdbg(CNTR, "reading %s", entry->name);
- if (entry->flags & CNTR_DISABLED) {
- /* Nothing */
- hfi1_cdbg(CNTR, "\tDisabled\n");
- } else {
- if (entry->flags & CNTR_VL) {
- hfi1_cdbg(CNTR, "\tPer VL\n");
- for (j = 0; j < C_VL_COUNT; j++) {
- val = entry->rw_cntr(entry,
- dd, j,
- CNTR_MODE_R,
- 0);
- hfi1_cdbg(
- CNTR,
- "\t\tRead 0x%llx for %d\n",
- val, j);
- dd->cntrs[entry->offset + j] =
- val;
- }
- } else if (entry->flags & CNTR_SDMA) {
- hfi1_cdbg(CNTR,
- "\t Per SDMA Engine\n");
- for (j = 0; j < dd->chip_sdma_engines;
- j++) {
- val =
- entry->rw_cntr(entry, dd, j,
- CNTR_MODE_R, 0);
- hfi1_cdbg(CNTR,
- "\t\tRead 0x%llx for %d\n",
- val, j);
- dd->cntrs[entry->offset + j] =
- val;
- }
- } else {
- val = entry->rw_cntr(entry, dd,
- CNTR_INVALID_VL,
- CNTR_MODE_R, 0);
- dd->cntrs[entry->offset] = val;
- hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
- }
- }
- }
- }
- return ret;
-}
-
-/*
- * Used by sysfs to create files for hfi stats to read
- */
-u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
-{
- int ret;
- u64 val = 0;
-
- if (namep) {
- ret = ppd->dd->portcntrnameslen;
- *namep = ppd->dd->portcntrnames;
- } else {
- const struct cntr_entry *entry;
- int i, j;
-
- ret = ppd->dd->nportcntrs * sizeof(u64);
- *cntrp = ppd->cntrs;
-
- for (i = 0; i < PORT_CNTR_LAST; i++) {
- entry = &port_cntrs[i];
- hfi1_cdbg(CNTR, "reading %s", entry->name);
- if (entry->flags & CNTR_DISABLED) {
- /* Nothing */
- hfi1_cdbg(CNTR, "\tDisabled\n");
- continue;
- }
-
- if (entry->flags & CNTR_VL) {
- hfi1_cdbg(CNTR, "\tPer VL");
- for (j = 0; j < C_VL_COUNT; j++) {
- val = entry->rw_cntr(entry, ppd, j,
- CNTR_MODE_R,
- 0);
- hfi1_cdbg(
- CNTR,
- "\t\tRead 0x%llx for %d",
- val, j);
- ppd->cntrs[entry->offset + j] = val;
- }
- } else {
- val = entry->rw_cntr(entry, ppd,
- CNTR_INVALID_VL,
- CNTR_MODE_R,
- 0);
- ppd->cntrs[entry->offset] = val;
- hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
- }
- }
- }
- return ret;
-}
-
-static void free_cntrs(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd;
- int i;
-
- if (dd->synth_stats_timer.data)
- del_timer_sync(&dd->synth_stats_timer);
- dd->synth_stats_timer.data = 0;
- ppd = (struct hfi1_pportdata *)(dd + 1);
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- kfree(ppd->cntrs);
- kfree(ppd->scntrs);
- free_percpu(ppd->ibport_data.rvp.rc_acks);
- free_percpu(ppd->ibport_data.rvp.rc_qacks);
- free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
- ppd->cntrs = NULL;
- ppd->scntrs = NULL;
- ppd->ibport_data.rvp.rc_acks = NULL;
- ppd->ibport_data.rvp.rc_qacks = NULL;
- ppd->ibport_data.rvp.rc_delayed_comp = NULL;
- }
- kfree(dd->portcntrnames);
- dd->portcntrnames = NULL;
- kfree(dd->cntrs);
- dd->cntrs = NULL;
- kfree(dd->scntrs);
- dd->scntrs = NULL;
- kfree(dd->cntrnames);
- dd->cntrnames = NULL;
-}
-
-#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
-#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
-
-static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
- u64 *psval, void *context, int vl)
-{
- u64 val;
- u64 sval = *psval;
-
- if (entry->flags & CNTR_DISABLED) {
- dd_dev_err(dd, "Counter %s not enabled", entry->name);
- return 0;
- }
-
- hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
-
- val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
-
-	/* If it's a synthetic counter, there is more work we need to do */
- if (entry->flags & CNTR_SYNTH) {
- if (sval == CNTR_MAX) {
- /* No need to read already saturated */
- return CNTR_MAX;
- }
-
- if (entry->flags & CNTR_32BIT) {
- /* 32bit counters can wrap multiple times */
- u64 upper = sval >> 32;
- u64 lower = (sval << 32) >> 32;
-
- if (lower > val) { /* hw wrapped */
- if (upper == CNTR_32BIT_MAX)
- val = CNTR_MAX;
- else
- upper++;
- }
-
- if (val != CNTR_MAX)
- val = (upper << 32) | val;
-
- } else {
- /* If we rolled we are saturated */
- if ((val < sval) || (val > CNTR_MAX))
- val = CNTR_MAX;
- }
- }
-
- *psval = val;
-
- hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
-
- return val;
-}
-
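-/*
- * Worked example of the 32-bit wrap handling above (illustrative
- * values): with a saved value sval = 0x1fffffff0 (upper = 1,
- * lower = 0xfffffff0), a fresh hardware read of val = 0x10 satisfies
- * lower > val, so the hardware wrapped: upper becomes 2 and the
- * returned 64-bit value is 0x200000010.  Once upper reaches
- * CNTR_32BIT_MAX, the counter saturates at CNTR_MAX.
- */
-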
-static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
- struct cntr_entry *entry,
- u64 *psval, void *context, int vl, u64 data)
-{
- u64 val;
-
- if (entry->flags & CNTR_DISABLED) {
- dd_dev_err(dd, "Counter %s not enabled", entry->name);
- return 0;
- }
-
- hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
-
- if (entry->flags & CNTR_SYNTH) {
- *psval = data;
- if (entry->flags & CNTR_32BIT) {
- val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
- (data << 32) >> 32);
- val = data; /* return the full 64bit value */
- } else {
- val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
- data);
- }
- } else {
- val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
- }
-
- *psval = val;
-
- hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
-
- return val;
-}
-
-u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
-{
- struct cntr_entry *entry;
- u64 *sval;
-
- entry = &dev_cntrs[index];
- sval = dd->scntrs + entry->offset;
-
- if (vl != CNTR_INVALID_VL)
- sval += vl;
-
- return read_dev_port_cntr(dd, entry, sval, dd, vl);
-}
-
-u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
-{
- struct cntr_entry *entry;
- u64 *sval;
-
- entry = &dev_cntrs[index];
- sval = dd->scntrs + entry->offset;
-
- if (vl != CNTR_INVALID_VL)
- sval += vl;
-
- return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
-}
-
-u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
-{
- struct cntr_entry *entry;
- u64 *sval;
-
- entry = &port_cntrs[index];
- sval = ppd->scntrs + entry->offset;
-
- if (vl != CNTR_INVALID_VL)
- sval += vl;
-
- if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
- (index <= C_RCV_HDR_OVF_LAST)) {
- /* We do not want to bother for disabled contexts */
- return 0;
- }
-
- return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
-}
-
-u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
-{
- struct cntr_entry *entry;
- u64 *sval;
-
- entry = &port_cntrs[index];
- sval = ppd->scntrs + entry->offset;
-
- if (vl != CNTR_INVALID_VL)
- sval += vl;
-
- if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
- (index <= C_RCV_HDR_OVF_LAST)) {
- /* We do not want to bother for disabled contexts */
- return 0;
- }
-
- return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
-}
-
-static void update_synth_timer(unsigned long opaque)
-{
- u64 cur_tx;
- u64 cur_rx;
- u64 total_flits;
- u8 update = 0;
- int i, j, vl;
- struct hfi1_pportdata *ppd;
- struct cntr_entry *entry;
-
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
-
- /*
-	 * Rather than keep beating on the CSRs, pick a minimal set that we
-	 * can check to watch for potential rollover. We can do this by
-	 * looking at the number of flits sent/received. If the total exceeds
-	 * what 32 bits can hold, we have to iterate over all the counters
-	 * and update them.
- */
- entry = &dev_cntrs[C_DC_RCV_FLITS];
- cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
-
- entry = &dev_cntrs[C_DC_XMIT_FLITS];
- cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
-
- hfi1_cdbg(
- CNTR,
- "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
- dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
-
- if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
- /*
- * May not be strictly necessary to update but it won't hurt and
- * simplifies the logic here.
- */
- update = 1;
- hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
- dd->unit);
- } else {
- total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
- hfi1_cdbg(CNTR,
- "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
- total_flits, (u64)CNTR_32BIT_MAX);
- if (total_flits >= CNTR_32BIT_MAX) {
- hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
- dd->unit);
- update = 1;
- }
- }
-
- if (update) {
- hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
- for (i = 0; i < DEV_CNTR_LAST; i++) {
- entry = &dev_cntrs[i];
- if (entry->flags & CNTR_VL) {
- for (vl = 0; vl < C_VL_COUNT; vl++)
- read_dev_cntr(dd, i, vl);
- } else {
- read_dev_cntr(dd, i, CNTR_INVALID_VL);
- }
- }
- ppd = (struct hfi1_pportdata *)(dd + 1);
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- for (j = 0; j < PORT_CNTR_LAST; j++) {
- entry = &port_cntrs[j];
- if (entry->flags & CNTR_VL) {
- for (vl = 0; vl < C_VL_COUNT; vl++)
- read_port_cntr(ppd, j, vl);
- } else {
- read_port_cntr(ppd, j, CNTR_INVALID_VL);
- }
- }
- }
-
- /*
-		 * We want the value in the register. The goal is to keep track
-		 * of the number of "ticks", not the counter value. In other
-		 * words, if the register rolls over, we want to notice it and
-		 * force an update.
- */
- entry = &dev_cntrs[C_DC_XMIT_FLITS];
- dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
- CNTR_MODE_R, 0);
-
- entry = &dev_cntrs[C_DC_RCV_FLITS];
- dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
- CNTR_MODE_R, 0);
-
- hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
- dd->unit, dd->last_tx, dd->last_rx);
-
- } else {
- hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
- }
-
-	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
-}
-
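-/*
- * Why the 2^32-flit tripwire above is sufficient (our reading of the
- * code, not an authoritative statement): read_dev_port_cntr() can
- * recover from at most one wrap of a 32-bit counter between reads,
- * and no 32-bit counter here advances faster than once per flit, so
- * forcing a full re-read before 2^32 flits accumulate keeps any
- * counter from wrapping twice unseen.
- */
-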
-#define C_MAX_NAME 13 /* 12 chars + one for '\0' */
-static int init_cntrs(struct hfi1_devdata *dd)
-{
- int i, rcv_ctxts, j;
- size_t sz;
- char *p;
- char name[C_MAX_NAME];
- struct hfi1_pportdata *ppd;
- const char *bit_type_32 = ",32";
- const int bit_type_32_sz = strlen(bit_type_32);
-
-	/* set up the stats timer; it is started via mod_timer at the end */
- setup_timer(&dd->synth_stats_timer, update_synth_timer,
- (unsigned long)dd);
-
- /***********************/
- /* per device counters */
- /***********************/
-
-	/* size names and determine how many we have */
- dd->ndevcntrs = 0;
- sz = 0;
-
- for (i = 0; i < DEV_CNTR_LAST; i++) {
- if (dev_cntrs[i].flags & CNTR_DISABLED) {
- hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
- continue;
- }
-
- if (dev_cntrs[i].flags & CNTR_VL) {
- dev_cntrs[i].offset = dd->ndevcntrs;
- for (j = 0; j < C_VL_COUNT; j++) {
- snprintf(name, C_MAX_NAME, "%s%d",
- dev_cntrs[i].name, vl_from_idx(j));
- sz += strlen(name);
- /* Add ",32" for 32-bit counters */
- if (dev_cntrs[i].flags & CNTR_32BIT)
- sz += bit_type_32_sz;
- sz++;
- dd->ndevcntrs++;
- }
- } else if (dev_cntrs[i].flags & CNTR_SDMA) {
- dev_cntrs[i].offset = dd->ndevcntrs;
- for (j = 0; j < dd->chip_sdma_engines; j++) {
- snprintf(name, C_MAX_NAME, "%s%d",
- dev_cntrs[i].name, j);
- sz += strlen(name);
- /* Add ",32" for 32-bit counters */
- if (dev_cntrs[i].flags & CNTR_32BIT)
- sz += bit_type_32_sz;
- sz++;
- dd->ndevcntrs++;
- }
- } else {
- /* +1 for newline. */
- sz += strlen(dev_cntrs[i].name) + 1;
- /* Add ",32" for 32-bit counters */
- if (dev_cntrs[i].flags & CNTR_32BIT)
- sz += bit_type_32_sz;
- dev_cntrs[i].offset = dd->ndevcntrs;
- dd->ndevcntrs++;
- }
- }
-
- /* allocate space for the counter values */
- dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
- if (!dd->cntrs)
- goto bail;
-
- dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
- if (!dd->scntrs)
- goto bail;
-
- /* allocate space for the counter names */
- dd->cntrnameslen = sz;
- dd->cntrnames = kmalloc(sz, GFP_KERNEL);
- if (!dd->cntrnames)
- goto bail;
-
- /* fill in the names */
- for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
- if (dev_cntrs[i].flags & CNTR_DISABLED) {
- /* Nothing */
- } else if (dev_cntrs[i].flags & CNTR_VL) {
- for (j = 0; j < C_VL_COUNT; j++) {
- snprintf(name, C_MAX_NAME, "%s%d",
- dev_cntrs[i].name,
- vl_from_idx(j));
- memcpy(p, name, strlen(name));
- p += strlen(name);
-
- /* Counter is 32 bits */
- if (dev_cntrs[i].flags & CNTR_32BIT) {
- memcpy(p, bit_type_32, bit_type_32_sz);
- p += bit_type_32_sz;
- }
-
- *p++ = '\n';
- }
- } else if (dev_cntrs[i].flags & CNTR_SDMA) {
- for (j = 0; j < dd->chip_sdma_engines; j++) {
- snprintf(name, C_MAX_NAME, "%s%d",
- dev_cntrs[i].name, j);
- memcpy(p, name, strlen(name));
- p += strlen(name);
-
- /* Counter is 32 bits */
- if (dev_cntrs[i].flags & CNTR_32BIT) {
- memcpy(p, bit_type_32, bit_type_32_sz);
- p += bit_type_32_sz;
- }
-
- *p++ = '\n';
- }
- } else {
- memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
- p += strlen(dev_cntrs[i].name);
-
- /* Counter is 32 bits */
- if (dev_cntrs[i].flags & CNTR_32BIT) {
- memcpy(p, bit_type_32, bit_type_32_sz);
- p += bit_type_32_sz;
- }
-
- *p++ = '\n';
- }
- }
-
- /*********************/
- /* per port counters */
- /*********************/
-
- /*
- * Go through the counters for the overflows and disable the ones we
- * don't need. This varies based on platform so we need to do it
- * dynamically here.
- */
- rcv_ctxts = dd->num_rcv_contexts;
- for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
- i <= C_RCV_HDR_OVF_LAST; i++) {
- port_cntrs[i].flags |= CNTR_DISABLED;
- }
-
-	/* size port counter names and determine how many we have */
- sz = 0;
- dd->nportcntrs = 0;
- for (i = 0; i < PORT_CNTR_LAST; i++) {
- if (port_cntrs[i].flags & CNTR_DISABLED) {
- hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
- continue;
- }
-
- if (port_cntrs[i].flags & CNTR_VL) {
- port_cntrs[i].offset = dd->nportcntrs;
- for (j = 0; j < C_VL_COUNT; j++) {
- snprintf(name, C_MAX_NAME, "%s%d",
- port_cntrs[i].name, vl_from_idx(j));
- sz += strlen(name);
- /* Add ",32" for 32-bit counters */
- if (port_cntrs[i].flags & CNTR_32BIT)
- sz += bit_type_32_sz;
- sz++;
- dd->nportcntrs++;
- }
- } else {
- /* +1 for newline */
- sz += strlen(port_cntrs[i].name) + 1;
- /* Add ",32" for 32-bit counters */
- if (port_cntrs[i].flags & CNTR_32BIT)
- sz += bit_type_32_sz;
- port_cntrs[i].offset = dd->nportcntrs;
- dd->nportcntrs++;
- }
- }
-
- /* allocate space for the counter names */
- dd->portcntrnameslen = sz;
- dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
- if (!dd->portcntrnames)
- goto bail;
-
- /* fill in port cntr names */
- for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
- if (port_cntrs[i].flags & CNTR_DISABLED)
- continue;
-
- if (port_cntrs[i].flags & CNTR_VL) {
- for (j = 0; j < C_VL_COUNT; j++) {
- snprintf(name, C_MAX_NAME, "%s%d",
- port_cntrs[i].name, vl_from_idx(j));
- memcpy(p, name, strlen(name));
- p += strlen(name);
-
- /* Counter is 32 bits */
- if (port_cntrs[i].flags & CNTR_32BIT) {
- memcpy(p, bit_type_32, bit_type_32_sz);
- p += bit_type_32_sz;
- }
-
- *p++ = '\n';
- }
- } else {
- memcpy(p, port_cntrs[i].name,
- strlen(port_cntrs[i].name));
- p += strlen(port_cntrs[i].name);
-
- /* Counter is 32 bits */
- if (port_cntrs[i].flags & CNTR_32BIT) {
- memcpy(p, bit_type_32, bit_type_32_sz);
- p += bit_type_32_sz;
- }
-
- *p++ = '\n';
- }
- }
-
- /* allocate per port storage for counter values */
- ppd = (struct hfi1_pportdata *)(dd + 1);
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
- if (!ppd->cntrs)
- goto bail;
-
- ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
- if (!ppd->scntrs)
- goto bail;
- }
-
- /* CPU counters need to be allocated and zeroed */
- if (init_cpu_counters(dd))
- goto bail;
-
- mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
- return 0;
-bail:
- free_cntrs(dd);
- return -ENOMEM;
-}
-
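-/*
- * Name-buffer layout produced above (hypothetical counter name): a
- * per-VL, 32-bit device counter "RxFoo" contributes the entries
- *
- *	"RxFoo0,32\n" "RxFoo1,32\n" ...
- *
- * one per VL (numbered via vl_from_idx()), each tagged ",32" and
- * newline-terminated, matching the sizing pass that allocated
- * dd->cntrnames.
- */
-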
-static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
-{
- switch (chip_lstate) {
- default:
- dd_dev_err(dd,
- "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
- chip_lstate);
- /* fall through */
- case LSTATE_DOWN:
- return IB_PORT_DOWN;
- case LSTATE_INIT:
- return IB_PORT_INIT;
- case LSTATE_ARMED:
- return IB_PORT_ARMED;
- case LSTATE_ACTIVE:
- return IB_PORT_ACTIVE;
- }
-}
-
-u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
-{
- /* look at the HFI meta-states only */
- switch (chip_pstate & 0xf0) {
- default:
- dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
- chip_pstate);
- /* fall through */
- case PLS_DISABLED:
- return IB_PORTPHYSSTATE_DISABLED;
- case PLS_OFFLINE:
- return OPA_PORTPHYSSTATE_OFFLINE;
- case PLS_POLLING:
- return IB_PORTPHYSSTATE_POLLING;
- case PLS_CONFIGPHY:
- return IB_PORTPHYSSTATE_TRAINING;
- case PLS_LINKUP:
- return IB_PORTPHYSSTATE_LINKUP;
- case PLS_PHYTEST:
- return IB_PORTPHYSSTATE_PHY_TEST;
- }
-}
-
-/* return the OPA port logical state name */
-const char *opa_lstate_name(u32 lstate)
-{
- static const char * const port_logical_names[] = {
- "PORT_NOP",
- "PORT_DOWN",
- "PORT_INIT",
- "PORT_ARMED",
- "PORT_ACTIVE",
- "PORT_ACTIVE_DEFER",
- };
- if (lstate < ARRAY_SIZE(port_logical_names))
- return port_logical_names[lstate];
- return "unknown";
-}
-
-/* return the OPA port physical state name */
-const char *opa_pstate_name(u32 pstate)
-{
- static const char * const port_physical_names[] = {
- "PHYS_NOP",
- "reserved1",
- "PHYS_POLL",
- "PHYS_DISABLED",
- "PHYS_TRAINING",
- "PHYS_LINKUP",
- "PHYS_LINK_ERR_RECOVER",
- "PHYS_PHY_TEST",
- "reserved8",
- "PHYS_OFFLINE",
- "PHYS_GANGED",
- "PHYS_TEST",
- };
- if (pstate < ARRAY_SIZE(port_physical_names))
- return port_physical_names[pstate];
- return "unknown";
-}
-
-/*
- * Read the hardware link state and set the driver's cached value of it.
- * Return the (new) current value.
- */
-u32 get_logical_state(struct hfi1_pportdata *ppd)
-{
- u32 new_state;
-
- new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
- if (new_state != ppd->lstate) {
- dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
- opa_lstate_name(new_state), new_state);
- ppd->lstate = new_state;
- }
- /*
- * Set port status flags in the page mapped into userspace
- * memory. Do it here to ensure a reliable state - this is
- * the only function called by all state handling code.
-	 * Always set the flags because the cache value might have
-	 * been changed explicitly outside of this function.
- */
- if (ppd->statusp) {
- switch (ppd->lstate) {
- case IB_PORT_DOWN:
- case IB_PORT_INIT:
- *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
- HFI1_STATUS_IB_READY);
- break;
- case IB_PORT_ARMED:
- *ppd->statusp |= HFI1_STATUS_IB_CONF;
- break;
- case IB_PORT_ACTIVE:
- *ppd->statusp |= HFI1_STATUS_IB_READY;
- break;
- }
- }
- return ppd->lstate;
-}
-
-/**
- * wait_logical_linkstate - wait for an IB link state change to occur
- * @ppd: port device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * Wait up to msecs milliseconds for IB link state change to occur.
- * For now, take the easy polling route.
- * Returns 0 if state reached, otherwise -ETIMEDOUT.
- */
-static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
- int msecs)
-{
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(msecs);
- while (1) {
- if (get_logical_state(ppd) == state)
- return 0;
- if (time_after(jiffies, timeout))
- break;
- msleep(20);
- }
- dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
-
- return -ETIMEDOUT;
-}
-
-u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
-{
- u32 pstate;
- u32 ib_pstate;
-
- pstate = read_physical_state(ppd->dd);
- ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
- if (ppd->last_pstate != ib_pstate) {
- dd_dev_info(ppd->dd,
- "%s: physical state changed to %s (0x%x), phy 0x%x\n",
- __func__, opa_pstate_name(ib_pstate), ib_pstate,
- pstate);
- ppd->last_pstate = ib_pstate;
- }
- return ib_pstate;
-}
-
-/*
- * Read/modify/write ASIC_QSFP register bits as selected by mask
- * data: 0 or 1 in the selected positions, depending on what is to be written
- * dir: 0 for read, 1 for write
- * mask: select by setting
- * I2CCLK (bit 0)
- * I2CDATA (bit 1)
- */
-u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
- u32 mask)
-{
- u64 qsfp_oe, target_oe;
-
- target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
- if (mask) {
- /* We are writing register bits, so lock access */
- dir &= mask;
- data &= mask;
-
- qsfp_oe = read_csr(dd, target_oe);
- qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
- write_csr(dd, target_oe, qsfp_oe);
- }
- /* We are exclusively reading bits here, but it is unlikely
- * we'll get valid data when we set the direction of the pin
-	 * in the same call, so a reader should call this function again
-	 * to get valid data.
- */
- return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
-}
-
-#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
-(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-#define SET_STATIC_RATE_CONTROL_SMASK(r) \
-(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-int hfi1_init_ctxt(struct send_context *sc)
-{
- if (sc) {
- struct hfi1_devdata *dd = sc->dd;
- u64 reg;
- u8 set = (sc->type == SC_USER ?
- HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
- HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
- reg = read_kctxt_csr(dd, sc->hw_context,
- SEND_CTXT_CHECK_ENABLE);
- if (set)
- CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
- else
- SET_STATIC_RATE_CONTROL_SMASK(reg);
- write_kctxt_csr(dd, sc->hw_context,
- SEND_CTXT_CHECK_ENABLE, reg);
- }
- return 0;
-}
-
-int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
-{
- int ret = 0;
- u64 reg;
-
- if (dd->icode != ICODE_RTL_SILICON) {
- if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
- dd_dev_info(dd, "%s: tempsense not supported by HW\n",
- __func__);
- return -EINVAL;
- }
- reg = read_csr(dd, ASIC_STS_THERM);
- temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
- ASIC_STS_THERM_CURR_TEMP_MASK);
- temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
- ASIC_STS_THERM_LO_TEMP_MASK);
- temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
- ASIC_STS_THERM_HI_TEMP_MASK);
- temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
- ASIC_STS_THERM_CRIT_TEMP_MASK);
- /* triggers is a 3-bit value - 1 bit per trigger. */
- temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
-
- return ret;
-}
-
-/* ========================================================================= */
-
-/*
- * Enable/disable chip from delivering interrupts.
- */
-void set_intr_state(struct hfi1_devdata *dd, u32 enable)
-{
- int i;
-
- /*
- * In HFI, the mask needs to be 1 to allow interrupts.
- */
- if (enable) {
- /* enable all interrupts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
-
- init_qsfp_int(dd);
- } else {
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
- }
-}
-
-/*
- * Clear all interrupt sources on the chip.
- */
-static void clear_all_interrupts(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
-
- write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
- write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
- write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
- write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
- write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
- write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
- write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
- for (i = 0; i < dd->chip_send_contexts; i++)
- write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
- for (i = 0; i < dd->chip_sdma_engines; i++)
- write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
-
- write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
- write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
- write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
-}
-
-/* Move to pcie.c? */
-static void disable_intx(struct pci_dev *pdev)
-{
- pci_intx(pdev, 0);
-}
-
-static void clean_up_interrupts(struct hfi1_devdata *dd)
-{
- int i;
-
- /* remove irqs - must happen before disabling/turning off */
- if (dd->num_msix_entries) {
- /* MSI-X */
- struct hfi1_msix_entry *me = dd->msix_entries;
-
- for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (!me->arg) /* => no irq, no affinity */
- continue;
- hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
- free_irq(me->msix.vector, me->arg);
- }
- } else {
- /* INTx */
- if (dd->requested_intx_irq) {
- free_irq(dd->pcidev->irq, dd);
- dd->requested_intx_irq = 0;
- }
- }
-
- /* turn off interrupts */
- if (dd->num_msix_entries) {
- /* MSI-X */
- pci_disable_msix(dd->pcidev);
- } else {
- /* INTx */
- disable_intx(dd->pcidev);
- }
-
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
-}
-
-/*
- * Remap the interrupt source from the general handler to the given MSI-X
- * interrupt.
- */
-static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
-{
- u64 reg;
- int m, n;
-
- /* clear from the handled mask of the general interrupt */
- m = isrc / 64;
- n = isrc % 64;
- dd->gi_mask[m] &= ~((u64)1 << n);
-
- /* direct the chip source to the given MSI-X interrupt */
- m = isrc / 8;
- n = isrc % 8;
- reg = read_csr(dd, CCE_INT_MAP + (8 * m));
- reg &= ~((u64)0xff << (8 * n));
- reg |= ((u64)msix_intr & 0xff) << (8 * n);
- write_csr(dd, CCE_INT_MAP + (8 * m), reg);
-}
-
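-/*
- * Worked example (illustrative source number): for isrc = 73,
- * 73 / 64 = 1 and 73 % 64 = 9, so bit 9 of gi_mask[1] is cleared;
- * then 73 / 8 = 9 and 73 % 8 = 1, so byte 1 of CCE_INT_MAP CSR 9 is
- * rewritten with the MSI-X vector number.
- */
-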
-static void remap_sdma_interrupts(struct hfi1_devdata *dd,
- int engine, int msix_intr)
-{
- /*
-	 * SDMA engine interrupt sources are grouped by type, rather than
-	 * by engine. Per-engine interrupts are as follows:
- * SDMA
- * SDMAProgress
- * SDMAIdle
- */
- remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
-}
-
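-/*
- * Example of the grouping above (hypothetical engine count): if
- * TXE_NUM_SDMA_ENGINES were 16, engine 2's three sources would be
- * IS_SDMA_START + 2 (SDMA), + 18 (SDMAProgress) and + 34 (SDMAIdle),
- * all remapped to the same MSI-X vector.
- */
-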
-static int request_intx_irq(struct hfi1_devdata *dd)
-{
- int ret;
-
- snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
- dd->unit);
- ret = request_irq(dd->pcidev->irq, general_interrupt,
- IRQF_SHARED, dd->intx_name, dd);
- if (ret)
- dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
- ret);
- else
- dd->requested_intx_irq = 1;
- return ret;
-}
-
-static int request_msix_irqs(struct hfi1_devdata *dd)
-{
- int first_general, last_general;
- int first_sdma, last_sdma;
- int first_rx, last_rx;
- int i, ret = 0;
-
- /* calculate the ranges we are going to use */
- first_general = 0;
- last_general = first_general + 1;
- first_sdma = last_general;
- last_sdma = first_sdma + dd->num_sdma;
- first_rx = last_sdma;
- last_rx = first_rx + dd->n_krcv_queues;
-
- /*
- * Sanity check - the code expects all SDMA chip source
- * interrupts to be in the same CSR, starting at bit 0. Verify
- * that this is true by checking the bit location of the start.
- */
- BUILD_BUG_ON(IS_SDMA_START % 64);
-
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *me = &dd->msix_entries[i];
- const char *err_info;
- irq_handler_t handler;
- irq_handler_t thread = NULL;
- void *arg;
- int idx;
- struct hfi1_ctxtdata *rcd = NULL;
- struct sdma_engine *sde = NULL;
-
- /* obtain the arguments to request_irq */
- if (first_general <= i && i < last_general) {
- idx = i - first_general;
- handler = general_interrupt;
- arg = dd;
- snprintf(me->name, sizeof(me->name),
- DRIVER_NAME "_%d", dd->unit);
- err_info = "general";
- me->type = IRQ_GENERAL;
- } else if (first_sdma <= i && i < last_sdma) {
- idx = i - first_sdma;
- sde = &dd->per_sdma[idx];
- handler = sdma_interrupt;
- arg = sde;
- snprintf(me->name, sizeof(me->name),
- DRIVER_NAME "_%d sdma%d", dd->unit, idx);
- err_info = "sdma";
- remap_sdma_interrupts(dd, idx, i);
- me->type = IRQ_SDMA;
- } else if (first_rx <= i && i < last_rx) {
- idx = i - first_rx;
- rcd = dd->rcd[idx];
- /* no interrupt if no rcd */
- if (!rcd)
- continue;
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- handler = receive_context_interrupt;
- thread = receive_context_thread;
- arg = rcd;
- snprintf(me->name, sizeof(me->name),
- DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
- err_info = "receive context";
- remap_intr(dd, IS_RCVAVAIL_START + idx, i);
- me->type = IRQ_RCVCTXT;
- } else {
- /* not in our expected range - complain, then
- * ignore it
- */
- dd_dev_err(dd,
- "Unexpected extra MSI-X interrupt %d\n", i);
- continue;
- }
- /* no argument, no interrupt */
- if (!arg)
- continue;
- /* make sure the name is terminated */
- me->name[sizeof(me->name) - 1] = 0;
-
- ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
- me->name, arg);
- if (ret) {
- dd_dev_err(dd,
- "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
- err_info, me->msix.vector, idx, ret);
- return ret;
- }
- /*
- * assign arg after request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret)
- dd_dev_err(dd,
- "unable to pin IRQ %d\n", ret);
- }
-
- return ret;
-}
-
-/*
- * Set the general handler to accept all interrupts, remap all
- * chip interrupts back to MSI-X 0.
- */
-static void reset_interrupts(struct hfi1_devdata *dd)
-{
- int i;
-
- /* all interrupts handled by the general handler */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- dd->gi_mask[i] = ~(u64)0;
-
- /* all chip interrupts map to MSI-X 0 */
- for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
- write_csr(dd, CCE_INT_MAP + (8 * i), 0);
-}
-
-static int set_up_interrupts(struct hfi1_devdata *dd)
-{
- struct hfi1_msix_entry *entries;
- u32 total, request;
- int i, ret;
- int single_interrupt = 0; /* we expect to have all the interrupts */
-
- /*
- * Interrupt count:
- * 1 general, "slow path" interrupt (includes the SDMA engines
- * slow source, SDMACleanupDone)
- * N interrupts - one per used SDMA engine
-	 * M interrupts - one per kernel receive context
- */
- total = 1 + dd->num_sdma + dd->n_krcv_queues;
-
- entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto fail;
- }
- /* 1-1 MSI-X entry assignment */
- for (i = 0; i < total; i++)
- entries[i].msix.entry = i;
-
- /* ask for MSI-X interrupts */
- request = total;
- request_msix(dd, &request, entries);
-
- if (request == 0) {
- /* using INTx */
- /* dd->num_msix_entries already zero */
- kfree(entries);
- single_interrupt = 1;
- dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
- } else {
- /* using MSI-X */
- dd->num_msix_entries = request;
- dd->msix_entries = entries;
-
- if (request != total) {
- /* using MSI-X, with reduced interrupts */
- dd_dev_err(
- dd,
- "cannot handle reduced interrupt case, want %u, got %u\n",
- total, request);
- ret = -EINVAL;
- goto fail;
- }
- dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
- }
-
- /* mask all interrupts */
- set_intr_state(dd, 0);
- /* clear all pending interrupts */
- clear_all_interrupts(dd);
-
- /* reset general handler mask, chip MSI-X mappings */
- reset_interrupts(dd);
-
- if (single_interrupt)
- ret = request_intx_irq(dd);
- else
- ret = request_msix_irqs(dd);
- if (ret)
- goto fail;
-
- return 0;
-
-fail:
- clean_up_interrupts(dd);
- return ret;
-}
-
-/*
- * Set up context values in dd. Sets:
- *
- * num_rcv_contexts - number of contexts being used
- * n_krcv_queues - number of kernel contexts
- * first_user_ctxt - first non-kernel context in array of contexts
- * freectxts - number of free user contexts
- * num_send_contexts - number of PIO send contexts being used
- */
-static int set_up_context_variables(struct hfi1_devdata *dd)
-{
- int num_kernel_contexts;
- int total_contexts;
- int ret;
- unsigned ngroups;
-
- /*
- * Kernel contexts: (to be fixed later):
- * - minimum of 2, or 1 context per NUMA node
- * - Context 0 - control context (VL15/multicast/error)
- * - Context 1 - default context
- */
- if (n_krcvqs)
- /*
- * Don't count context 0 in n_krcvqs since
- * it isn't used for normal verbs traffic.
- *
- * krcvqs will reflect number of kernel
- * receive contexts above 0.
- */
- num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
- else
- num_kernel_contexts = num_online_nodes() + 1;
- num_kernel_contexts =
- max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
- /*
- * Every kernel receive context needs an ACK send context.
- * One send context is allocated for each VL{0-7} and VL15.
- */
- if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
- dd_dev_err(dd,
- "Reducing # kernel rcv contexts to: %d, from %d\n",
- (int)(dd->chip_send_contexts - num_vls - 1),
- (int)num_kernel_contexts);
- num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
- }
- /*
- * User contexts: (to be fixed later)
- * - default to 1 user context per CPU if num_user_contexts is
- * negative
- */
- if (num_user_contexts < 0)
- num_user_contexts = num_online_cpus();
-
- total_contexts = num_kernel_contexts + num_user_contexts;
-
- /*
- * Adjust the counts given a global max.
- */
- if (total_contexts > dd->chip_rcv_contexts) {
- dd_dev_err(dd,
- "Reducing # user receive contexts to: %d, from %d\n",
- (int)(dd->chip_rcv_contexts - num_kernel_contexts),
- (int)num_user_contexts);
- num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
- /* recalculate */
- total_contexts = num_kernel_contexts + num_user_contexts;
- }
-
- /* the first N are kernel contexts, the rest are user contexts */
- dd->num_rcv_contexts = total_contexts;
- dd->n_krcv_queues = num_kernel_contexts;
- dd->first_user_ctxt = num_kernel_contexts;
- dd->num_user_contexts = num_user_contexts;
- dd->freectxts = num_user_contexts;
- dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
- (int)dd->chip_rcv_contexts,
- (int)dd->num_rcv_contexts,
- (int)dd->n_krcv_queues,
- (int)dd->num_rcv_contexts - dd->n_krcv_queues);
-
- /*
- * Receive array allocation:
- * All RcvArray entries are divided into groups of 8. This
- * is required by the hardware and will speed up writes to
- * consecutive entries by using write-combining of the entire
- * cacheline.
- *
- * The number of groups is evenly divided among all contexts;
- * any leftover groups are given to the first N user
- * contexts.
- */
- dd->rcv_entries.group_size = RCV_INCREMENT;
- ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
- dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
- dd->rcv_entries.nctxt_extra = ngroups -
- (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
- dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
- dd->rcv_entries.ngroups,
- dd->rcv_entries.nctxt_extra);
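- /*
- * Sizing sketch (values assumed, for illustration only): 2048
- * RcvArray entries with a group size of 8 yield ngroups = 256;
- * with 10 contexts that is 25 groups per context and 6 extra
- * groups left over for the first user contexts.
- */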
- if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
- MAX_EAGER_ENTRIES * 2) {
- dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
- dd->rcv_entries.group_size;
- dd_dev_info(dd,
- "RcvArray group count too high, change to %u\n",
- dd->rcv_entries.ngroups);
- dd->rcv_entries.nctxt_extra = 0;
- }
- /*
- * PIO send contexts
- */
- ret = init_sc_pools_and_sizes(dd);
- if (ret >= 0) { /* success */
- dd->num_send_contexts = ret;
- dd_dev_info(
- dd,
- "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
- dd->chip_send_contexts,
- dd->num_send_contexts,
- dd->sc_sizes[SC_KERNEL].count,
- dd->sc_sizes[SC_ACK].count,
- dd->sc_sizes[SC_USER].count);
- ret = 0; /* success */
- }
-
- return ret;
-}
-
-/*
- * Set the device/port partition key table. The MAD code
- * will ensure that, at least, the partial management
- * partition key is present in the table.
- */
-static void set_partition_keys(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 reg = 0;
- int i;
-
- dd_dev_info(dd, "Setting partition keys\n");
- for (i = 0; i < hfi1_get_npkeys(dd); i++) {
- reg |= (ppd->pkeys[i] &
- RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
- ((i % 4) *
- RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
- /* Each register holds 4 PKey values. */
- if ((i % 4) == 3) {
- write_csr(dd, RCV_PARTITION_KEY +
- ((i - 3) * 2), reg);
- reg = 0;
- }
- }
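-
- /*
- * Packing sketch (illustrative): each 64-bit register holds
- * four PKey values, so pkeys 0-3 land in the first
- * RCV_PARTITION_KEY register and pkeys 4-7 in the next one,
- * 8 bytes higher.
- */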
-
- /* Always enable the HW pkey check when the pkey table is set */
- add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
-}
-
-/*
- * These CSRs and memories are uninitialized on reset and must be
- * written before reading to set the ECC/parity bits.
- *
- * NOTE: All user context CSRs that are not mmapped write-only
- * (e.g. the TID flows) must be initialized even if the driver never
- * reads them.
- */
-static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
-{
- int i, j;
-
- /* CceIntMap */
- for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
- write_csr(dd, CCE_INT_MAP + (8 * i), 0);
-
- /* SendCtxtCreditReturnAddr */
- for (i = 0; i < dd->chip_send_contexts; i++)
- write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
-
- /* PIO Send buffers */
- /* SDMA Send buffers */
- /*
- * These are not normally read, and (presently) have no method
- * to be read, so are not pre-initialized
- */
-
- /* RcvHdrAddr */
- /* RcvHdrTailAddr */
- /* RcvTidFlowTable */
- for (i = 0; i < dd->chip_rcv_contexts; i++) {
- write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
- write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
- for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
- write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
- }
-
- /* RcvArray */
- for (i = 0; i < dd->chip_rcv_array_count; i++)
- write_csr(dd, RCV_ARRAY + (8 * i),
- RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
-
- /* RcvQPMapTable */
- for (i = 0; i < 32; i++)
- write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
-}
-
-/*
- * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
- */
-static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
- u64 ctrl_bits)
-{
- unsigned long timeout;
- u64 reg;
-
- /* is the condition present? */
- reg = read_csr(dd, CCE_STATUS);
- if ((reg & status_bits) == 0)
- return;
-
- /* clear the condition */
- write_csr(dd, CCE_CTRL, ctrl_bits);
-
- /* wait for the condition to clear */
- timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
- while (1) {
- reg = read_csr(dd, CCE_STATUS);
- if ((reg & status_bits) == 0)
- return;
- if (time_after(jiffies, timeout)) {
- dd_dev_err(dd,
- "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
- status_bits, reg & status_bits);
- return;
- }
- udelay(1);
- }
-}
-
-/* set CCE CSRs to chip reset defaults */
-static void reset_cce_csrs(struct hfi1_devdata *dd)
-{
- int i;
-
- /* CCE_REVISION read-only */
- /* CCE_REVISION2 read-only */
- /* CCE_CTRL - bits clear automatically */
- /* CCE_STATUS read-only, use CceCtrl to clear */
- clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
- clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
- clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
- for (i = 0; i < CCE_NUM_SCRATCH; i++)
- write_csr(dd, CCE_SCRATCH + (8 * i), 0);
- /* CCE_ERR_STATUS read-only */
- write_csr(dd, CCE_ERR_MASK, 0);
- write_csr(dd, CCE_ERR_CLEAR, ~0ull);
- /* CCE_ERR_FORCE leave alone */
- for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
- write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
- write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
- /* CCE_PCIE_CTRL leave alone */
- for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
- write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
- write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
- CCE_MSIX_TABLE_UPPER_RESETCSR);
- }
- for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
- /* CCE_MSIX_PBA read-only */
- write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
- write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
- }
- for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
- write_csr(dd, CCE_INT_MAP, 0);
- for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
- /* CCE_INT_STATUS read-only */
- write_csr(dd, CCE_INT_MASK + (8 * i), 0);
- write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
- /* CCE_INT_FORCE leave alone */
- /* CCE_INT_BLOCKED read-only */
- }
- for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
- write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
-}
-
-/* set MISC CSRs to chip reset defaults */
-static void reset_misc_csrs(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < 32; i++) {
- write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
- write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
- write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
- }
- /*
- * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
- * only be written in 128-byte chunks
- */
- /* init RSA engine to clear lingering errors */
- write_csr(dd, MISC_CFG_RSA_CMD, 1);
- write_csr(dd, MISC_CFG_RSA_MU, 0);
- write_csr(dd, MISC_CFG_FW_CTRL, 0);
- /* MISC_STS_8051_DIGEST read-only */
- /* MISC_STS_SBM_DIGEST read-only */
- /* MISC_STS_PCIE_DIGEST read-only */
- /* MISC_STS_FAB_DIGEST read-only */
- /* MISC_ERR_STATUS read-only */
- write_csr(dd, MISC_ERR_MASK, 0);
- write_csr(dd, MISC_ERR_CLEAR, ~0ull);
- /* MISC_ERR_FORCE leave alone */
-}
-
-/* set TXE CSRs to chip reset defaults */
-static void reset_txe_csrs(struct hfi1_devdata *dd)
-{
- int i;
-
- /*
- * TXE Kernel CSRs
- */
- write_csr(dd, SEND_CTRL, 0);
- __cm_reset(dd, 0); /* reset CM internal state */
- /* SEND_CONTEXTS read-only */
- /* SEND_DMA_ENGINES read-only */
- /* SEND_PIO_MEM_SIZE read-only */
- /* SEND_DMA_MEM_SIZE read-only */
- write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
- pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
- /* SEND_PIO_ERR_STATUS read-only */
- write_csr(dd, SEND_PIO_ERR_MASK, 0);
- write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
- /* SEND_PIO_ERR_FORCE leave alone */
- /* SEND_DMA_ERR_STATUS read-only */
- write_csr(dd, SEND_DMA_ERR_MASK, 0);
- write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
- /* SEND_DMA_ERR_FORCE leave alone */
- /* SEND_EGRESS_ERR_STATUS read-only */
- write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
- write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
- /* SEND_EGRESS_ERR_FORCE leave alone */
- write_csr(dd, SEND_BTH_QP, 0);
- write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
- write_csr(dd, SEND_SC2VLT0, 0);
- write_csr(dd, SEND_SC2VLT1, 0);
- write_csr(dd, SEND_SC2VLT2, 0);
- write_csr(dd, SEND_SC2VLT3, 0);
- write_csr(dd, SEND_LEN_CHECK0, 0);
- write_csr(dd, SEND_LEN_CHECK1, 0);
- /* SEND_ERR_STATUS read-only */
- write_csr(dd, SEND_ERR_MASK, 0);
- write_csr(dd, SEND_ERR_CLEAR, ~0ull);
- /* SEND_ERR_FORCE read-only */
- for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
- write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
- for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
- write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
- for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
- write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
- for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
- write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
- for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
- write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
- write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
- write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
- /* SEND_CM_CREDIT_USED_STATUS read-only */
- write_csr(dd, SEND_CM_TIMER_CTRL, 0);
- write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
- write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
- write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
- write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
- write_csr(dd, SEND_CM_CREDIT_VL15, 0);
- /* SEND_CM_CREDIT_USED_VL read-only */
- /* SEND_CM_CREDIT_USED_VL15 read-only */
- /* SEND_EGRESS_CTXT_STATUS read-only */
- /* SEND_EGRESS_SEND_DMA_STATUS read-only */
- write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
- /* SEND_EGRESS_ERR_INFO read-only */
- /* SEND_EGRESS_ERR_SOURCE read-only */
-
- /*
- * TXE Per-Context CSRs
- */
- for (i = 0; i < dd->chip_send_contexts; i++) {
- write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
- write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
- }
-
- /*
- * TXE Per-SDMA CSRs
- */
- for (i = 0; i < dd->chip_sdma_engines; i++) {
- write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
- /* SEND_DMA_STATUS read-only */
- write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
- write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
- write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
- /* SEND_DMA_HEAD read-only */
- write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
- write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
- /* SEND_DMA_IDLE_CNT read-only */
- write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
- write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
- /* SEND_DMA_DESC_FETCHED_CNT read-only */
- /* SEND_DMA_ENG_ERR_STATUS read-only */
- write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
- write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
- /* SEND_DMA_ENG_ERR_FORCE leave alone */
- write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
- write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
- write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
- write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
- write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
- write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
- write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
- }
-}
-
-/*
- * Expect on entry:
- * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
- */
-static void init_rbufs(struct hfi1_devdata *dd)
-{
- u64 reg;
- int count;
-
- /*
- * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
- * clear.
- */
- count = 0;
- while (1) {
- reg = read_csr(dd, RCV_STATUS);
- if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
- | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
- break;
- /*
- * Give up after 1ms - maximum wait time.
- *
- * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
- * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
- * 148 KB / (66% * 250MB/s) = 920us
- */
- if (count++ > 500) {
- dd_dev_err(dd,
- "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
- __func__, reg);
- break;
- }
- udelay(2); /* do not busy-wait the CSR */
- }
-
- /* start the init - expect RcvCtrl to be 0 */
- write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
-
- /*
- * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
- * period after the write before RcvStatus.RxRbufInitDone is valid.
- * The delay in the first run through the loop below is sufficient and
- * required before the first read of RcvStatus.RxRbufInitDone.
- */
- read_csr(dd, RCV_CTRL);
-
- /* wait for the init to finish */
- count = 0;
- while (1) {
- /* delay is required first time through - see above */
- udelay(2); /* do not busy-wait the CSR */
- reg = read_csr(dd, RCV_STATUS);
- if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
- break;
-
- /* give up after 100us - slowest possible at 33MHz is 73us */
- if (count++ > 50) {
- dd_dev_err(dd,
- "%s: RcvStatus.RxRbufInit not set, continuing\n",
- __func__);
- break;
- }
- }
-}
-
-/* set RXE CSRs to chip reset defaults */
-static void reset_rxe_csrs(struct hfi1_devdata *dd)
-{
- int i, j;
-
- /*
- * RXE Kernel CSRs
- */
- write_csr(dd, RCV_CTRL, 0);
- init_rbufs(dd);
- /* RCV_STATUS read-only */
- /* RCV_CONTEXTS read-only */
- /* RCV_ARRAY_CNT read-only */
- /* RCV_BUF_SIZE read-only */
- write_csr(dd, RCV_BTH_QP, 0);
- write_csr(dd, RCV_MULTICAST, 0);
- write_csr(dd, RCV_BYPASS, 0);
- write_csr(dd, RCV_VL15, 0);
- /* this is a clear-down */
- write_csr(dd, RCV_ERR_INFO,
- RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
- /* RCV_ERR_STATUS read-only */
- write_csr(dd, RCV_ERR_MASK, 0);
- write_csr(dd, RCV_ERR_CLEAR, ~0ull);
- /* RCV_ERR_FORCE leave alone */
- for (i = 0; i < 32; i++)
- write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
- for (i = 0; i < 4; i++)
- write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
- for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
- write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
- for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
- write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
- for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
- write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
- write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
- write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
- }
- for (i = 0; i < 32; i++)
- write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
-
- /*
- * RXE Kernel and User Per-Context CSRs
- */
- for (i = 0; i < dd->chip_rcv_contexts; i++) {
- /* kernel */
- write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
- /* RCV_CTXT_STATUS read-only */
- write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
- write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
- write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
- write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
- write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
- write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
- write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
- write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
- write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
- write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
-
- /* user */
- /* RCV_HDR_TAIL read-only */
- write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
- /* RCV_EGR_INDEX_TAIL read-only */
- write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
- /* RCV_EGR_OFFSET_TAIL read-only */
- for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
- write_uctxt_csr(dd, i,
- RCV_TID_FLOW_TABLE + (8 * j), 0);
- }
- }
-}
-
-/*
- * Set sc2vl tables.
- *
- * They power on to zeros, so to avoid send context errors
- * they need to be set:
- *
- * SC 0-7 -> VL 0-7 (respectively)
- * SC 15 -> VL 15
- * otherwise
- * -> VL 0
- */
-static void init_sc2vl_tables(struct hfi1_devdata *dd)
-{
- int i;
- /* init per architecture spec, constrained by hardware capability */
-
- /* HFI maps sent packets */
- write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
- 0,
- 0, 0, 1, 1,
- 2, 2, 3, 3,
- 4, 4, 5, 5,
- 6, 6, 7, 7));
- write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
- 1,
- 8, 0, 9, 0,
- 10, 0, 11, 0,
- 12, 0, 13, 0,
- 14, 0, 15, 15));
- write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
- 2,
- 16, 0, 17, 0,
- 18, 0, 19, 0,
- 20, 0, 21, 0,
- 22, 0, 23, 0));
- write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
- 3,
- 24, 0, 25, 0,
- 26, 0, 27, 0,
- 28, 0, 29, 0,
- 30, 0, 31, 0));
-
- /* DC maps received packets */
- write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
- 15_0,
- 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
- write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
- 31_16,
- 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
- 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
-
- /* initialize the cached sc2vl values consistently with h/w */
- for (i = 0; i < 32; i++) {
- if (i < 8 || i == 15)
- *((u8 *)(dd->sc2vl) + i) = (u8)i;
- else
- *((u8 *)(dd->sc2vl) + i) = 0;
- }
-}
-
-/*
- * Read chip sizes and then reset parts to sane, disabled, values. We cannot
- * depend on the chip going through a power-on reset - a driver may be loaded
- * and unloaded many times.
- *
- * Do not write any CSR values to the chip in this routine - there may be
- * a reset following the (possible) FLR in this routine.
- */
-static void init_chip(struct hfi1_devdata *dd)
-{
- int i;
-
- /*
- * Put the HFI CSRs in a known state.
- * Combine this with a DC reset.
- *
- * Stop the device from doing anything while we do a
- * reset. We know there are no other active users of
- * the device since we are now in charge. Turn off
- * all outbound and inbound traffic and make sure
- * the device does not generate any interrupts.
- */
-
- /* disable send contexts and SDMA engines */
- write_csr(dd, SEND_CTRL, 0);
- for (i = 0; i < dd->chip_send_contexts; i++)
- write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
- for (i = 0; i < dd->chip_sdma_engines; i++)
- write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
- /* disable port (turn off RXE inbound traffic) and contexts */
- write_csr(dd, RCV_CTRL, 0);
- for (i = 0; i < dd->chip_rcv_contexts; i++)
- write_csr(dd, RCV_CTXT_CTRL, 0);
- /* mask all interrupt sources */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
-
- /*
- * DC Reset: do a full DC reset before the register clear.
- * A recommended length of time to hold is one CSR read,
- * so reread the CceDcCtrl. Then, hold the DC in reset
- * across the clear.
- */
- write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
- (void)read_csr(dd, CCE_DC_CTRL);
-
- if (use_flr) {
- /*
- * A FLR will reset the SPC core and part of the PCIe.
- * The parts that need to be restored have already been
- * saved.
- */
- dd_dev_info(dd, "Resetting CSRs with FLR\n");
-
- /* do the FLR, the DC reset will remain */
- hfi1_pcie_flr(dd);
-
- /* restore command and BARs */
- restore_pci_variables(dd);
-
- if (is_ax(dd)) {
- dd_dev_info(dd, "Resetting CSRs with FLR\n");
- hfi1_pcie_flr(dd);
- restore_pci_variables(dd);
- }
- } else {
- dd_dev_info(dd, "Resetting CSRs with writes\n");
- reset_cce_csrs(dd);
- reset_txe_csrs(dd);
- reset_rxe_csrs(dd);
- reset_misc_csrs(dd);
- }
- /* clear the DC reset */
- write_csr(dd, CCE_DC_CTRL, 0);
-
- /* Set the LED off */
- setextled(dd, 0);
-
- /*
- * Clear the QSFP reset.
- * An FLR enforces a 0 on all out pins. The driver does not touch
- * ASIC_QSFPn_OUT otherwise. This would leave RESET_N low,
- * holding anything plugged in constantly in reset, if it pays
- * attention to RESET_N.
- * Prime examples of this are optical cables. Set all pins high.
- * I2CCLK and I2CDAT will change per direction, and INT_N and
- * MODPRS_N are input only and their value is ignored.
- */
- write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
- write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
- init_chip_resources(dd);
-}
-
-static void init_early_variables(struct hfi1_devdata *dd)
-{
- int i;
-
- /* assign link credit variables */
- dd->vau = CM_VAU;
- dd->link_credits = CM_GLOBAL_CREDITS;
- if (is_ax(dd))
- dd->link_credits--;
- dd->vcu = cu_to_vcu(hfi1_cu);
- /* enough room for 8 MAD packets plus header - 17K */
- dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
- if (dd->vl15_init > dd->link_credits)
- dd->vl15_init = dd->link_credits;
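- /*
- * Worked numbers (assuming vau_to_au(CM_VAU = 3) yields 64-byte
- * AUs): 8 * (2048 + 128) = 17408 bytes, so vl15_init is
- * 17408 / 64 = 272 credits before the link_credits clamp.
- */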
-
- write_uninitialized_csrs_and_memories(dd);
-
- if (HFI1_CAP_IS_KSET(PKEY_CHECK))
- for (i = 0; i < dd->num_pports; i++) {
- struct hfi1_pportdata *ppd = &dd->pport[i];
-
- set_partition_keys(ppd);
- }
- init_sc2vl_tables(dd);
-}
-
-static void init_kdeth_qp(struct hfi1_devdata *dd)
-{
- /* user changed the KDETH_QP */
- if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
- /* out of range or illegal value */
- dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
- kdeth_qp = 0;
- }
- if (kdeth_qp == 0) /* not set, or failed range check */
- kdeth_qp = DEFAULT_KDETH_QP;
-
- write_csr(dd, SEND_BTH_QP,
- (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
- SEND_BTH_QP_KDETH_QP_SHIFT);
-
- write_csr(dd, RCV_BTH_QP,
- (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
- RCV_BTH_QP_KDETH_QP_SHIFT);
-}
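-
-/*
- * Example (prefix value assumed, for illustration): if the KDETH
- * prefix were 0x80, the two writes above would make the hardware
- * treat any packet whose BTH QP high byte equals 0x80 as a KDETH
- * packet, on both the send and receive sides.
- */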
-
-/**
- * init_qpmap_table
- * @dd - device data
- * @first_ctxt - first context
- * @last_ctxt - last context
- *
- * This routine sets the qpn mapping table that
- * is indexed by qpn[8:1].
- *
- * The routine will round robin the 256 settings
- * from first_ctxt to last_ctxt.
- *
- * The first/last looks ahead to having specialized
- * receive contexts for mgmt and bypass. Normal
- * verbs traffic is assumed to be on a range
- * of receive contexts.
- */
-static void init_qpmap_table(struct hfi1_devdata *dd,
- u32 first_ctxt,
- u32 last_ctxt)
-{
- u64 reg = 0;
- u64 regno = RCV_QP_MAP_TABLE;
- int i;
- u64 ctxt = first_ctxt;
-
- for (i = 0; i < 256;) {
- reg |= ctxt << (8 * (i % 8));
- i++;
- ctxt++;
- if (ctxt > last_ctxt)
- ctxt = first_ctxt;
- if (i % 8 == 0) {
- write_csr(dd, regno, reg);
- reg = 0;
- regno += 8;
- }
- }
- if (i % 8)
- write_csr(dd, regno, reg);
-
- add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
- | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
-}
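-
-/*
- * Worked example for init_qpmap_table() (illustrative): with
- * first_ctxt = 0 and last_ctxt = 2, the 256 one-byte slots repeat
- * 0, 1, 2, ..., so the first 64-bit map register - eight slots at
- * a time, low byte first - reads 0x0100020100020100.
- */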
-
-/**
- * init_qos - init RX qos
- * @dd - device data
- * @first_ctxt - first context
- *
- * This routine initializes Rule 0 and the
- * RSM map table to implement qos.
- *
- * If all of the limit tests succeed,
- * qos is applied based on the array
- * interpretation of krcvqs where
- * entry 0 is VL0.
- *
- * The number of vl bits (n) and the number of qpn
- * bits (m) are computed to feed both the RSM map table
- * and the single rule.
- *
- */
-static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
-{
- u8 max_by_vl = 0;
- unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
- u64 *rsmmap;
- u64 reg;
- u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
-
- /* validate */
- if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
- num_vls == 1 ||
- krcvqsset <= 1)
- goto bail;
- for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
- if (krcvqs[i] > max_by_vl)
- max_by_vl = krcvqs[i];
- if (max_by_vl > 32)
- goto bail;
- qpns_per_vl = __roundup_pow_of_two(max_by_vl);
- /* determine bits vl */
- n = ilog2(num_vls);
- /* determine bits for qpn */
- m = ilog2(qpns_per_vl);
- if ((m + n) > 7)
- goto bail;
- if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
- goto bail;
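- /*
- * Sizing sketch (values assumed): num_vls = 8 with a largest
- * krcvqs[] entry of 2 gives qpns_per_vl = 2, n = 3, m = 1, so
- * the m + n = 4 index bits cover 8 VLs x 2 QPNs = 16 map
- * entries.
- */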
- rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
- if (!rsmmap)
- goto bail;
- memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
- /* init the local copy of the table */
- for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
- unsigned tctxt;
-
- for (qpn = 0, tctxt = ctxt;
- krcvqs[i] && qpn < qpns_per_vl; qpn++) {
- unsigned idx, regoff, regidx;
-
- /* generate index < 128 */
- idx = (qpn << n) ^ i;
- regoff = (idx % 8) * 8;
- regidx = idx / 8;
- reg = rsmmap[regidx];
- /* replace 0xff with context number */
- reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
- << regoff);
- reg |= (u64)(tctxt++) << regoff;
- rsmmap[regidx] = reg;
- if (tctxt == ctxt + krcvqs[i])
- tctxt = ctxt;
- }
- ctxt += krcvqs[i];
- }
- /* flush cached copies to chip */
- for (i = 0; i < NUM_MAP_REGS; i++)
- write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
- /* add rule0 */
- write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
- RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
- RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
- 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
- write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
- LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
- LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
- LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
- ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
- QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
- ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
- write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
- LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
- LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
- LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
- LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
- /* Enable RSM */
- add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
- kfree(rsmmap);
- /* map everything else to first context */
- init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
- dd->qos_shift = n + 1;
- return;
-bail:
- dd->qos_shift = 1;
- init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
-}
-
-static void init_rxe(struct hfi1_devdata *dd)
-{
- /* enable all receive errors */
- write_csr(dd, RCV_ERR_MASK, ~0ull);
- /* setup QPN map table - start where VL15 context leaves off */
- init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
- MIN_KERNEL_KCTXTS : 0);
- /*
- * make sure RcvCtrl.RcvWcb <= PCIe Device Control
- * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
- * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
- * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
- * Max_PayLoad_Size set to its minimum of 128.
- *
- * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
- * (64 bytes). Max_Payload_Size is possibly modified upward in
- * tune_pcie_caps() which is called after this routine.
- */
-}
-
-static void init_other(struct hfi1_devdata *dd)
-{
- /* enable all CCE errors */
- write_csr(dd, CCE_ERR_MASK, ~0ull);
- /* enable *some* Misc errors */
- write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
- /* enable all DC errors, except LCB */
- write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
- write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
-}
-
-/*
- * Fill out the given AU table using the given CU. A CU is defined in
- * terms of AUs. The table is an encoding: given the index, how many
- * AUs does that index represent?
- *
- * NOTE: Assumes that the register layout is the same for the
- * local and remote tables.
- */
-static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
- u32 csr0to3, u32 csr4to7)
-{
- write_csr(dd, csr0to3,
- 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
- 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
- 2ull * cu <<
- SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
- 4ull * cu <<
- SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
- write_csr(dd, csr4to7,
- 8ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
- 16ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
- 32ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
- 64ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
-}
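-
-/*
- * Example (illustrative): with cu = 1, the eight table entries
- * encode 0, 1, 2, 4, 8, 16, 32 and 64 AUs for indices 0-7.
- */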
-
-static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
-{
- assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
- SEND_CM_LOCAL_AU_TABLE4_TO7);
-}
-
-void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
-{
- assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
- SEND_CM_REMOTE_AU_TABLE4_TO7);
-}
-
-static void init_txe(struct hfi1_devdata *dd)
-{
- int i;
-
- /* enable all PIO, SDMA, general, and Egress errors */
- write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
- write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
- write_csr(dd, SEND_ERR_MASK, ~0ull);
- write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
-
- /* enable all per-context and per-SDMA engine errors */
- for (i = 0; i < dd->chip_send_contexts; i++)
- write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
- for (i = 0; i < dd->chip_sdma_engines; i++)
- write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
-
- /* set the local CU to AU mapping */
- assign_local_cm_au_table(dd, dd->vcu);
-
- /*
- * Set a reasonable default for the Credit Return Timer.
- * Don't set it on the Simulator - it causes it to choke.
- */
- if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
- write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
-}
-
-int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
-{
- struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
- unsigned sctxt;
- int ret = 0;
- u64 reg;
-
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
- reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
- ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
- SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
- /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
- if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
- reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
- /*
- * Enable send-side J_KEY integrity check, unless this is A0 h/w
- */
- if (!is_ax(dd)) {
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
- reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
- }
-
- /* Enable J_KEY check on receive context. */
- reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
- ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
- RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
- write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
-done:
- return ret;
-}
-
-int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
-{
- struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
- unsigned sctxt;
- int ret = 0;
- u64 reg;
-
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
- /*
- * Disable send-side J_KEY integrity check, unless this is A0 h/w.
- * This check would not have been enabled for A0 h/w, see
- * set_ctxt_jkey().
- */
- if (!is_ax(dd)) {
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
- reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
- }
- /* Turn off the J_KEY on the receive side */
- write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
-done:
- return ret;
-}
-
-int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
-{
- struct hfi1_ctxtdata *rcd;
- unsigned sctxt;
- int ret = 0;
- u64 reg;
-
- if (ctxt < dd->num_rcv_contexts) {
- rcd = dd->rcd[ctxt];
- } else {
- ret = -EINVAL;
- goto done;
- }
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
- reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
- SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
- reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
-done:
- return ret;
-}
-
-int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
-{
- struct hfi1_ctxtdata *rcd;
- unsigned sctxt;
- int ret = 0;
- u64 reg;
-
- if (ctxt < dd->num_rcv_contexts) {
- rcd = dd->rcd[ctxt];
- } else {
- ret = -EINVAL;
- goto done;
- }
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
- reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
-done:
- return ret;
-}
-
-/*
- * Start doing the clean up of the chip. Our clean up happens in multiple
- * stages and this is just the first.
- */
-void hfi1_start_cleanup(struct hfi1_devdata *dd)
-{
- aspm_exit(dd);
- free_cntrs(dd);
- free_rcverr(dd);
- clean_up_interrupts(dd);
- finish_chip_resources(dd);
-}
-
-#define HFI_BASE_GUID(dev) \
- ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
-
-/*
- * Information can be shared between the two HFIs on the same ASIC
- * in the same OS. This function finds the peer device and sets
- * up a shared structure.
- */
-static int init_asic_data(struct hfi1_devdata *dd)
-{
- unsigned long flags;
- struct hfi1_devdata *tmp, *peer = NULL;
- int ret = 0;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- /* Find our peer device */
- list_for_each_entry(tmp, &hfi1_dev_list, list) {
- if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
- dd->unit != tmp->unit) {
- peer = tmp;
- break;
- }
- }
-
- if (peer) {
- dd->asic_data = peer->asic_data;
- } else {
- dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
- if (!dd->asic_data) {
- ret = -ENOMEM;
- goto done;
- }
- mutex_init(&dd->asic_data->asic_resource_mutex);
- }
- dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- return ret;
-}
-
-/*
- * Set dd->boardname. Use a generic name if a name is not returned from
- * EFI variable space.
- *
- * Return 0 on success, -ENOMEM if space could not be allocated.
- */
-static int obtain_boardname(struct hfi1_devdata *dd)
-{
- /* generic board description */
- const char generic[] =
- "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
- unsigned long size;
- int ret;
-
- ret = read_hfi1_efi_var(dd, "description", &size,
- (void **)&dd->boardname);
- if (ret) {
- dd_dev_info(dd, "Board description not found\n");
- /* use generic description */
- dd->boardname = kstrdup(generic, GFP_KERNEL);
- if (!dd->boardname)
- return -ENOMEM;
- }
- return 0;
-}
-
-/*
- * Check the interrupt registers to make sure that they are mapped correctly.
- * It is intended to help the user identify any mismapping by the VMM
- * when the driver is running in a VM. This function should only be
- * called before interrupts are set up properly.
- *
- * Return 0 on success, -EINVAL on failure.
- */
-static int check_int_registers(struct hfi1_devdata *dd)
-{
- u64 reg;
- u64 all_bits = ~(u64)0;
- u64 mask;
-
- /* Clear CceIntMask[0] to avoid raising any interrupts */
- mask = read_csr(dd, CCE_INT_MASK);
- write_csr(dd, CCE_INT_MASK, 0ull);
- reg = read_csr(dd, CCE_INT_MASK);
- if (reg)
- goto err_exit;
-
- /* Clear all interrupt status bits */
- write_csr(dd, CCE_INT_CLEAR, all_bits);
- reg = read_csr(dd, CCE_INT_STATUS);
- if (reg)
- goto err_exit;
-
- /* Set all interrupt status bits */
- write_csr(dd, CCE_INT_FORCE, all_bits);
- reg = read_csr(dd, CCE_INT_STATUS);
- if (reg != all_bits)
- goto err_exit;
-
- /* Restore the interrupt mask */
- write_csr(dd, CCE_INT_CLEAR, all_bits);
- write_csr(dd, CCE_INT_MASK, mask);
-
- return 0;
-err_exit:
- write_csr(dd, CCE_INT_MASK, mask);
- dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
- return -EINVAL;
-}
-
-/**
- * Allocate and initialize the device structure for the hfi.
- * @pdev: the pci_dev for hfi1_ib device
- * @ent: pci_device_id struct for this dev
- *
- * Also allocates, initializes, and returns the devdata struct for this
- * device instance
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct hfi1_devdata *dd;
- struct hfi1_pportdata *ppd;
- u64 reg;
- int i, ret;
- static const char * const inames[] = { /* implementation names */
- "RTL silicon",
- "RTL VCS simulation",
- "RTL FPGA emulation",
- "Functional simulator"
- };
- struct pci_dev *parent = pdev->bus->self;
-
- dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
- sizeof(struct hfi1_pportdata));
- if (IS_ERR(dd))
- goto bail;
- ppd = dd->pport;
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- int vl;
- /* init common fields */
- hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
- /* DC supports 4 link widths */
- ppd->link_width_supported =
- OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
- OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
- ppd->link_width_downgrade_supported =
- ppd->link_width_supported;
- /* start out enabling only 4X */
- ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
- ppd->link_width_downgrade_enabled =
- ppd->link_width_downgrade_supported;
- /* link width active is 0 when link is down */
- /* link width downgrade active is 0 when link is down */
-
- if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
- num_vls > HFI1_MAX_VLS_SUPPORTED) {
- hfi1_early_err(&pdev->dev,
- "Invalid num_vls %u, using %u VLs\n",
- num_vls, HFI1_MAX_VLS_SUPPORTED);
- num_vls = HFI1_MAX_VLS_SUPPORTED;
- }
- ppd->vls_supported = num_vls;
- ppd->vls_operational = ppd->vls_supported;
- ppd->actual_vls_operational = ppd->vls_supported;
- /* Set the default MTU. */
- for (vl = 0; vl < num_vls; vl++)
- dd->vld[vl].mtu = hfi1_max_mtu;
- dd->vld[15].mtu = MAX_MAD_PACKET;
- /*
- * Set the initial values to reasonable default, will be set
- * for real when link is up.
- */
- ppd->lstate = IB_PORT_DOWN;
- ppd->overrun_threshold = 0x4;
- ppd->phy_error_threshold = 0xf;
- ppd->port_crc_mode_enabled = link_crc_mask;
- /* initialize supported LTP CRC mode */
- ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
- /* initialize enabled LTP CRC mode */
- ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
- /* start in offline */
- ppd->host_link_state = HLS_DN_OFFLINE;
- init_vl_arb_caches(ppd);
- ppd->last_pstate = 0xff; /* invalid value */
- }
-
- dd->link_default = HLS_DN_POLL;
-
- /*
- * Do remaining PCIe setup and save PCIe values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped.
- */
- ret = hfi1_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* verify that reads actually work, save revision for reset check */
- dd->revision = read_csr(dd, CCE_REVISION);
- if (dd->revision == ~(u64)0) {
- dd_dev_err(dd, "cannot read chip CSRs\n");
- ret = -EINVAL;
- goto bail_cleanup;
- }
- dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
- & CCE_REVISION_CHIP_REV_MAJOR_MASK;
- dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
- & CCE_REVISION_CHIP_REV_MINOR_MASK;
-
- /*
- * Check interrupt registers mapping if the driver has no access to
- * the upstream component. In this case, it is likely that the driver
- * is running in a VM.
- */
- if (!parent) {
- ret = check_int_registers(dd);
- if (ret)
- goto bail_cleanup;
- }
-
- /*
- * obtain the hardware ID - NOT related to unit, which is a
- * software enumeration
- */
- reg = read_csr(dd, CCE_REVISION2);
- dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
- & CCE_REVISION2_HFI_ID_MASK;
- /* the variable size will remove unwanted bits */
- dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
- dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
- dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
- dd->icode < ARRAY_SIZE(inames) ?
- inames[dd->icode] : "unknown", (int)dd->irev);
-
- /* speeds the hardware can support */
- dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
- /* speeds allowed to run at */
- dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
- /* give a reasonable active value, will be set on link up */
- dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
-
- dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
- dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
- dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
- dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
- dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
- /* fix up link widths for emulation _p */
- ppd = dd->pport;
- if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
- ppd->link_width_supported =
- ppd->link_width_enabled =
- ppd->link_width_downgrade_supported =
- ppd->link_width_downgrade_enabled =
- OPA_LINK_WIDTH_1X;
- }
- /* ensure num_vls isn't larger than the number of sdma engines */
- if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
- dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
- num_vls, dd->chip_sdma_engines);
- num_vls = dd->chip_sdma_engines;
- ppd->vls_supported = dd->chip_sdma_engines;
- ppd->vls_operational = ppd->vls_supported;
- }
-
- /*
- * Convert the ns parameter to the 64 * cclocks used in the CSR.
- * Limit the max if larger than the field holds. If timeout is
- * non-zero, then the calculated field will be at least 1.
- *
- * Must be after icode is set up - the cclock rate depends
- * on knowing the hardware being used.
- */
- dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
- if (dd->rcv_intr_timeout_csr >
- RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
- dd->rcv_intr_timeout_csr =
- RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
- else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
- dd->rcv_intr_timeout_csr = 1;
-
- /* needs to be done before we look for the peer device */
- read_guid(dd);
-
- /* set up shared ASIC data with peer device */
- ret = init_asic_data(dd);
- if (ret)
- goto bail_cleanup;
-
- /* obtain chip sizes, reset chip CSRs */
- init_chip(dd);
-
- /* read in the PCIe link speed information */
- ret = pcie_speeds(dd);
- if (ret)
- goto bail_cleanup;
-
- /* Needs to be called before hfi1_firmware_init */
- get_platform_config(dd);
-
- /* read in firmware */
- ret = hfi1_firmware_init(dd);
- if (ret)
- goto bail_cleanup;
-
- /*
- * In general, the PCIe Gen3 transition must occur after the
- * chip has been idled (so it won't initiate any PCIe transactions
- * e.g. an interrupt) and before the driver changes any registers
- * (the transition will reset the registers).
- *
- * In particular, place this call after:
- * - init_chip() - the chip will not initiate any PCIe transactions
- * - pcie_speeds() - reads the current link speed
- * - hfi1_firmware_init() - the needed firmware is ready to be
- * downloaded
- */
- ret = do_pcie_gen3_transition(dd);
- if (ret)
- goto bail_cleanup;
-
- /* start setting dd values and adjusting CSRs */
- init_early_variables(dd);
-
- parse_platform_config(dd);
-
- ret = obtain_boardname(dd);
- if (ret)
- goto bail_cleanup;
-
- snprintf(dd->boardversion, BOARD_VERS_MAX,
- "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
- HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
- (u32)dd->majrev,
- (u32)dd->minrev,
- (dd->revision >> CCE_REVISION_SW_SHIFT)
- & CCE_REVISION_SW_MASK);
-
- ret = set_up_context_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- /* set initial RXE CSRs */
- init_rxe(dd);
- /* set initial TXE CSRs */
- init_txe(dd);
- /* set initial non-RXE, non-TXE CSRs */
- init_other(dd);
- /* set up KDETH QP prefix in both RX and TX CSRs */
- init_kdeth_qp(dd);
-
- ret = hfi1_dev_affinity_init(dd);
- if (ret)
- goto bail_cleanup;
-
- /* send contexts must be set up before receive contexts */
- ret = init_send_contexts(dd);
- if (ret)
- goto bail_cleanup;
-
- ret = hfi1_create_ctxts(dd);
- if (ret)
- goto bail_cleanup;
-
- dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
- /*
- * rcd[0] is guaranteed to be valid by this point. Also, all
- * contexts are using the same value, as per the module parameter.
- */
- dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
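- /*
- * Note (illustrative): sizeof(u64) / sizeof(u32) is 2, so
- * rhf_offset is the entry size minus the two DWORDs of the RHF;
- * e.g. a 32-DWORD entry gives an offset of 30 DWORDs.
- */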
-
- ret = init_pervl_scs(dd);
- if (ret)
- goto bail_cleanup;
-
- /* sdma init */
- for (i = 0; i < dd->num_pports; ++i) {
- ret = sdma_init(dd, i);
- if (ret)
- goto bail_cleanup;
- }
-
- /* use contexts created by hfi1_create_ctxts */
- ret = set_up_interrupts(dd);
- if (ret)
- goto bail_cleanup;
-
- /* set up LCB access - must be after set_up_interrupts() */
- init_lcb_access(dd);
-
- snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
- dd->base_guid & 0xFFFFFF);
-
- dd->oui1 = dd->base_guid >> 56 & 0xFF;
- dd->oui2 = dd->base_guid >> 48 & 0xFF;
- dd->oui3 = dd->base_guid >> 40 & 0xFF;
-
- ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
- if (ret)
- goto bail_clear_intr;
- check_fabric_firmware_versions(dd);
-
- thermal_init(dd);
-
- ret = init_cntrs(dd);
- if (ret)
- goto bail_clear_intr;
-
- ret = init_rcverr(dd);
- if (ret)
- goto bail_free_cntrs;
-
- ret = eprom_init(dd);
- if (ret)
- goto bail_free_rcverr;
-
- goto bail;
-
-bail_free_rcverr:
- free_rcverr(dd);
-bail_free_cntrs:
- free_cntrs(dd);
-bail_clear_intr:
- clean_up_interrupts(dd);
-bail_cleanup:
- hfi1_pcie_ddcleanup(dd);
-bail_free:
- hfi1_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
-
-static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
- u32 dw_len)
-{
- u32 delta_cycles;
- u32 current_egress_rate = ppd->current_egress_rate;
- /* rates here are in units of 10^6 bits/sec */
-
- if (desired_egress_rate == -1)
- return 0; /* shouldn't happen */
-
- if (desired_egress_rate >= current_egress_rate)
- return 0; /* we can't make it go faster, only slower */
-
- delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
- egress_cycles(dw_len * 4, current_egress_rate);
-
- return (u16)delta_cycles;
-}
-
-/**
- * create_pbc - build a pbc for transmission
- * @flags: special case flags OR-ed into the built pbc
- * @srate_mbs: static rate in Mbits/sec
- * @vl: vl
- * @dwlen: dword length (header words + data words + pbc words)
- *
- * Create a PBC with the given flags, rate, VL, and length.
- *
- * NOTE: The PBC created will not insert any HCRC - all callers but one are
- * for verbs, which does not use this PSM feature. The lone other caller
- * is for the diagnostic interface which calls this if the user does not
- * supply their own PBC.
- */
-u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
- u32 dw_len)
-{
- u64 pbc, delay = 0;
-
- if (unlikely(srate_mbs))
- delay = delay_cycles(ppd, srate_mbs, dw_len);
-
- pbc = flags
- | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
- | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
- | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
- | (dw_len & PBC_LENGTH_DWS_MASK)
- << PBC_LENGTH_DWS_SHIFT;
-
- return pbc;
-}
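-
-/*
- * Worked PBC (illustrative, using the field definitions in chip.h):
- * flags = 0, srate_mbs = 0, vl = 0 and dw_len = 16 give
- * (PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT) | 16 = (2 << 26) | 16
- * = 0x08000010.
- */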
-
-#define SBUS_THERMAL 0x4f
-#define SBUS_THERM_MONITOR_MODE 0x1
-
-#define THERM_FAILURE(dev, ret, reason) \
- dd_dev_err((dd), \
- "Thermal sensor initialization failed: %s (%d)\n", \
- (reason), (ret))
-
-/*
- * Initialize the Avago Thermal sensor.
- *
- * After initialization, enable polling of thermal sensor through
- * SBus interface. For this to work, the SBus Master firmware
- * has to be loaded, because the HW polling logic uses SBus
- * interrupts, which are not supported with the default
- * firmware. Otherwise, no data will be returned through
- * the ASIC_STS_THERM CSR.
- */
-static int thermal_init(struct hfi1_devdata *dd)
-{
- int ret = 0;
-
- if (dd->icode != ICODE_RTL_SILICON ||
- check_chip_resource(dd, CR_THERM_INIT, NULL))
- return ret;
-
- ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
- if (ret) {
- THERM_FAILURE(dd, ret, "Acquire SBus");
- return ret;
- }
-
- dd_dev_info(dd, "Initializing thermal sensor\n");
- /* Disable polling of thermal readings */
- write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
- msleep(100);
- /* Thermal Sensor Initialization */
- /* Step 1: Reset the Thermal SBus Receiver */
- ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
- RESET_SBUS_RECEIVER, 0);
- if (ret) {
- THERM_FAILURE(dd, ret, "Bus Reset");
- goto done;
- }
- /* Step 2: Set Reset bit in Thermal block */
- ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
- WRITE_SBUS_RECEIVER, 0x1);
- if (ret) {
- THERM_FAILURE(dd, ret, "Therm Block Reset");
- goto done;
- }
- /* Step 3: Write clock divider value (100MHz -> 2MHz) */
- ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
- WRITE_SBUS_RECEIVER, 0x32);
- if (ret) {
- THERM_FAILURE(dd, ret, "Write Clock Div");
- goto done;
- }
- /* Step 4: Select temperature mode */
- ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
- WRITE_SBUS_RECEIVER,
- SBUS_THERM_MONITOR_MODE);
- if (ret) {
- THERM_FAILURE(dd, ret, "Write Mode Sel");
- goto done;
- }
- /* Step 5: De-assert block reset and start conversion */
- ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
- WRITE_SBUS_RECEIVER, 0x2);
- if (ret) {
- THERM_FAILURE(dd, ret, "Write Reset Deassert");
- goto done;
- }
- /* Step 5.1: Wait for first conversion (21.5ms per spec) */
- msleep(22);
-
- /* Enable polling of thermal readings */
- write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
-
- /* Set initialized flag */
- ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
- if (ret)
- THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
-
-done:
- release_chip_resource(dd, CR_SBUS);
- return ret;
-}
-
-static void handle_temp_err(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd = &dd->pport[0];
- /*
- * Thermal Critical Interrupt
- * Put the device into forced freeze mode, take link down to
- * offline, and put DC into reset.
- */
- dd_dev_emerg(dd,
- "Critical temperature reached! Forcing device into freeze mode!\n");
- dd->flags |= HFI1_FORCED_FREEZE;
- start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
- /*
- * Shut DC down as much and as quickly as possible.
- *
- * Step 1: Take the link down to OFFLINE. This will cause the
- * 8051 to put the Serdes in reset. However, we don't want to
- * go through the entire link state machine since we want to
- * shutdown ASAP. Furthermore, this is not a graceful shutdown
- * but rather an attempt to save the chip.
- * Code below is almost the same as quiet_serdes() but avoids
- * all the extra work and the sleeps.
- */
- ppd->driver_link_ready = 0;
- ppd->link_enabled = 0;
- set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
- PLS_OFFLINE);
- /*
- * Step 2: Shutdown LCB and 8051
- * After shutdown, do not restore DC_CFG_RESET value.
- */
- dc_shutdown(dd);
-}
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h
deleted file mode 100644
index 4f3b878e4..000000000
--- a/drivers/staging/rdma/hfi1/chip.h
+++ /dev/null
@@ -1,1363 +0,0 @@
-#ifndef _CHIP_H
-#define _CHIP_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This file contains all of the defines that are specific to the HFI chip
- */
-
-/* sizes */
-#define CCE_NUM_MSIX_VECTORS 256
-#define CCE_NUM_INT_CSRS 12
-#define CCE_NUM_INT_MAP_CSRS 96
-#define NUM_INTERRUPT_SOURCES 768
-#define RXE_NUM_CONTEXTS 160
-#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
-#define RXE_NUM_TID_FLOWS 32
-#define RXE_NUM_DATA_VL 8
-#define TXE_NUM_CONTEXTS 160
-#define TXE_NUM_SDMA_ENGINES 16
-#define NUM_CONTEXTS_PER_SET 8
-#define VL_ARB_HIGH_PRIO_TABLE_SIZE 16
-#define VL_ARB_LOW_PRIO_TABLE_SIZE 16
-#define VL_ARB_TABLE_SIZE 16
-#define TXE_NUM_32_BIT_COUNTER 7
-#define TXE_NUM_64_BIT_COUNTER 30
-#define TXE_NUM_DATA_VL 8
-#define TXE_PIO_SIZE (32 * 0x100000) /* 32 MB */
-#define PIO_BLOCK_SIZE 64 /* bytes */
-#define SDMA_BLOCK_SIZE 64 /* bytes */
-#define RCV_BUF_BLOCK_SIZE 64 /* bytes */
-#define PIO_CMASK 0x7ff /* counter mask for free and fill counters */
-#define MAX_EAGER_ENTRIES 2048 /* max receive eager entries */
-#define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */
-/*
- * Virtual Allocation Unit exponent (vAU): the Allocation Unit is
- * AU = 8 * 2^vAU bytes. The AU is fixed at 64 bytes (vAU = 3) for all
- * generation one devices
- */
-#define CM_VAU 3
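As a quick illustration of the arithmetic in the comment above, a standalone sketch (the helper name is hypothetical; with CM_VAU = 3 it yields the fixed 64-byte AU):

#include <stdint.h>

/* AU = 8 * 2^vAU bytes; au_bytes(3) == 64 */
static inline uint32_t au_bytes(uint32_t vau)
{
	return 8u << vau;
}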
-/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
-#define CM_GLOBAL_CREDITS 0x940
-/* Number of PKey entries in the HW */
-#define MAX_PKEY_VALUES 16
-
-#include "chip_registers.h"
-
-#define RXE_PER_CONTEXT_USER (RXE + RXE_PER_CONTEXT_OFFSET)
-#define TXE_PIO_SEND (TXE + TXE_PIO_SEND_OFFSET)
-
-/* PBC flags */
-#define PBC_INTR BIT_ULL(31)
-#define PBC_DC_INFO_SHIFT (30)
-#define PBC_DC_INFO BIT_ULL(PBC_DC_INFO_SHIFT)
-#define PBC_TEST_EBP BIT_ULL(29)
-#define PBC_PACKET_BYPASS BIT_ULL(28)
-#define PBC_CREDIT_RETURN BIT_ULL(25)
-#define PBC_INSERT_BYPASS_ICRC BIT_ULL(24)
-#define PBC_TEST_BAD_ICRC BIT_ULL(23)
-#define PBC_FECN BIT_ULL(22)
-
-/* PbcInsertHcrc field settings */
-#define PBC_IHCRC_LKDETH 0x0 /* insert @ local KDETH offset */
-#define PBC_IHCRC_GKDETH 0x1 /* insert @ global KDETH offset */
-#define PBC_IHCRC_NONE 0x2 /* no HCRC inserted */
-
-/* PBC fields */
-#define PBC_STATIC_RATE_CONTROL_COUNT_SHIFT 32
-#define PBC_STATIC_RATE_CONTROL_COUNT_MASK 0xffffull
-#define PBC_STATIC_RATE_CONTROL_COUNT_SMASK \
- (PBC_STATIC_RATE_CONTROL_COUNT_MASK << \
- PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
-
-#define PBC_INSERT_HCRC_SHIFT 26
-#define PBC_INSERT_HCRC_MASK 0x3ull
-#define PBC_INSERT_HCRC_SMASK \
- (PBC_INSERT_HCRC_MASK << PBC_INSERT_HCRC_SHIFT)
-
-#define PBC_VL_SHIFT 12
-#define PBC_VL_MASK 0xfull
-#define PBC_VL_SMASK (PBC_VL_MASK << PBC_VL_SHIFT)
-
-#define PBC_LENGTH_DWS_SHIFT 0
-#define PBC_LENGTH_DWS_MASK 0xfffull
-#define PBC_LENGTH_DWS_SMASK \
- (PBC_LENGTH_DWS_MASK << PBC_LENGTH_DWS_SHIFT)
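Each field above comes as a SHIFT/MASK/SMASK triplet: a value is placed with (value & MASK) << SHIFT, and SMASK selects the field in place. A minimal sketch of composing a PBC word from these defines (illustrative only; the driver's real helper is create_pbc(), declared later in this header, and the helper name here is hypothetical):

/* Sketch: place a VL and a dword length into a PBC word. */
static inline u64 pbc_vl_len(u64 vl, u64 len_dws)
{
	return ((vl & PBC_VL_MASK) << PBC_VL_SHIFT) |
	       ((len_dws & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT);
}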
-
-/* Credit Return Fields */
-#define CR_COUNTER_SHIFT 0
-#define CR_COUNTER_MASK 0x7ffull
-#define CR_COUNTER_SMASK (CR_COUNTER_MASK << CR_COUNTER_SHIFT)
-
-#define CR_STATUS_SHIFT 11
-#define CR_STATUS_MASK 0x1ull
-#define CR_STATUS_SMASK (CR_STATUS_MASK << CR_STATUS_SHIFT)
-
-#define CR_CREDIT_RETURN_DUE_TO_PBC_SHIFT 12
-#define CR_CREDIT_RETURN_DUE_TO_PBC_MASK 0x1ull
-#define CR_CREDIT_RETURN_DUE_TO_PBC_SMASK \
- (CR_CREDIT_RETURN_DUE_TO_PBC_MASK << \
- CR_CREDIT_RETURN_DUE_TO_PBC_SHIFT)
-
-#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SHIFT 13
-#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_MASK 0x1ull
-#define CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK \
- (CR_CREDIT_RETURN_DUE_TO_THRESHOLD_MASK << \
- CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SHIFT)
-
-#define CR_CREDIT_RETURN_DUE_TO_ERR_SHIFT 14
-#define CR_CREDIT_RETURN_DUE_TO_ERR_MASK 0x1ull
-#define CR_CREDIT_RETURN_DUE_TO_ERR_SMASK \
- (CR_CREDIT_RETURN_DUE_TO_ERR_MASK << \
- CR_CREDIT_RETURN_DUE_TO_ERR_SHIFT)
-
-#define CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT 15
-#define CR_CREDIT_RETURN_DUE_TO_FORCE_MASK 0x1ull
-#define CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK \
- (CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
- CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
-
-/* interrupt source numbers */
-#define IS_GENERAL_ERR_START 0
-#define IS_SDMAENG_ERR_START 16
-#define IS_SENDCTXT_ERR_START 32
-#define IS_SDMA_START		   192 /* includes SDmaProgress, SDmaIdle */
-#define IS_VARIOUS_START 240
-#define IS_DC_START 248
-#define IS_RCVAVAIL_START 256
-#define IS_RCVURGENT_START 416
-#define IS_SENDCREDIT_START 576
-#define IS_RESERVED_START 736
-#define IS_MAX_SOURCES 768
-
-/* derived interrupt source values */
-#define IS_GENERAL_ERR_END IS_SDMAENG_ERR_START
-#define IS_SDMAENG_ERR_END IS_SENDCTXT_ERR_START
-#define IS_SENDCTXT_ERR_END IS_SDMA_START
-#define IS_SDMA_END IS_VARIOUS_START
-#define IS_VARIOUS_END IS_DC_START
-#define IS_DC_END IS_RCVAVAIL_START
-#define IS_RCVAVAIL_END IS_RCVURGENT_START
-#define IS_RCVURGENT_END IS_SENDCREDIT_START
-#define IS_SENDCREDIT_END IS_RESERVED_START
-#define IS_RESERVED_END IS_MAX_SOURCES
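Each *_START/*_END pair bounds a half-open range, so classifying an absolute source number reduces to two comparisons; the is_table at the end of this header drives interrupt dispatch the same way. A hedged sketch (helper name hypothetical):

/* Sketch: true if 'source' falls in the SDMA interrupt source range. */
static inline int is_sdma_source(unsigned int source)
{
	return source >= IS_SDMA_START && source < IS_SDMA_END;
}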
-
-/* absolute interrupt numbers for QSFP1Int and QSFP2Int */
-#define QSFP1_INT 242
-#define QSFP2_INT 243
-
-/* DCC_CFG_PORT_CONFIG logical link states */
-#define LSTATE_DOWN 0x1
-#define LSTATE_INIT 0x2
-#define LSTATE_ARMED 0x3
-#define LSTATE_ACTIVE 0x4
-
-/* DC8051_STS_CUR_STATE port values (physical link states) */
-#define PLS_DISABLED 0x30
-#define PLS_OFFLINE 0x90
-#define PLS_OFFLINE_QUIET 0x90
-#define PLS_OFFLINE_PLANNED_DOWN_INFORM 0x91
-#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
-#define PLS_OFFLINE_REPORT_FAILURE 0x93
-#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
-#define PLS_POLLING 0x20
-#define PLS_POLLING_QUIET 0x20
-#define PLS_POLLING_ACTIVE 0x21
-#define PLS_CONFIGPHY 0x40
-#define PLS_CONFIGPHY_DEBOUCE 0x40
-#define PLS_CONFIGPHY_ESTCOMM 0x41
-#define PLS_CONFIGPHY_ESTCOMM_TXRX_HUNT 0x42
-#define PLS_CONFIGPHY_ESTCOMM_LOCAL_COMPLETE 0x43
-#define PLS_CONFIGPHY_OPTEQ 0x44
-#define PLS_CONFIGPHY_OPTEQ_OPTIMIZING 0x44
-#define PLS_CONFIGPHY_OPTEQ_LOCAL_COMPLETE 0x45
-#define PLS_CONFIGPHY_VERIFYCAP 0x46
-#define PLS_CONFIGPHY_VERIFYCAP_EXCHANGE 0x46
-#define PLS_CONFIGPHY_VERIFYCAP_LOCAL_COMPLETE 0x47
-#define PLS_CONFIGLT 0x48
-#define PLS_CONFIGLT_CONFIGURE 0x48
-#define PLS_CONFIGLT_LINK_TRANSFER_ACTIVE 0x49
-#define PLS_LINKUP 0x50
-#define PLS_PHYTEST 0xB0
-#define PLS_INTERNAL_SERDES_LOOPBACK 0xe1
-#define PLS_QUICK_LINKUP 0xe2
-
-/* DC_DC8051_CFG_HOST_CMD_0.REQ_TYPE - 8051 host commands */
-#define HCMD_LOAD_CONFIG_DATA 0x01
-#define HCMD_READ_CONFIG_DATA 0x02
-#define HCMD_CHANGE_PHY_STATE 0x03
-#define HCMD_SEND_LCB_IDLE_MSG 0x04
-#define HCMD_MISC 0x05
-#define HCMD_READ_LCB_IDLE_MSG 0x06
-#define HCMD_READ_LCB_CSR 0x07
-#define HCMD_WRITE_LCB_CSR 0x08
-#define HCMD_INTERFACE_TEST 0xff
-
-/* DC_DC8051_CFG_HOST_CMD_1.RETURN_CODE - 8051 host command return */
-#define HCMD_SUCCESS 2
-
-/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR - error flags */
-#define SPICO_ROM_FAILED BIT(0)
-#define UNKNOWN_FRAME BIT(1)
-#define TARGET_BER_NOT_MET BIT(2)
-#define FAILED_SERDES_INTERNAL_LOOPBACK BIT(3)
-#define FAILED_SERDES_INIT BIT(4)
-#define FAILED_LNI_POLLING BIT(5)
-#define FAILED_LNI_DEBOUNCE BIT(6)
-#define FAILED_LNI_ESTBCOMM BIT(7)
-#define FAILED_LNI_OPTEQ BIT(8)
-#define FAILED_LNI_VERIFY_CAP1 BIT(9)
-#define FAILED_LNI_VERIFY_CAP2 BIT(10)
-#define FAILED_LNI_CONFIGLT BIT(11)
-#define HOST_HANDSHAKE_TIMEOUT BIT(12)
-
-#define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \
- | FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \
- | FAILED_LNI_VERIFY_CAP1 \
- | FAILED_LNI_VERIFY_CAP2 \
- | FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT)
-
-/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */
-#define HOST_REQ_DONE BIT(0)
-#define BC_PWR_MGM_MSG BIT(1)
-#define BC_SMA_MSG BIT(2)
-#define BC_BCC_UNKNOWN_MSG BIT(3)
-#define BC_IDLE_UNKNOWN_MSG BIT(4)
-#define EXT_DEVICE_CFG_REQ BIT(5)
-#define VERIFY_CAP_FRAME BIT(6)
-#define LINKUP_ACHIEVED BIT(7)
-#define LINK_GOING_DOWN BIT(8)
-#define LINK_WIDTH_DOWNGRADED BIT(9)
-
-/* DC_DC8051_CFG_EXT_DEV_1.REQ_TYPE - 8051 host requests */
-#define HREQ_LOAD_CONFIG 0x01
-#define HREQ_SAVE_CONFIG 0x02
-#define HREQ_READ_CONFIG 0x03
-#define HREQ_SET_TX_EQ_ABS 0x04
-#define HREQ_SET_TX_EQ_REL 0x05
-#define HREQ_ENABLE 0x06
-#define HREQ_CONFIG_DONE 0xfe
-#define HREQ_INTERFACE_TEST 0xff
-
-/* DC_DC8051_CFG_EXT_DEV_0.RETURN_CODE - 8051 host request return codes */
-#define HREQ_INVALID 0x01
-#define HREQ_SUCCESS 0x02
-#define HREQ_NOT_SUPPORTED 0x03
-#define HREQ_FEATURE_NOT_SUPPORTED 0x04 /* request specific feature */
-#define HREQ_REQUEST_REJECTED 0xfe
-#define HREQ_EXECUTION_ONGOING 0xff
-
-/* MISC host command functions */
-#define HCMD_MISC_REQUEST_LCB_ACCESS 0x1
-#define HCMD_MISC_GRANT_LCB_ACCESS 0x2
-
-/* idle flit message types */
-#define IDLE_PHYSICAL_LINK_MGMT 0x1
-#define IDLE_CRU 0x2
-#define IDLE_SMA 0x3
-#define IDLE_POWER_MGMT 0x4
-
-/* idle flit message send fields (both send and read) */
-#define IDLE_PAYLOAD_MASK 0xffffffffffull /* 40 bits */
-#define IDLE_PAYLOAD_SHIFT 8
-#define IDLE_MSG_TYPE_MASK 0xf
-#define IDLE_MSG_TYPE_SHIFT 0
-
-/* idle flit message read fields */
-#define READ_IDLE_MSG_TYPE_MASK 0xf
-#define READ_IDLE_MSG_TYPE_SHIFT 0
-
-/* SMA idle flit payload commands */
-#define SMA_IDLE_ARM 1
-#define SMA_IDLE_ACTIVE 2
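The payload and type fields above compose an idle flit message word. A minimal sketch of building an SMA message for send_idle_sma(), which is declared later in this header (the helper name is hypothetical):

/* Sketch: pack a 40-bit payload and the SMA message type. */
static inline u64 sma_idle_msg(u64 payload)
{
	return ((payload & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
	       ((IDLE_SMA & IDLE_MSG_TYPE_MASK) << IDLE_MSG_TYPE_SHIFT);
}
/* e.g. sma_idle_msg(SMA_IDLE_ACTIVE) to request the Active transition */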
-
-/* DC_DC8051_CFG_MODE.GENERAL bits */
-#define DISABLE_SELF_GUID_CHECK 0x2
-
-/*
- * Eager buffer minimum and maximum sizes supported by the hardware.
- * All power-of-two sizes in between are supported as well.
- * MAX_EAGER_BUFFER_TOTAL is the maximum amount of memory allocatable
- * for eager buffers in a single context. All others are limits on
- * individual RcvArray entries.
- */
-#define MIN_EAGER_BUFFER (4 * 1024)
-#define MAX_EAGER_BUFFER (256 * 1024)
-#define MAX_EAGER_BUFFER_TOTAL (64 * (1 << 20)) /* max per ctxt 64MB */
-#define MAX_EXPECTED_BUFFER (2048 * 1024)
-
-/*
- * Receive expected base and count and eager base and count increment -
- * the CSR fields hold multiples of this value.
- */
-#define RCV_SHIFT 3
-#define RCV_INCREMENT BIT(RCV_SHIFT)
-
-/*
- * Receive header queue entry increment - the CSR holds multiples of
- * this value.
- */
-#define HDRQ_SIZE_SHIFT 5
-#define HDRQ_INCREMENT BIT(HDRQ_SIZE_SHIFT)
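Because these CSR fields hold multiples of the increments, converting an entry count to a CSR value is a single shift; a sketch (helper name hypothetical):

/* Sketch: count / RCV_INCREMENT, as stored in the receive CSR fields. */
static inline u32 rcv_count_to_csr_units(u32 count)
{
	return count >> RCV_SHIFT;
}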
-
-/*
- * Freeze handling flags
- */
-#define FREEZE_ABORT 0x01 /* do not do recovery */
-#define FREEZE_SELF 0x02 /* initiate the freeze */
-#define FREEZE_LINK_DOWN 0x04 /* link is down */
-
-/*
- * Chip implementation codes.
- */
-#define ICODE_RTL_SILICON 0x00
-#define ICODE_RTL_VCS_SIMULATION 0x01
-#define ICODE_FPGA_EMULATION 0x02
-#define ICODE_FUNCTIONAL_SIMULATOR 0x03
-
-/*
- * 8051 data memory size.
- */
-#define DC8051_DATA_MEM_SIZE 0x1000
-
-/*
- * 8051 firmware registers
- */
-#define NUM_GENERAL_FIELDS 0x17
-#define NUM_LANE_FIELDS 0x8
-
-/* 8051 general register Field IDs */
-#define LINK_OPTIMIZATION_SETTINGS 0x00
-#define LINK_TUNING_PARAMETERS 0x02
-#define DC_HOST_COMM_SETTINGS 0x03
-#define TX_SETTINGS 0x06
-#define VERIFY_CAP_LOCAL_PHY 0x07
-#define VERIFY_CAP_LOCAL_FABRIC 0x08
-#define VERIFY_CAP_LOCAL_LINK_WIDTH 0x09
-#define LOCAL_DEVICE_ID 0x0a
-#define LOCAL_LNI_INFO 0x0c
-#define REMOTE_LNI_INFO 0x0d
-#define MISC_STATUS 0x0e
-#define VERIFY_CAP_REMOTE_PHY 0x0f
-#define VERIFY_CAP_REMOTE_FABRIC 0x10
-#define VERIFY_CAP_REMOTE_LINK_WIDTH 0x11
-#define LAST_LOCAL_STATE_COMPLETE 0x12
-#define LAST_REMOTE_STATE_COMPLETE 0x13
-#define LINK_QUALITY_INFO 0x14
-#define REMOTE_DEVICE_ID 0x15
-
-/* 8051 lane specific register field IDs */
-#define TX_EQ_SETTINGS 0x00
-#define CHANNEL_LOSS_SETTINGS 0x05
-
-/* Lane ID for general configuration registers */
-#define GENERAL_CONFIG 4
-
-/* LOAD_DATA 8051 command shifts and fields */
-#define LOAD_DATA_FIELD_ID_SHIFT 40
-#define LOAD_DATA_FIELD_ID_MASK 0xfull
-#define LOAD_DATA_LANE_ID_SHIFT 32
-#define LOAD_DATA_LANE_ID_MASK 0xfull
-#define LOAD_DATA_DATA_SHIFT 0x0
-#define LOAD_DATA_DATA_MASK 0xffffffffull
-
-/* READ_DATA 8051 command shifts and fields */
-#define READ_DATA_FIELD_ID_SHIFT 40
-#define READ_DATA_FIELD_ID_MASK 0xffull
-#define READ_DATA_LANE_ID_SHIFT 32
-#define READ_DATA_LANE_ID_MASK 0xffull
-#define READ_DATA_DATA_SHIFT 0x0
-#define READ_DATA_DATA_MASK 0xffffffffull
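Packing a LOAD_DATA request follows the same shift-and-mask pattern; a hedged sketch of the kind of word load_8051_config(), declared later in this header, would assemble (helper name hypothetical):

/* Sketch: pack field ID, lane ID and data for a LOAD_CONFIG_DATA command. */
static inline u64 pack_load_data(u64 field_id, u64 lane_id, u64 data)
{
	return ((field_id & LOAD_DATA_FIELD_ID_MASK) << LOAD_DATA_FIELD_ID_SHIFT) |
	       ((lane_id & LOAD_DATA_LANE_ID_MASK) << LOAD_DATA_LANE_ID_SHIFT) |
	       ((data & LOAD_DATA_DATA_MASK) << LOAD_DATA_DATA_SHIFT);
}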
-
-/* TX settings fields */
-#define ENABLE_LANE_TX_SHIFT 0
-#define ENABLE_LANE_TX_MASK 0xff
-#define TX_POLARITY_INVERSION_SHIFT 8
-#define TX_POLARITY_INVERSION_MASK 0xff
-#define RX_POLARITY_INVERSION_SHIFT 16
-#define RX_POLARITY_INVERSION_MASK 0xff
-#define MAX_RATE_SHIFT 24
-#define MAX_RATE_MASK 0xff
-
-/* verify capability PHY fields */
-#define CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT 0x4
-#define CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK 0x1
-#define POWER_MANAGEMENT_SHIFT 0x0
-#define POWER_MANAGEMENT_MASK 0xf
-
-/* 8051 lane register Field IDs */
-#define SPICO_FW_VERSION 0x7 /* SPICO firmware version */
-
-/* SPICO firmware version fields */
-#define SPICO_ROM_VERSION_SHIFT 0
-#define SPICO_ROM_VERSION_MASK 0xffff
-#define SPICO_ROM_PROD_ID_SHIFT 16
-#define SPICO_ROM_PROD_ID_MASK 0xffff
-
-/* verify capability fabric fields */
-#define VAU_SHIFT 0
-#define VAU_MASK 0x0007
-#define Z_SHIFT 3
-#define Z_MASK 0x0001
-#define VCU_SHIFT 4
-#define VCU_MASK 0x0007
-#define VL15BUF_SHIFT 8
-#define VL15BUF_MASK 0x0fff
-#define CRC_SIZES_SHIFT 20
-#define CRC_SIZES_MASK 0x7
-
-/* verify capability local link width fields */
-#define LINK_WIDTH_SHIFT 0 /* also for remote link width */
-#define LINK_WIDTH_MASK 0xffff /* also for remote link width */
-#define LOCAL_FLAG_BITS_SHIFT 16
-#define LOCAL_FLAG_BITS_MASK 0xff
-#define MISC_CONFIG_BITS_SHIFT 24
-#define MISC_CONFIG_BITS_MASK 0xff
-
-/* verify capability remote link width fields */
-#define REMOTE_TX_RATE_SHIFT 16
-#define REMOTE_TX_RATE_MASK 0xff
-
-/* LOCAL_DEVICE_ID fields */
-#define LOCAL_DEVICE_REV_SHIFT 0
-#define LOCAL_DEVICE_REV_MASK 0xff
-#define LOCAL_DEVICE_ID_SHIFT 8
-#define LOCAL_DEVICE_ID_MASK 0xffff
-
-/* REMOTE_DEVICE_ID fields */
-#define REMOTE_DEVICE_REV_SHIFT 0
-#define REMOTE_DEVICE_REV_MASK 0xff
-#define REMOTE_DEVICE_ID_SHIFT 8
-#define REMOTE_DEVICE_ID_MASK 0xffff
-
-/* local LNI link width fields */
-#define ENABLE_LANE_RX_SHIFT 16
-#define ENABLE_LANE_RX_MASK 0xff
-
-/* mask, shift for reading 'mgmt_enabled' value from REMOTE_LNI_INFO field */
-#define MGMT_ALLOWED_SHIFT 23
-#define MGMT_ALLOWED_MASK 0x1
-
-/* mask, shift for 'link_quality' within LINK_QUALITY_INFO field */
-#define LINK_QUALITY_SHIFT 24
-#define LINK_QUALITY_MASK 0x7
-
-/*
- * mask, shift for reading 'planned_down_remote_reason_code'
- * from LINK_QUALITY_INFO field
- */
-#define DOWN_REMOTE_REASON_SHIFT 16
-#define DOWN_REMOTE_REASON_MASK 0xff
-
-/* verify capability PHY power management bits */
-#define PWRM_BER_CONTROL 0x1
-#define PWRM_BANDWIDTH_CONTROL 0x2
-
-/* verify capability fabric CRC size bits */
-enum {
- CAP_CRC_14B = (1 << 0), /* 14b CRC */
- CAP_CRC_48B = (1 << 1), /* 48b CRC */
- CAP_CRC_12B_16B_PER_LANE = (1 << 2) /* 12b-16b per lane CRC */
-};
-
-#define SUPPORTED_CRCS (CAP_CRC_14B | CAP_CRC_48B)
-
-/* misc status version fields */
-#define STS_FM_VERSION_A_SHIFT 16
-#define STS_FM_VERSION_A_MASK 0xff
-#define STS_FM_VERSION_B_SHIFT 24
-#define STS_FM_VERSION_B_MASK 0xff
-
-/* LCB_CFG_CRC_MODE TX_VAL and RX_VAL CRC mode values */
-#define LCB_CRC_16B 0x0 /* 16b CRC */
-#define LCB_CRC_14B 0x1 /* 14b CRC */
-#define LCB_CRC_48B 0x2 /* 48b CRC */
-#define LCB_CRC_12B_16B_PER_LANE 0x3 /* 12b-16b per lane CRC */
-
-/*
- * the following enum is (almost) a copy/paste of the definition
- * in the OPA spec, section 20.2.2.6.8 (PortInfo)
- */
-enum {
- PORT_LTP_CRC_MODE_NONE = 0,
- PORT_LTP_CRC_MODE_14 = 1, /* 14-bit LTP CRC mode (optional) */
- PORT_LTP_CRC_MODE_16 = 2, /* 16-bit LTP CRC mode */
- PORT_LTP_CRC_MODE_48 = 4,
- /* 48-bit overlapping LTP CRC mode (optional) */
- PORT_LTP_CRC_MODE_PER_LANE = 8
- /* 12 to 16 bit per lane LTP CRC mode (optional) */
-};
-
-/* timeouts */
-#define LINK_RESTART_DELAY 1000 /* link restart delay, in ms */
-#define TIMEOUT_8051_START 5000 /* 8051 start timeout, in ms */
-#define DC8051_COMMAND_TIMEOUT 20000 /* DC8051 command timeout, in ms */
-#define FREEZE_STATUS_TIMEOUT 20 /* wait for freeze indicators, in ms */
-#define VL_STATUS_CLEAR_TIMEOUT 5000 /* per-VL status clear, in ms */
-#define CCE_STATUS_TIMEOUT 10 /* time to clear CCE Status, in ms */
-
-/* cclock tick time, in picoseconds per tick: 1/speed * 10^12 */
-#define ASIC_CCLOCK_PS 1242 /* 805 MHz */
-#define FPGA_CCLOCK_PS 30300 /* 33 MHz */
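With the tick period given in picoseconds, nanosecond conversion is a multiply and a divide; the driver's ns_to_cclock()/cclock_to_ns(), declared later in this header, do this properly. An illustrative sketch that ignores rounding (helper name hypothetical):

/* Sketch: ns -> ps -> ASIC cclock ticks (805 MHz, 1242 ps/tick). */
static inline u32 ns_to_ticks(u32 ns)
{
	return (u32)(((u64)ns * 1000) / ASIC_CCLOCK_PS);
}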
-
-/*
- * Mask of enabled MISC errors. Do not enable the two RSA engine errors -
- * see firmware.c:run_rsa() for details.
- */
-#define DRIVER_MISC_MASK \
- (~(MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK \
- | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK))
-
-/* valid values for the loopback module parameter */
-#define LOOPBACK_NONE 0 /* no loopback - default */
-#define LOOPBACK_SERDES 1
-#define LOOPBACK_LCB 2
-#define LOOPBACK_CABLE 3 /* external cable */
-
-/* read and write hardware registers */
-u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
-void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
-
-/*
- * The *_kctxt_* flavor of the CSR read/write functions is for
- * per-context or per-SDMA CSRs that are not mappable to user-space.
- * Their spacing is not a PAGE_SIZE multiple.
- */
-static inline u64 read_kctxt_csr(const struct hfi1_devdata *dd, int ctxt,
- u32 offset0)
-{
- /* kernel per-context CSRs are separated by 0x100 */
- return read_csr(dd, offset0 + (0x100 * ctxt));
-}
-
-static inline void write_kctxt_csr(struct hfi1_devdata *dd, int ctxt,
- u32 offset0, u64 value)
-{
- /* kernel per-context CSRs are separated by 0x100 */
- write_csr(dd, offset0 + (0x100 * ctxt), value);
-}
-
-int read_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 *data);
-int write_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 data);
-
-void __iomem *get_csr_addr(
- struct hfi1_devdata *dd,
- u32 offset);
-
-static inline void __iomem *get_kctxt_csr_addr(
- struct hfi1_devdata *dd,
- int ctxt,
- u32 offset0)
-{
- return get_csr_addr(dd, offset0 + (0x100 * ctxt));
-}
-
-/*
- * The *_uctxt_* flavor of the CSR read/write functions is for
- * per-context CSRs that are mappable to user space. All these CSRs
- * are spaced by a PAGE_SIZE multiple in order to be mappable to
- * different processes without exposing other contexts' CSRs.
- */
-static inline u64 read_uctxt_csr(const struct hfi1_devdata *dd, int ctxt,
- u32 offset0)
-{
- /* user per-context CSRs are separated by 0x1000 */
- return read_csr(dd, offset0 + (0x1000 * ctxt));
-}
-
-static inline void write_uctxt_csr(struct hfi1_devdata *dd, int ctxt,
- u32 offset0, u64 value)
-{
- /* user per-context CSRs are separated by 0x1000 */
- write_csr(dd, offset0 + (0x1000 * ctxt), value);
-}
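The two flavors differ only in stride (0x100 for kernel contexts, 0x1000 for user-mappable ones); a sketch of a read-modify-write built on the helpers above (helper name hypothetical):

/* Sketch: set a bit in a user per-context CSR via the 0x1000-stride helpers. */
static inline void uctxt_csr_set_bit(struct hfi1_devdata *dd, int ctxt,
				     u32 off, u64 bit)
{
	write_uctxt_csr(dd, ctxt, off, read_uctxt_csr(dd, ctxt, off) | bit);
}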
-
-u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32);
-
-/* firmware.c */
-#define SBUS_MASTER_BROADCAST 0xfd
-#define NUM_PCIE_SERDES 16 /* number of PCIe serdes on the SBus */
-extern const u8 pcie_serdes_broadcast[];
-extern const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES];
-extern uint platform_config_load;
-
-/* SBus commands */
-#define RESET_SBUS_RECEIVER 0x20
-#define WRITE_SBUS_RECEIVER 0x21
-void sbus_request(struct hfi1_devdata *dd,
- u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
-int sbus_request_slow(struct hfi1_devdata *dd,
- u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
-void set_sbus_fast_mode(struct hfi1_devdata *dd);
-void clear_sbus_fast_mode(struct hfi1_devdata *dd);
-int hfi1_firmware_init(struct hfi1_devdata *dd);
-int load_pcie_firmware(struct hfi1_devdata *dd);
-int load_firmware(struct hfi1_devdata *dd);
-void dispose_firmware(void);
-int acquire_hw_mutex(struct hfi1_devdata *dd);
-void release_hw_mutex(struct hfi1_devdata *dd);
-
-/*
- * Bitmask of dynamic access for ASIC block chip resources. Each HFI has its
- * own range of bits for each resource so it can clear its own bits on
- * startup and exit. If either HFI has the resource bit set, the
- * resource is in use. The separate bit ranges are:
- * HFI0 bits 7:0
- * HFI1 bits 15:8
- */
-#define CR_SBUS 0x01 /* SBUS, THERM, and PCIE registers */
-#define CR_EPROM 0x02 /* EEP, GPIO registers */
-#define CR_I2C1 0x04 /* QSFP1_OE register */
-#define CR_I2C2 0x08 /* QSFP2_OE register */
-#define CR_DYN_SHIFT 8 /* dynamic flag shift */
-#define CR_DYN_MASK ((1ull << CR_DYN_SHIFT) - 1)
-
-/*
- * Bitmask of static ASIC states; these are outside of the dynamic ASIC
- * block chip resources above. These are to be set once and never cleared.
- * Must be holding the SBus dynamic flag when setting.
- */
-#define CR_THERM_INIT 0x010000
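The per-HFI byte layout means a resource's bit is shifted by the HFI index times CR_DYN_SHIFT. A sketch of the layout only; acquire_chip_resource() below takes the raw resource value, and the helper name and 0/1 HFI index here are assumptions:

/* Sketch: HFI0 uses bits 7:0, HFI1 uses bits 15:8 for dynamic resources. */
static inline u64 resource_bit(u64 resource, int hfi_index)
{
	return (resource & CR_DYN_MASK) << (hfi_index * CR_DYN_SHIFT);
}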
-
-int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait);
-void release_chip_resource(struct hfi1_devdata *dd, u32 resource);
-bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
- const char *func);
-void init_chip_resources(struct hfi1_devdata *dd);
-void finish_chip_resources(struct hfi1_devdata *dd);
-
-/* ms wait time for access to an SBus resource */
-#define SBUS_TIMEOUT 4000 /* long enough for a FW download and SBR */
-
-/* ms wait time for a qsfp (i2c) chain to become available */
-#define QSFP_WAIT 20000 /* long enough for FW update to the F4 uc */
-
-void fabric_serdes_reset(struct hfi1_devdata *dd);
-int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result);
-
-/* chip.c */
-void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b);
-void read_guid(struct hfi1_devdata *dd);
-int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
-void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
- u8 neigh_reason, u8 rem_reason);
-int set_link_state(struct hfi1_pportdata *, u32 state);
-int port_ltp_to_cap(int port_ltp);
-void handle_verify_cap(struct work_struct *work);
-void handle_freeze(struct work_struct *work);
-void handle_link_up(struct work_struct *work);
-void handle_link_down(struct work_struct *work);
-void handle_8051_request(struct work_struct *work);
-void handle_link_downgrade(struct work_struct *work);
-void handle_link_bounce(struct work_struct *work);
-void handle_sma_message(struct work_struct *work);
-void reset_qsfp(struct hfi1_pportdata *ppd);
-void qsfp_event(struct work_struct *work);
-void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
-int send_idle_sma(struct hfi1_devdata *dd, u64 message);
-int load_8051_config(struct hfi1_devdata *, u8, u8, u32);
-int read_8051_config(struct hfi1_devdata *, u8, u8, u32 *);
-int start_link(struct hfi1_pportdata *ppd);
-int bringup_serdes(struct hfi1_pportdata *ppd);
-void set_intr_state(struct hfi1_devdata *dd, u32 enable);
-void apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
- int refresh_widths);
-void update_usrhead(struct hfi1_ctxtdata *, u32, u32, u32, u32, u32);
-int stop_drain_data_vls(struct hfi1_devdata *dd);
-int open_fill_data_vls(struct hfi1_devdata *dd);
-u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns);
-u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclock);
-void get_linkup_link_widths(struct hfi1_pportdata *ppd);
-void read_ltp_rtt(struct hfi1_devdata *dd);
-void clear_linkup_counters(struct hfi1_devdata *dd);
-u32 hdrqempty(struct hfi1_ctxtdata *rcd);
-int is_ax(struct hfi1_devdata *dd);
-int is_bx(struct hfi1_devdata *dd);
-u32 read_physical_state(struct hfi1_devdata *dd);
-u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
-u32 get_logical_state(struct hfi1_pportdata *ppd);
-const char *opa_lstate_name(u32 lstate);
-const char *opa_pstate_name(u32 pstate);
-u32 driver_physical_state(struct hfi1_pportdata *ppd);
-u32 driver_logical_state(struct hfi1_pportdata *ppd);
-
-int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
-int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
-#define LCB_START DC_LCB_CSRS
-#define LCB_END DC_8051_CSRS /* next block is 8051 */
-static inline int is_lcb_offset(u32 offset)
-{
- return (offset >= LCB_START && offset < LCB_END);
-}
-
-extern uint num_vls;
-
-extern uint disable_integrity;
-u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl);
-u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data);
-u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl);
-u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data);
-u32 read_logical_state(struct hfi1_devdata *dd);
-void force_recv_intr(struct hfi1_ctxtdata *rcd);
-
-/* Per VL indexes */
-enum {
- C_VL_0 = 0,
- C_VL_1,
- C_VL_2,
- C_VL_3,
- C_VL_4,
- C_VL_5,
- C_VL_6,
- C_VL_7,
- C_VL_15,
- C_VL_COUNT
-};
-
-static inline int vl_from_idx(int idx)
-{
- return (idx == C_VL_15 ? 15 : idx);
-}
-
-static inline int idx_from_vl(int vl)
-{
- return (vl == 15 ? C_VL_15 : vl);
-}
-
-/* Per device counter indexes */
-enum {
- C_RCV_OVF = 0,
- C_RX_TID_FULL,
- C_RX_TID_INVALID,
- C_RX_TID_FLGMS,
- C_RX_CTX_EGRS,
- C_RCV_TID_FLSMS,
- C_CCE_PCI_CR_ST,
- C_CCE_PCI_TR_ST,
- C_CCE_PIO_WR_ST,
- C_CCE_ERR_INT,
- C_CCE_SDMA_INT,
- C_CCE_MISC_INT,
- C_CCE_RCV_AV_INT,
- C_CCE_RCV_URG_INT,
- C_CCE_SEND_CR_INT,
- C_DC_UNC_ERR,
- C_DC_RCV_ERR,
- C_DC_FM_CFG_ERR,
- C_DC_RMT_PHY_ERR,
- C_DC_DROPPED_PKT,
- C_DC_MC_XMIT_PKTS,
- C_DC_MC_RCV_PKTS,
- C_DC_XMIT_CERR,
- C_DC_RCV_CERR,
- C_DC_RCV_FCC,
- C_DC_XMIT_FCC,
- C_DC_XMIT_FLITS,
- C_DC_RCV_FLITS,
- C_DC_XMIT_PKTS,
- C_DC_RCV_PKTS,
- C_DC_RX_FLIT_VL,
- C_DC_RX_PKT_VL,
- C_DC_RCV_FCN,
- C_DC_RCV_FCN_VL,
- C_DC_RCV_BCN,
- C_DC_RCV_BCN_VL,
- C_DC_RCV_BBL,
- C_DC_RCV_BBL_VL,
- C_DC_MARK_FECN,
- C_DC_MARK_FECN_VL,
- C_DC_TOTAL_CRC,
- C_DC_CRC_LN0,
- C_DC_CRC_LN1,
- C_DC_CRC_LN2,
- C_DC_CRC_LN3,
- C_DC_CRC_MULT_LN,
- C_DC_TX_REPLAY,
- C_DC_RX_REPLAY,
- C_DC_SEQ_CRC_CNT,
- C_DC_ESC0_ONLY_CNT,
- C_DC_ESC0_PLUS1_CNT,
- C_DC_ESC0_PLUS2_CNT,
- C_DC_REINIT_FROM_PEER_CNT,
- C_DC_SBE_CNT,
- C_DC_MISC_FLG_CNT,
- C_DC_PRF_GOOD_LTP_CNT,
- C_DC_PRF_ACCEPTED_LTP_CNT,
- C_DC_PRF_RX_FLIT_CNT,
- C_DC_PRF_TX_FLIT_CNT,
- C_DC_PRF_CLK_CNTR,
- C_DC_PG_DBG_FLIT_CRDTS_CNT,
- C_DC_PG_STS_PAUSE_COMPLETE_CNT,
- C_DC_PG_STS_TX_SBE_CNT,
- C_DC_PG_STS_TX_MBE_CNT,
- C_SW_CPU_INTR,
- C_SW_CPU_RCV_LIM,
- C_SW_VTX_WAIT,
- C_SW_PIO_WAIT,
- C_SW_PIO_DRAIN,
- C_SW_KMEM_WAIT,
- C_SW_SEND_SCHED,
- C_SDMA_DESC_FETCHED_CNT,
- C_SDMA_INT_CNT,
- C_SDMA_ERR_CNT,
- C_SDMA_IDLE_INT_CNT,
- C_SDMA_PROGRESS_INT_CNT,
-/* MISC_ERR_STATUS */
- C_MISC_PLL_LOCK_FAIL_ERR,
- C_MISC_MBIST_FAIL_ERR,
- C_MISC_INVALID_EEP_CMD_ERR,
- C_MISC_EFUSE_DONE_PARITY_ERR,
- C_MISC_EFUSE_WRITE_ERR,
- C_MISC_EFUSE_READ_BAD_ADDR_ERR,
- C_MISC_EFUSE_CSR_PARITY_ERR,
- C_MISC_FW_AUTH_FAILED_ERR,
- C_MISC_KEY_MISMATCH_ERR,
- C_MISC_SBUS_WRITE_FAILED_ERR,
- C_MISC_CSR_WRITE_BAD_ADDR_ERR,
- C_MISC_CSR_READ_BAD_ADDR_ERR,
- C_MISC_CSR_PARITY_ERR,
-/* CceErrStatus */
- /*
- * A special counter that is the aggregate count
- * of all the cce_err_status errors. The remainder
- * are actual bits in the CceErrStatus register.
- */
- C_CCE_ERR_STATUS_AGGREGATED_CNT,
- C_CCE_MSIX_CSR_PARITY_ERR,
- C_CCE_INT_MAP_UNC_ERR,
- C_CCE_INT_MAP_COR_ERR,
- C_CCE_MSIX_TABLE_UNC_ERR,
- C_CCE_MSIX_TABLE_COR_ERR,
- C_CCE_RXDMA_CONV_FIFO_PARITY_ERR,
- C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR,
- C_CCE_SEG_WRITE_BAD_ADDR_ERR,
- C_CCE_SEG_READ_BAD_ADDR_ERR,
- C_LA_TRIGGERED,
- C_CCE_TRGT_CPL_TIMEOUT_ERR,
- C_PCIC_RECEIVE_PARITY_ERR,
- C_PCIC_TRANSMIT_BACK_PARITY_ERR,
- C_PCIC_TRANSMIT_FRONT_PARITY_ERR,
- C_PCIC_CPL_DAT_Q_UNC_ERR,
- C_PCIC_CPL_HD_Q_UNC_ERR,
- C_PCIC_POST_DAT_Q_UNC_ERR,
- C_PCIC_POST_HD_Q_UNC_ERR,
- C_PCIC_RETRY_SOT_MEM_UNC_ERR,
- C_PCIC_RETRY_MEM_UNC_ERR,
- C_PCIC_N_POST_DAT_Q_PARITY_ERR,
- C_PCIC_N_POST_H_Q_PARITY_ERR,
- C_PCIC_CPL_DAT_Q_COR_ERR,
- C_PCIC_CPL_HD_Q_COR_ERR,
- C_PCIC_POST_DAT_Q_COR_ERR,
- C_PCIC_POST_HD_Q_COR_ERR,
- C_PCIC_RETRY_SOT_MEM_COR_ERR,
- C_PCIC_RETRY_MEM_COR_ERR,
- C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR,
- C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR,
- C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR,
- C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR,
- C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR,
- C_CCE_CSR_CFG_BUS_PARITY_ERR,
- C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR,
- C_CCE_RSPD_DATA_PARITY_ERR,
- C_CCE_TRGT_ACCESS_ERR,
- C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR,
- C_CCE_CSR_WRITE_BAD_ADDR_ERR,
- C_CCE_CSR_READ_BAD_ADDR_ERR,
- C_CCE_CSR_PARITY_ERR,
-/* RcvErrStatus */
- C_RX_CSR_PARITY_ERR,
- C_RX_CSR_WRITE_BAD_ADDR_ERR,
- C_RX_CSR_READ_BAD_ADDR_ERR,
- C_RX_DMA_CSR_UNC_ERR,
- C_RX_DMA_DQ_FSM_ENCODING_ERR,
- C_RX_DMA_EQ_FSM_ENCODING_ERR,
- C_RX_DMA_CSR_PARITY_ERR,
- C_RX_RBUF_DATA_COR_ERR,
- C_RX_RBUF_DATA_UNC_ERR,
- C_RX_DMA_DATA_FIFO_RD_COR_ERR,
- C_RX_DMA_DATA_FIFO_RD_UNC_ERR,
- C_RX_DMA_HDR_FIFO_RD_COR_ERR,
- C_RX_DMA_HDR_FIFO_RD_UNC_ERR,
- C_RX_RBUF_DESC_PART2_COR_ERR,
- C_RX_RBUF_DESC_PART2_UNC_ERR,
- C_RX_RBUF_DESC_PART1_COR_ERR,
- C_RX_RBUF_DESC_PART1_UNC_ERR,
- C_RX_HQ_INTR_FSM_ERR,
- C_RX_HQ_INTR_CSR_PARITY_ERR,
- C_RX_LOOKUP_CSR_PARITY_ERR,
- C_RX_LOOKUP_RCV_ARRAY_COR_ERR,
- C_RX_LOOKUP_RCV_ARRAY_UNC_ERR,
- C_RX_LOOKUP_DES_PART2_PARITY_ERR,
- C_RX_LOOKUP_DES_PART1_UNC_COR_ERR,
- C_RX_LOOKUP_DES_PART1_UNC_ERR,
- C_RX_RBUF_NEXT_FREE_BUF_COR_ERR,
- C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR,
- C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR,
- C_RX_RBUF_FL_INITDONE_PARITY_ERR,
- C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR,
- C_RX_RBUF_FL_RD_ADDR_PARITY_ERR,
- C_RX_RBUF_EMPTY_ERR,
- C_RX_RBUF_FULL_ERR,
- C_RX_RBUF_BAD_LOOKUP_ERR,
- C_RX_RBUF_CTX_ID_PARITY_ERR,
- C_RX_RBUF_CSR_QEOPDW_PARITY_ERR,
- C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR,
- C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR,
- C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR,
- C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR,
- C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR,
- C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR,
- C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR,
- C_RX_RBUF_BLOCK_LIST_READ_COR_ERR,
- C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR,
- C_RX_RBUF_LOOKUP_DES_COR_ERR,
- C_RX_RBUF_LOOKUP_DES_UNC_ERR,
- C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR,
- C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR,
- C_RX_RBUF_FREE_LIST_COR_ERR,
- C_RX_RBUF_FREE_LIST_UNC_ERR,
- C_RX_RCV_FSM_ENCODING_ERR,
- C_RX_DMA_FLAG_COR_ERR,
- C_RX_DMA_FLAG_UNC_ERR,
- C_RX_DC_SOP_EOP_PARITY_ERR,
- C_RX_RCV_CSR_PARITY_ERR,
- C_RX_RCV_QP_MAP_TABLE_COR_ERR,
- C_RX_RCV_QP_MAP_TABLE_UNC_ERR,
- C_RX_RCV_DATA_COR_ERR,
- C_RX_RCV_DATA_UNC_ERR,
- C_RX_RCV_HDR_COR_ERR,
- C_RX_RCV_HDR_UNC_ERR,
- C_RX_DC_INTF_PARITY_ERR,
- C_RX_DMA_CSR_COR_ERR,
-/* SendPioErrStatus */
- C_PIO_PEC_SOP_HEAD_PARITY_ERR,
- C_PIO_PCC_SOP_HEAD_PARITY_ERR,
- C_PIO_LAST_RETURNED_CNT_PARITY_ERR,
- C_PIO_CURRENT_FREE_CNT_PARITY_ERR,
- C_PIO_RSVD_31_ERR,
- C_PIO_RSVD_30_ERR,
- C_PIO_PPMC_SOP_LEN_ERR,
- C_PIO_PPMC_BQC_MEM_PARITY_ERR,
- C_PIO_VL_FIFO_PARITY_ERR,
- C_PIO_VLF_SOP_PARITY_ERR,
- C_PIO_VLF_V1_LEN_PARITY_ERR,
- C_PIO_BLOCK_QW_COUNT_PARITY_ERR,
- C_PIO_WRITE_QW_VALID_PARITY_ERR,
- C_PIO_STATE_MACHINE_ERR,
- C_PIO_WRITE_DATA_PARITY_ERR,
- C_PIO_HOST_ADDR_MEM_COR_ERR,
- C_PIO_HOST_ADDR_MEM_UNC_ERR,
- C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR,
- C_PIO_INIT_SM_IN_ERR,
- C_PIO_PPMC_PBL_FIFO_ERR,
- C_PIO_CREDIT_RET_FIFO_PARITY_ERR,
- C_PIO_V1_LEN_MEM_BANK1_COR_ERR,
- C_PIO_V1_LEN_MEM_BANK0_COR_ERR,
- C_PIO_V1_LEN_MEM_BANK1_UNC_ERR,
- C_PIO_V1_LEN_MEM_BANK0_UNC_ERR,
- C_PIO_SM_PKT_RESET_PARITY_ERR,
- C_PIO_PKT_EVICT_FIFO_PARITY_ERR,
- C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR,
- C_PIO_SBRDCTL_CRREL_PARITY_ERR,
- C_PIO_PEC_FIFO_PARITY_ERR,
- C_PIO_PCC_FIFO_PARITY_ERR,
- C_PIO_SB_MEM_FIFO1_ERR,
- C_PIO_SB_MEM_FIFO0_ERR,
- C_PIO_CSR_PARITY_ERR,
- C_PIO_WRITE_ADDR_PARITY_ERR,
- C_PIO_WRITE_BAD_CTXT_ERR,
-/* SendDmaErrStatus */
- C_SDMA_PCIE_REQ_TRACKING_COR_ERR,
- C_SDMA_PCIE_REQ_TRACKING_UNC_ERR,
- C_SDMA_CSR_PARITY_ERR,
- C_SDMA_RPY_TAG_ERR,
-/* SendEgressErrStatus */
- C_TX_READ_PIO_MEMORY_CSR_UNC_ERR,
- C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR,
- C_TX_EGRESS_FIFO_COR_ERR,
- C_TX_READ_PIO_MEMORY_COR_ERR,
- C_TX_READ_SDMA_MEMORY_COR_ERR,
- C_TX_SB_HDR_COR_ERR,
- C_TX_CREDIT_OVERRUN_ERR,
- C_TX_LAUNCH_FIFO8_COR_ERR,
- C_TX_LAUNCH_FIFO7_COR_ERR,
- C_TX_LAUNCH_FIFO6_COR_ERR,
- C_TX_LAUNCH_FIFO5_COR_ERR,
- C_TX_LAUNCH_FIFO4_COR_ERR,
- C_TX_LAUNCH_FIFO3_COR_ERR,
- C_TX_LAUNCH_FIFO2_COR_ERR,
- C_TX_LAUNCH_FIFO1_COR_ERR,
- C_TX_LAUNCH_FIFO0_COR_ERR,
- C_TX_CREDIT_RETURN_VL_ERR,
- C_TX_HCRC_INSERTION_ERR,
- C_TX_EGRESS_FIFI_UNC_ERR,
- C_TX_READ_PIO_MEMORY_UNC_ERR,
- C_TX_READ_SDMA_MEMORY_UNC_ERR,
- C_TX_SB_HDR_UNC_ERR,
- C_TX_CREDIT_RETURN_PARITY_ERR,
- C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR,
- C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR,
- C_TX_SDMA15_DISALLOWED_PACKET_ERR,
- C_TX_SDMA14_DISALLOWED_PACKET_ERR,
- C_TX_SDMA13_DISALLOWED_PACKET_ERR,
- C_TX_SDMA12_DISALLOWED_PACKET_ERR,
- C_TX_SDMA11_DISALLOWED_PACKET_ERR,
- C_TX_SDMA10_DISALLOWED_PACKET_ERR,
- C_TX_SDMA9_DISALLOWED_PACKET_ERR,
- C_TX_SDMA8_DISALLOWED_PACKET_ERR,
- C_TX_SDMA7_DISALLOWED_PACKET_ERR,
- C_TX_SDMA6_DISALLOWED_PACKET_ERR,
- C_TX_SDMA5_DISALLOWED_PACKET_ERR,
- C_TX_SDMA4_DISALLOWED_PACKET_ERR,
- C_TX_SDMA3_DISALLOWED_PACKET_ERR,
- C_TX_SDMA2_DISALLOWED_PACKET_ERR,
- C_TX_SDMA1_DISALLOWED_PACKET_ERR,
- C_TX_SDMA0_DISALLOWED_PACKET_ERR,
- C_TX_CONFIG_PARITY_ERR,
- C_TX_SBRD_CTL_CSR_PARITY_ERR,
- C_TX_LAUNCH_CSR_PARITY_ERR,
- C_TX_ILLEGAL_CL_ERR,
- C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR,
- C_TX_RESERVED_10,
- C_TX_RESERVED_9,
- C_TX_SDMA_LAUNCH_INTF_PARITY_ERR,
- C_TX_PIO_LAUNCH_INTF_PARITY_ERR,
- C_TX_RESERVED_6,
- C_TX_INCORRECT_LINK_STATE_ERR,
- C_TX_LINK_DOWN_ERR,
- C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR,
- C_TX_RESERVED_2,
- C_TX_PKT_INTEGRITY_MEM_UNC_ERR,
- C_TX_PKT_INTEGRITY_MEM_COR_ERR,
-/* SendErrStatus */
- C_SEND_CSR_WRITE_BAD_ADDR_ERR,
- C_SEND_CSR_READ_BAD_ADD_ERR,
- C_SEND_CSR_PARITY_ERR,
-/* SendCtxtErrStatus */
- C_PIO_WRITE_OUT_OF_BOUNDS_ERR,
- C_PIO_WRITE_OVERFLOW_ERR,
- C_PIO_WRITE_CROSSES_BOUNDARY_ERR,
- C_PIO_DISALLOWED_PACKET_ERR,
- C_PIO_INCONSISTENT_SOP_ERR,
-/* SendDmaEngErrStatus */
- C_SDMA_HEADER_REQUEST_FIFO_COR_ERR,
- C_SDMA_HEADER_STORAGE_COR_ERR,
- C_SDMA_PACKET_TRACKING_COR_ERR,
- C_SDMA_ASSEMBLY_COR_ERR,
- C_SDMA_DESC_TABLE_COR_ERR,
- C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR,
- C_SDMA_HEADER_STORAGE_UNC_ERR,
- C_SDMA_PACKET_TRACKING_UNC_ERR,
- C_SDMA_ASSEMBLY_UNC_ERR,
- C_SDMA_DESC_TABLE_UNC_ERR,
- C_SDMA_TIMEOUT_ERR,
- C_SDMA_HEADER_LENGTH_ERR,
- C_SDMA_HEADER_ADDRESS_ERR,
- C_SDMA_HEADER_SELECT_ERR,
- C_SMDA_RESERVED_9,
- C_SDMA_PACKET_DESC_OVERFLOW_ERR,
- C_SDMA_LENGTH_MISMATCH_ERR,
- C_SDMA_HALT_ERR,
- C_SDMA_MEM_READ_ERR,
- C_SDMA_FIRST_DESC_ERR,
- C_SDMA_TAIL_OUT_OF_BOUNDS_ERR,
- C_SDMA_TOO_LONG_ERR,
- C_SDMA_GEN_MISMATCH_ERR,
- C_SDMA_WRONG_DW_ERR,
- DEV_CNTR_LAST /* Must be kept last */
-};
-
-/* Per port counter indexes */
-enum {
- C_TX_UNSUP_VL = 0,
- C_TX_INVAL_LEN,
- C_TX_MM_LEN_ERR,
- C_TX_UNDERRUN,
- C_TX_FLOW_STALL,
- C_TX_DROPPED,
- C_TX_HDR_ERR,
- C_TX_PKT,
- C_TX_WORDS,
- C_TX_WAIT,
- C_TX_FLIT_VL,
- C_TX_PKT_VL,
- C_TX_WAIT_VL,
- C_RX_PKT,
- C_RX_WORDS,
- C_SW_LINK_DOWN,
- C_SW_LINK_UP,
- C_SW_UNKNOWN_FRAME,
- C_SW_XMIT_DSCD,
- C_SW_XMIT_DSCD_VL,
- C_SW_XMIT_CSTR_ERR,
- C_SW_RCV_CSTR_ERR,
- C_SW_IBP_LOOP_PKTS,
- C_SW_IBP_RC_RESENDS,
- C_SW_IBP_RNR_NAKS,
- C_SW_IBP_OTHER_NAKS,
- C_SW_IBP_RC_TIMEOUTS,
- C_SW_IBP_PKT_DROPS,
- C_SW_IBP_DMA_WAIT,
- C_SW_IBP_RC_SEQNAK,
- C_SW_IBP_RC_DUPREQ,
- C_SW_IBP_RDMA_SEQ,
- C_SW_IBP_UNALIGNED,
- C_SW_IBP_SEQ_NAK,
- C_SW_CPU_RC_ACKS,
- C_SW_CPU_RC_QACKS,
- C_SW_CPU_RC_DELAYED_COMP,
- C_RCV_HDR_OVF_0,
- C_RCV_HDR_OVF_1,
- C_RCV_HDR_OVF_2,
- C_RCV_HDR_OVF_3,
- C_RCV_HDR_OVF_4,
- C_RCV_HDR_OVF_5,
- C_RCV_HDR_OVF_6,
- C_RCV_HDR_OVF_7,
- C_RCV_HDR_OVF_8,
- C_RCV_HDR_OVF_9,
- C_RCV_HDR_OVF_10,
- C_RCV_HDR_OVF_11,
- C_RCV_HDR_OVF_12,
- C_RCV_HDR_OVF_13,
- C_RCV_HDR_OVF_14,
- C_RCV_HDR_OVF_15,
- C_RCV_HDR_OVF_16,
- C_RCV_HDR_OVF_17,
- C_RCV_HDR_OVF_18,
- C_RCV_HDR_OVF_19,
- C_RCV_HDR_OVF_20,
- C_RCV_HDR_OVF_21,
- C_RCV_HDR_OVF_22,
- C_RCV_HDR_OVF_23,
- C_RCV_HDR_OVF_24,
- C_RCV_HDR_OVF_25,
- C_RCV_HDR_OVF_26,
- C_RCV_HDR_OVF_27,
- C_RCV_HDR_OVF_28,
- C_RCV_HDR_OVF_29,
- C_RCV_HDR_OVF_30,
- C_RCV_HDR_OVF_31,
- C_RCV_HDR_OVF_32,
- C_RCV_HDR_OVF_33,
- C_RCV_HDR_OVF_34,
- C_RCV_HDR_OVF_35,
- C_RCV_HDR_OVF_36,
- C_RCV_HDR_OVF_37,
- C_RCV_HDR_OVF_38,
- C_RCV_HDR_OVF_39,
- C_RCV_HDR_OVF_40,
- C_RCV_HDR_OVF_41,
- C_RCV_HDR_OVF_42,
- C_RCV_HDR_OVF_43,
- C_RCV_HDR_OVF_44,
- C_RCV_HDR_OVF_45,
- C_RCV_HDR_OVF_46,
- C_RCV_HDR_OVF_47,
- C_RCV_HDR_OVF_48,
- C_RCV_HDR_OVF_49,
- C_RCV_HDR_OVF_50,
- C_RCV_HDR_OVF_51,
- C_RCV_HDR_OVF_52,
- C_RCV_HDR_OVF_53,
- C_RCV_HDR_OVF_54,
- C_RCV_HDR_OVF_55,
- C_RCV_HDR_OVF_56,
- C_RCV_HDR_OVF_57,
- C_RCV_HDR_OVF_58,
- C_RCV_HDR_OVF_59,
- C_RCV_HDR_OVF_60,
- C_RCV_HDR_OVF_61,
- C_RCV_HDR_OVF_62,
- C_RCV_HDR_OVF_63,
- C_RCV_HDR_OVF_64,
- C_RCV_HDR_OVF_65,
- C_RCV_HDR_OVF_66,
- C_RCV_HDR_OVF_67,
- C_RCV_HDR_OVF_68,
- C_RCV_HDR_OVF_69,
- C_RCV_HDR_OVF_70,
- C_RCV_HDR_OVF_71,
- C_RCV_HDR_OVF_72,
- C_RCV_HDR_OVF_73,
- C_RCV_HDR_OVF_74,
- C_RCV_HDR_OVF_75,
- C_RCV_HDR_OVF_76,
- C_RCV_HDR_OVF_77,
- C_RCV_HDR_OVF_78,
- C_RCV_HDR_OVF_79,
- C_RCV_HDR_OVF_80,
- C_RCV_HDR_OVF_81,
- C_RCV_HDR_OVF_82,
- C_RCV_HDR_OVF_83,
- C_RCV_HDR_OVF_84,
- C_RCV_HDR_OVF_85,
- C_RCV_HDR_OVF_86,
- C_RCV_HDR_OVF_87,
- C_RCV_HDR_OVF_88,
- C_RCV_HDR_OVF_89,
- C_RCV_HDR_OVF_90,
- C_RCV_HDR_OVF_91,
- C_RCV_HDR_OVF_92,
- C_RCV_HDR_OVF_93,
- C_RCV_HDR_OVF_94,
- C_RCV_HDR_OVF_95,
- C_RCV_HDR_OVF_96,
- C_RCV_HDR_OVF_97,
- C_RCV_HDR_OVF_98,
- C_RCV_HDR_OVF_99,
- C_RCV_HDR_OVF_100,
- C_RCV_HDR_OVF_101,
- C_RCV_HDR_OVF_102,
- C_RCV_HDR_OVF_103,
- C_RCV_HDR_OVF_104,
- C_RCV_HDR_OVF_105,
- C_RCV_HDR_OVF_106,
- C_RCV_HDR_OVF_107,
- C_RCV_HDR_OVF_108,
- C_RCV_HDR_OVF_109,
- C_RCV_HDR_OVF_110,
- C_RCV_HDR_OVF_111,
- C_RCV_HDR_OVF_112,
- C_RCV_HDR_OVF_113,
- C_RCV_HDR_OVF_114,
- C_RCV_HDR_OVF_115,
- C_RCV_HDR_OVF_116,
- C_RCV_HDR_OVF_117,
- C_RCV_HDR_OVF_118,
- C_RCV_HDR_OVF_119,
- C_RCV_HDR_OVF_120,
- C_RCV_HDR_OVF_121,
- C_RCV_HDR_OVF_122,
- C_RCV_HDR_OVF_123,
- C_RCV_HDR_OVF_124,
- C_RCV_HDR_OVF_125,
- C_RCV_HDR_OVF_126,
- C_RCV_HDR_OVF_127,
- C_RCV_HDR_OVF_128,
- C_RCV_HDR_OVF_129,
- C_RCV_HDR_OVF_130,
- C_RCV_HDR_OVF_131,
- C_RCV_HDR_OVF_132,
- C_RCV_HDR_OVF_133,
- C_RCV_HDR_OVF_134,
- C_RCV_HDR_OVF_135,
- C_RCV_HDR_OVF_136,
- C_RCV_HDR_OVF_137,
- C_RCV_HDR_OVF_138,
- C_RCV_HDR_OVF_139,
- C_RCV_HDR_OVF_140,
- C_RCV_HDR_OVF_141,
- C_RCV_HDR_OVF_142,
- C_RCV_HDR_OVF_143,
- C_RCV_HDR_OVF_144,
- C_RCV_HDR_OVF_145,
- C_RCV_HDR_OVF_146,
- C_RCV_HDR_OVF_147,
- C_RCV_HDR_OVF_148,
- C_RCV_HDR_OVF_149,
- C_RCV_HDR_OVF_150,
- C_RCV_HDR_OVF_151,
- C_RCV_HDR_OVF_152,
- C_RCV_HDR_OVF_153,
- C_RCV_HDR_OVF_154,
- C_RCV_HDR_OVF_155,
- C_RCV_HDR_OVF_156,
- C_RCV_HDR_OVF_157,
- C_RCV_HDR_OVF_158,
- C_RCV_HDR_OVF_159,
- PORT_CNTR_LAST /* Must be kept last */
-};
-
-u64 get_all_cpu_total(u64 __percpu *cntr);
-void hfi1_start_cleanup(struct hfi1_devdata *dd);
-void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
-struct hfi1_message_header *hfi1_get_msgheader(
- struct hfi1_devdata *dd, __le32 *rhf_addr);
-int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
- struct hfi1_ctxt_info *kinfo);
-u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
- u32 mask);
-int hfi1_init_ctxt(struct send_context *sc);
-void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
- u32 type, unsigned long pa, u16 order);
-void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
-void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt);
-u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp);
-u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp);
-u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd);
-int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which);
-int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
-int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey);
-int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
-int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
-int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt);
-void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
-
-/*
- * Interrupt source table.
- *
- * Each entry is an interrupt source "type", ordered by increasing
- * source number.
- */
-struct is_table {
- int start; /* interrupt source type start */
- int end; /* interrupt source type end */
- /* routine that returns the name of the interrupt source */
- char *(*is_name)(char *name, size_t size, unsigned int source);
- /* routine to call when receiving an interrupt */
- void (*is_int)(struct hfi1_devdata *dd, unsigned int source);
-};
-
-#endif /* _CHIP_H */
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h
deleted file mode 100644
index 770f05c9b..000000000
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ /dev/null
@@ -1,1306 +0,0 @@
-#ifndef DEF_CHIP_REG
-#define DEF_CHIP_REG
-
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#define CORE 0x000000000000
-#define CCE (CORE + 0x000000000000)
-#define ASIC (CORE + 0x000000400000)
-#define MISC (CORE + 0x000000500000)
-#define DC_TOP_CSRS (CORE + 0x000000600000)
-#define CHIP_DEBUG (CORE + 0x000000700000)
-#define RXE (CORE + 0x000001000000)
-#define TXE (CORE + 0x000001800000)
-#define DCC_CSRS (DC_TOP_CSRS + 0x000000000000)
-#define DC_LCB_CSRS (DC_TOP_CSRS + 0x000000001000)
-#define DC_8051_CSRS (DC_TOP_CSRS + 0x000000002000)
-#define PCIE 0
-
-#define ASIC_NUM_SCRATCH 4
-#define CCE_ERR_INT_CNT 0
-#define CCE_MISC_INT_CNT 2
-#define CCE_NUM_32_BIT_COUNTERS 3
-#define CCE_NUM_32_BIT_INT_COUNTERS 6
-#define CCE_NUM_INT_CSRS 12
-#define CCE_NUM_INT_MAP_CSRS 96
-#define CCE_NUM_MSIX_PBAS 4
-#define CCE_NUM_MSIX_VECTORS 256
-#define CCE_NUM_SCRATCH 4
-#define CCE_PCIE_POSTED_CRDT_STALL_CNT 2
-#define CCE_PCIE_TRGT_STALL_CNT 0
-#define CCE_PIO_WR_STALL_CNT 1
-#define CCE_RCV_AVAIL_INT_CNT 3
-#define CCE_RCV_URGENT_INT_CNT 4
-#define CCE_SDMA_INT_CNT 1
-#define CCE_SEND_CREDIT_INT_CNT 5
-#define DCC_CFG_LED_CNTRL (DCC_CSRS + 0x000000000040)
-#define DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK 0x10ull
-#define DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SHIFT 0
-#define DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK 0xFull
-#define DCC_CFG_PORT_CONFIG (DCC_CSRS + 0x000000000008)
-#define DCC_CFG_PORT_CONFIG1 (DCC_CSRS + 0x000000000010)
-#define DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK 0xFFFFull
-#define DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT 16
-#define DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK 0xFFFF0000ull
-#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK 0xFFFFull
-#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT 0
-#define DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 0xFFFFull
-#define DCC_CFG_PORT_CONFIG_LINK_STATE_MASK 0x7ull
-#define DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT 48
-#define DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK 0x7000000000000ull
-#define DCC_CFG_PORT_CONFIG_MTU_CAP_MASK 0x7ull
-#define DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT 32
-#define DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK 0x700000000ull
-#define DCC_CFG_RESET (DCC_CSRS + 0x000000000000)
-#define DCC_CFG_RESET_RESET_LCB_SHIFT 0
-#define DCC_CFG_RESET_RESET_RX_FPE_SHIFT 2
-#define DCC_CFG_SC_VL_TABLE_15_0 (DCC_CSRS + 0x000000000028)
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY0_SHIFT 0
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY10_SHIFT 40
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY11_SHIFT 44
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY12_SHIFT 48
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY13_SHIFT 52
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY14_SHIFT 56
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY15_SHIFT 60
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY1_SHIFT 4
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY2_SHIFT 8
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY3_SHIFT 12
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY4_SHIFT 16
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY5_SHIFT 20
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY6_SHIFT 24
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY7_SHIFT 28
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY8_SHIFT 32
-#define DCC_CFG_SC_VL_TABLE_15_0_ENTRY9_SHIFT 36
-#define DCC_CFG_SC_VL_TABLE_31_16 (DCC_CSRS + 0x000000000030)
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY16_SHIFT 0
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY17_SHIFT 4
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY18_SHIFT 8
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY19_SHIFT 12
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY20_SHIFT 16
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY21_SHIFT 20
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY22_SHIFT 24
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY23_SHIFT 28
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY24_SHIFT 32
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY25_SHIFT 36
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY26_SHIFT 40
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY27_SHIFT 44
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY28_SHIFT 48
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY29_SHIFT 52
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY30_SHIFT 56
-#define DCC_CFG_SC_VL_TABLE_31_16_ENTRY31_SHIFT 60
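The ENTRYn shifts above advance by 4 bits per service class, i.e. ENTRYn_SHIFT == n * 4 within each table register. A sketch of placing one 4-bit VL; the 0xF nibble mask and the helper name are assumptions, not defines from this file:

/* Sketch: set the VL for SC 'sc' (0..15) in the SC_VL_TABLE_15_0 image. */
static inline u64 sc_vl_15_0_set(u64 table, unsigned int sc, u64 vl)
{
	return (table & ~(0xFull << (sc * 4))) | ((vl & 0xFull) << (sc * 4));
}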
-#define DCC_ERR_DROPPED_PKT_CNT (DCC_CSRS + 0x000000000120)
-#define DCC_ERR_FLG (DCC_CSRS + 0x000000000050)
-#define DCC_ERR_FLG_BAD_CRDT_ACK_ERR_SMASK 0x4000ull
-#define DCC_ERR_FLG_BAD_CTRL_DIST_ERR_SMASK 0x200000ull
-#define DCC_ERR_FLG_BAD_CTRL_FLIT_ERR_SMASK 0x10000ull
-#define DCC_ERR_FLG_BAD_DLID_TARGET_ERR_SMASK 0x200ull
-#define DCC_ERR_FLG_BAD_HEAD_DIST_ERR_SMASK 0x800000ull
-#define DCC_ERR_FLG_BAD_L2_ERR_SMASK 0x2ull
-#define DCC_ERR_FLG_BAD_LVER_ERR_SMASK 0x400ull
-#define DCC_ERR_FLG_BAD_MID_TAIL_ERR_SMASK 0x8ull
-#define DCC_ERR_FLG_BAD_PKT_LENGTH_ERR_SMASK 0x4000000ull
-#define DCC_ERR_FLG_BAD_PREEMPTION_ERR_SMASK 0x10ull
-#define DCC_ERR_FLG_BAD_SC_ERR_SMASK 0x4ull
-#define DCC_ERR_FLG_BAD_TAIL_DIST_ERR_SMASK 0x400000ull
-#define DCC_ERR_FLG_BAD_VL_MARKER_ERR_SMASK 0x80ull
-#define DCC_ERR_FLG_CLR (DCC_CSRS + 0x000000000060)
-#define DCC_ERR_FLG_CSR_ACCESS_BLOCKED_HOST_SMASK 0x8000000000ull
-#define DCC_ERR_FLG_CSR_ACCESS_BLOCKED_UC_SMASK 0x10000000000ull
-#define DCC_ERR_FLG_CSR_INVAL_ADDR_SMASK 0x400000000000ull
-#define DCC_ERR_FLG_CSR_PARITY_ERR_SMASK 0x200000000000ull
-#define DCC_ERR_FLG_DLID_ZERO_ERR_SMASK 0x40000000ull
-#define DCC_ERR_FLG_EN (DCC_CSRS + 0x000000000058)
-#define DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK 0x8000000000ull
-#define DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK 0x10000000000ull
-#define DCC_ERR_FLG_EVENT_CNTR_PARITY_ERR_SMASK 0x20000ull
-#define DCC_ERR_FLG_EVENT_CNTR_ROLLOVER_ERR_SMASK 0x40000ull
-#define DCC_ERR_FLG_FMCONFIG_ERR_SMASK 0x40000000000000ull
-#define DCC_ERR_FLG_FPE_TX_FIFO_OVFLW_ERR_SMASK 0x2000000000ull
-#define DCC_ERR_FLG_FPE_TX_FIFO_UNFLW_ERR_SMASK 0x4000000000ull
-#define DCC_ERR_FLG_LATE_EBP_ERR_SMASK 0x1000000000ull
-#define DCC_ERR_FLG_LATE_LONG_ERR_SMASK 0x800000000ull
-#define DCC_ERR_FLG_LATE_SHORT_ERR_SMASK 0x400000000ull
-#define DCC_ERR_FLG_LENGTH_MTU_ERR_SMASK 0x80000000ull
-#define DCC_ERR_FLG_LINK_ERR_SMASK 0x80000ull
-#define DCC_ERR_FLG_MISC_CNTR_ROLLOVER_ERR_SMASK 0x100000ull
-#define DCC_ERR_FLG_NONVL15_STATE_ERR_SMASK 0x1000000ull
-#define DCC_ERR_FLG_PERM_NVL15_ERR_SMASK 0x10000000ull
-#define DCC_ERR_FLG_PREEMPTION_ERR_SMASK 0x20ull
-#define DCC_ERR_FLG_PREEMPTIONVL15_ERR_SMASK 0x40ull
-#define DCC_ERR_FLG_RCVPORT_ERR_SMASK 0x80000000000000ull
-#define DCC_ERR_FLG_RX_BYTE_SHFT_PARITY_ERR_SMASK 0x1000000000000ull
-#define DCC_ERR_FLG_RX_CTRL_PARITY_MBE_ERR_SMASK 0x100000000000ull
-#define DCC_ERR_FLG_RX_EARLY_DROP_ERR_SMASK 0x200000000ull
-#define DCC_ERR_FLG_SLID_ZERO_ERR_SMASK 0x20000000ull
-#define DCC_ERR_FLG_TX_BYTE_SHFT_PARITY_ERR_SMASK 0x800000000000ull
-#define DCC_ERR_FLG_TX_CTRL_PARITY_ERR_SMASK 0x20000000000ull
-#define DCC_ERR_FLG_TX_CTRL_PARITY_MBE_ERR_SMASK 0x40000000000ull
-#define DCC_ERR_FLG_TX_SC_PARITY_ERR_SMASK 0x80000000000ull
-#define DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK 0x2000ull
-#define DCC_ERR_FLG_UNSUP_PKT_TYPE_SMASK 0x8000ull
-#define DCC_ERR_FLG_UNSUP_VL_ERR_SMASK 0x8000000ull
-#define DCC_ERR_FLG_VL15_MULTI_ERR_SMASK 0x2000000ull
-#define DCC_ERR_FMCONFIG_ERR_CNT (DCC_CSRS + 0x000000000110)
-#define DCC_ERR_INFO_FMCONFIG (DCC_CSRS + 0x000000000090)
-#define DCC_ERR_INFO_PORTRCV (DCC_CSRS + 0x000000000078)
-#define DCC_ERR_INFO_PORTRCV_HDR0 (DCC_CSRS + 0x000000000080)
-#define DCC_ERR_INFO_PORTRCV_HDR1 (DCC_CSRS + 0x000000000088)
-#define DCC_ERR_INFO_UNCORRECTABLE (DCC_CSRS + 0x000000000098)
-#define DCC_ERR_PORTRCV_ERR_CNT (DCC_CSRS + 0x000000000108)
-#define DCC_ERR_RCVREMOTE_PHY_ERR_CNT (DCC_CSRS + 0x000000000118)
-#define DCC_ERR_UNCORRECTABLE_CNT (DCC_CSRS + 0x000000000100)
-#define DCC_PRF_PORT_MARK_FECN_CNT (DCC_CSRS + 0x000000000330)
-#define DCC_PRF_PORT_RCV_BECN_CNT (DCC_CSRS + 0x000000000290)
-#define DCC_PRF_PORT_RCV_BUBBLE_CNT (DCC_CSRS + 0x0000000002E0)
-#define DCC_PRF_PORT_RCV_CORRECTABLE_CNT (DCC_CSRS + 0x000000000140)
-#define DCC_PRF_PORT_RCV_DATA_CNT (DCC_CSRS + 0x000000000198)
-#define DCC_PRF_PORT_RCV_FECN_CNT (DCC_CSRS + 0x000000000240)
-#define DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT (DCC_CSRS + 0x000000000130)
-#define DCC_PRF_PORT_RCV_PKTS_CNT (DCC_CSRS + 0x0000000001A8)
-#define DCC_PRF_PORT_VL_MARK_FECN_CNT (DCC_CSRS + 0x000000000338)
-#define DCC_PRF_PORT_VL_RCV_BECN_CNT (DCC_CSRS + 0x000000000298)
-#define DCC_PRF_PORT_VL_RCV_BUBBLE_CNT (DCC_CSRS + 0x0000000002E8)
-#define DCC_PRF_PORT_VL_RCV_DATA_CNT (DCC_CSRS + 0x0000000001B0)
-#define DCC_PRF_PORT_VL_RCV_FECN_CNT (DCC_CSRS + 0x000000000248)
-#define DCC_PRF_PORT_VL_RCV_PKTS_CNT (DCC_CSRS + 0x0000000001F8)
-#define DCC_PRF_PORT_XMIT_CORRECTABLE_CNT (DCC_CSRS + 0x000000000138)
-#define DCC_PRF_PORT_XMIT_DATA_CNT (DCC_CSRS + 0x000000000190)
-#define DCC_PRF_PORT_XMIT_MULTICAST_CNT (DCC_CSRS + 0x000000000128)
-#define DCC_PRF_PORT_XMIT_PKTS_CNT (DCC_CSRS + 0x0000000001A0)
-#define DCC_PRF_RX_FLOW_CRTL_CNT (DCC_CSRS + 0x000000000180)
-#define DCC_PRF_TX_FLOW_CRTL_CNT (DCC_CSRS + 0x000000000188)
-#define DC_DC8051_CFG_CSR_ACCESS_SEL (DC_8051_CSRS + 0x000000000110)
-#define DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK 0x2ull
-#define DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK 0x1ull
-#define DC_DC8051_CFG_EXT_DEV_0 (DC_8051_CSRS + 0x000000000118)
-#define DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK 0x1ull
-#define DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT 8
-#define DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT 16
-#define DC_DC8051_CFG_EXT_DEV_1 (DC_8051_CSRS + 0x000000000120)
-#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK 0xFFFFull
-#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT 16
-#define DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK 0xFFFF0000ull
-#define DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK 0x1ull
-#define DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK 0xFFull
-#define DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT 8
-#define DC_DC8051_CFG_HOST_CMD_0 (DC_8051_CSRS + 0x000000000028)
-#define DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK 0xFFFFFFFFFFFFull
-#define DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT 16
-#define DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK 0x1ull
-#define DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK 0xFFull
-#define DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT 8
-#define DC_DC8051_CFG_HOST_CMD_1 (DC_8051_CSRS + 0x000000000030)
-#define DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK 0x1ull
-#define DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK 0xFFull
-#define DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT 8
-#define DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK 0xFFFFFFFFFFFFull
-#define DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT 16
-#define DC_DC8051_CFG_LOCAL_GUID (DC_8051_CSRS + 0x000000000038)
-#define DC_DC8051_CFG_MODE (DC_8051_CSRS + 0x000000000070)
-#define DC_DC8051_CFG_RAM_ACCESS_CTRL (DC_8051_CSRS + 0x000000000008)
-#define DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK 0x7FFFull
-#define DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT 0
-#define DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK 0x1000000ull
-#define DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK 0x10000ull
-#define DC_DC8051_CFG_RAM_ACCESS_SETUP (DC_8051_CSRS + 0x000000000000)
-#define DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK 0x100ull
-#define DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK 0x1ull
-#define DC_DC8051_CFG_RAM_ACCESS_STATUS (DC_8051_CSRS + 0x000000000018)
-#define DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK 0x10000ull
-#define DC_DC8051_CFG_RAM_ACCESS_WR_DATA (DC_8051_CSRS + 0x000000000010)
-#define DC_DC8051_CFG_RAM_ACCESS_RD_DATA (DC_8051_CSRS + 0x000000000020)
-#define DC_DC8051_CFG_RST (DC_8051_CSRS + 0x000000000068)
-#define DC_DC8051_CFG_RST_CRAM_SMASK 0x2ull
-#define DC_DC8051_CFG_RST_DRAM_SMASK 0x4ull
-#define DC_DC8051_CFG_RST_IRAM_SMASK 0x8ull
-#define DC_DC8051_CFG_RST_M8051W_SMASK 0x1ull
-#define DC_DC8051_CFG_RST_SFR_SMASK 0x10ull
-#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051 (DC_8051_CSRS + 0x0000000000D8)
-#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK 0xFFFFFFFFull
-#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT 16
-#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK 0xFFFFull
-#define DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT 0
-#define DC_DC8051_ERR_CLR (DC_8051_CSRS + 0x0000000000E8)
-#define DC_DC8051_ERR_EN (DC_8051_CSRS + 0x0000000000F0)
-#define DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK 0x2ull
-#define DC_DC8051_ERR_FLG (DC_8051_CSRS + 0x0000000000E0)
-#define DC_DC8051_ERR_FLG_CRAM_MBE_SMASK 0x4ull
-#define DC_DC8051_ERR_FLG_CRAM_SBE_SMASK 0x8ull
-#define DC_DC8051_ERR_FLG_DRAM_MBE_SMASK 0x10ull
-#define DC_DC8051_ERR_FLG_DRAM_SBE_SMASK 0x20ull
-#define DC_DC8051_ERR_FLG_INVALID_CSR_ADDR_SMASK 0x400ull
-#define DC_DC8051_ERR_FLG_IRAM_MBE_SMASK 0x40ull
-#define DC_DC8051_ERR_FLG_IRAM_SBE_SMASK 0x80ull
-#define DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK 0x2ull
-#define DC_DC8051_ERR_FLG_SET_BY_8051_SMASK 0x1ull
-#define DC_DC8051_ERR_FLG_UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES_SMASK 0x100ull
-#define DC_DC8051_STS_CUR_STATE (DC_8051_CSRS + 0x000000000060)
-#define DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK 0xFFull
-#define DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT 16
-#define DC_DC8051_STS_CUR_STATE_PORT_MASK 0xFFull
-#define DC_DC8051_STS_CUR_STATE_PORT_SHIFT 0
-#define DC_DC8051_STS_LOCAL_FM_SECURITY (DC_8051_CSRS + 0x000000000050)
-#define DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK 0x1ull
-#define DC_DC8051_STS_REMOTE_FM_SECURITY (DC_8051_CSRS + 0x000000000058)
-#define DC_DC8051_STS_REMOTE_GUID (DC_8051_CSRS + 0x000000000040)
-#define DC_DC8051_STS_REMOTE_NODE_TYPE (DC_8051_CSRS + 0x000000000048)
-#define DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK 0x3ull
-#define DC_DC8051_STS_REMOTE_PORT_NO (DC_8051_CSRS + 0x000000000130)
-#define DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK 0xFFull
-#define DC_LCB_CFG_ALLOW_LINK_UP (DC_LCB_CSRS + 0x000000000128)
-#define DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT 0
-#define DC_LCB_CFG_CRC_MODE (DC_LCB_CSRS + 0x000000000058)
-#define DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT 0
-#define DC_LCB_CFG_IGNORE_LOST_RCLK (DC_LCB_CSRS + 0x000000000020)
-#define DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK 0x1ull
-#define DC_LCB_CFG_LANE_WIDTH (DC_LCB_CSRS + 0x000000000100)
-#define DC_LCB_CFG_LINK_KILL_EN (DC_LCB_CSRS + 0x000000000120)
-#define DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK 0x100000ull
-#define DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK 0x400000ull
-#define DC_LCB_CFG_LN_DCLK (DC_LCB_CSRS + 0x000000000060)
-#define DC_LCB_CFG_LOOPBACK (DC_LCB_CSRS + 0x0000000000F8)
-#define DC_LCB_CFG_LOOPBACK_VAL_SHIFT 0
-#define DC_LCB_CFG_RUN (DC_LCB_CSRS + 0x000000000000)
-#define DC_LCB_CFG_RUN_EN_SHIFT 0
-#define DC_LCB_CFG_RX_FIFOS_RADR (DC_LCB_CSRS + 0x000000000018)
-#define DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT 8
-#define DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT 4
-#define DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT 0
-#define DC_LCB_CFG_TX_FIFOS_RADR (DC_LCB_CSRS + 0x000000000010)
-#define DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT 0
-#define DC_LCB_CFG_TX_FIFOS_RESET (DC_LCB_CSRS + 0x000000000008)
-#define DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT 0
-#define DC_LCB_CFG_REINIT_AS_SLAVE (DC_LCB_CSRS + 0x000000000030)
-#define DC_LCB_CFG_CNT_FOR_SKIP_STALL (DC_LCB_CSRS + 0x000000000040)
-#define DC_LCB_CFG_CLK_CNTR (DC_LCB_CSRS + 0x000000000110)
-#define DC_LCB_ERR_CLR (DC_LCB_CSRS + 0x000000000308)
-#define DC_LCB_ERR_EN (DC_LCB_CSRS + 0x000000000310)
-#define DC_LCB_ERR_FLG (DC_LCB_CSRS + 0x000000000300)
-#define DC_LCB_ERR_FLG_REDUNDANT_FLIT_PARITY_ERR_SMASK 0x20000000ull
-#define DC_LCB_ERR_FLG_NEG_EDGE_LINK_TRANSFER_ACTIVE_SMASK 0x10000000ull
-#define DC_LCB_ERR_FLG_HOLD_REINIT_SMASK 0x8000000ull
-#define DC_LCB_ERR_FLG_RST_FOR_INCOMPLT_RND_TRIP_SMASK 0x4000000ull
-#define DC_LCB_ERR_FLG_RST_FOR_LINK_TIMEOUT_SMASK 0x2000000ull
-#define DC_LCB_ERR_FLG_CREDIT_RETURN_FLIT_MBE_SMASK 0x1000000ull
-#define DC_LCB_ERR_FLG_REPLAY_BUF_SBE_SMASK 0x800000ull
-#define DC_LCB_ERR_FLG_REPLAY_BUF_MBE_SMASK 0x400000ull
-#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_SBE_SMASK 0x200000ull
-#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_MBE_SMASK 0x100000ull
-#define DC_LCB_ERR_FLG_VL_ACK_INPUT_WRONG_CRC_MODE_SMASK 0x80000ull
-#define DC_LCB_ERR_FLG_VL_ACK_INPUT_PARITY_ERR_SMASK 0x40000ull
-#define DC_LCB_ERR_FLG_VL_ACK_INPUT_BUF_OFLW_SMASK 0x20000ull
-#define DC_LCB_ERR_FLG_FLIT_INPUT_BUF_OFLW_SMASK 0x10000ull
-#define DC_LCB_ERR_FLG_ILLEGAL_FLIT_ENCODING_SMASK 0x8000ull
-#define DC_LCB_ERR_FLG_ILLEGAL_NULL_LTP_SMASK 0x4000ull
-#define DC_LCB_ERR_FLG_UNEXPECTED_ROUND_TRIP_MARKER_SMASK 0x2000ull
-#define DC_LCB_ERR_FLG_UNEXPECTED_REPLAY_MARKER_SMASK 0x1000ull
-#define DC_LCB_ERR_FLG_RCLK_STOPPED_SMASK 0x800ull
-#define DC_LCB_ERR_FLG_CRC_ERR_CNT_HIT_LIMIT_SMASK 0x400ull
-#define DC_LCB_ERR_FLG_REINIT_FOR_LN_DEGRADE_SMASK 0x200ull
-#define DC_LCB_ERR_FLG_REINIT_FROM_PEER_SMASK 0x100ull
-#define DC_LCB_ERR_FLG_SEQ_CRC_ERR_SMASK 0x80ull
-#define DC_LCB_ERR_FLG_RX_LESS_THAN_FOUR_LNS_SMASK 0x40ull
-#define DC_LCB_ERR_FLG_TX_LESS_THAN_FOUR_LNS_SMASK 0x20ull
-#define DC_LCB_ERR_FLG_LOST_REINIT_STALL_OR_TOS_SMASK 0x10ull
-#define DC_LCB_ERR_FLG_ALL_LNS_FAILED_REINIT_TEST_SMASK 0x8ull
-#define DC_LCB_ERR_FLG_RST_FOR_FAILED_DESKEW_SMASK 0x4ull
-#define DC_LCB_ERR_FLG_INVALID_CSR_ADDR_SMASK 0x2ull
-#define DC_LCB_ERR_FLG_CSR_PARITY_ERR_SMASK 0x1ull
-#define DC_LCB_ERR_INFO_CRC_ERR_LN0 (DC_LCB_CSRS + 0x000000000328)
-#define DC_LCB_ERR_INFO_CRC_ERR_LN1 (DC_LCB_CSRS + 0x000000000330)
-#define DC_LCB_ERR_INFO_CRC_ERR_LN2 (DC_LCB_CSRS + 0x000000000338)
-#define DC_LCB_ERR_INFO_CRC_ERR_LN3 (DC_LCB_CSRS + 0x000000000340)
-#define DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN (DC_LCB_CSRS + 0x000000000348)
-#define DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT (DC_LCB_CSRS + 0x000000000368)
-#define DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT (DC_LCB_CSRS + 0x000000000370)
-#define DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT (DC_LCB_CSRS + 0x000000000378)
-#define DC_LCB_ERR_INFO_MISC_FLG_CNT (DC_LCB_CSRS + 0x000000000390)
-#define DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT (DC_LCB_CSRS + 0x000000000380)
-#define DC_LCB_ERR_INFO_RX_REPLAY_CNT (DC_LCB_CSRS + 0x000000000358)
-#define DC_LCB_ERR_INFO_SBE_CNT (DC_LCB_CSRS + 0x000000000388)
-#define DC_LCB_ERR_INFO_SEQ_CRC_CNT (DC_LCB_CSRS + 0x000000000360)
-#define DC_LCB_ERR_INFO_TOTAL_CRC_ERR (DC_LCB_CSRS + 0x000000000320)
-#define DC_LCB_ERR_INFO_TX_REPLAY_CNT (DC_LCB_CSRS + 0x000000000350)
-#define DC_LCB_PG_DBG_FLIT_CRDTS_CNT (DC_LCB_CSRS + 0x000000000580)
-#define DC_LCB_PG_STS_PAUSE_COMPLETE_CNT (DC_LCB_CSRS + 0x0000000005F8)
-#define DC_LCB_PG_STS_TX_MBE_CNT (DC_LCB_CSRS + 0x000000000608)
-#define DC_LCB_PG_STS_TX_SBE_CNT (DC_LCB_CSRS + 0x000000000600)
-#define DC_LCB_PRF_ACCEPTED_LTP_CNT (DC_LCB_CSRS + 0x000000000408)
-#define DC_LCB_PRF_CLK_CNTR (DC_LCB_CSRS + 0x000000000420)
-#define DC_LCB_PRF_GOOD_LTP_CNT (DC_LCB_CSRS + 0x000000000400)
-#define DC_LCB_PRF_RX_FLIT_CNT (DC_LCB_CSRS + 0x000000000410)
-#define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
-#define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
-#define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
-#define RCV_BUF_OVFL_CNT 10
-#define RCV_CONTEXT_EGR_STALL 22
-#define RCV_DATA_PKT_CNT 0
-#define RCV_DWORD_CNT 1
-#define RCV_TID_FLOW_GEN_MISMATCH_CNT 20
-#define RCV_TID_FLOW_SEQ_MISMATCH_CNT 23
-#define RCV_TID_FULL_ERR_CNT 18
-#define RCV_TID_VALID_ERR_CNT 19
-#define RXE_NUM_32_BIT_COUNTERS 24
-#define RXE_NUM_64_BIT_COUNTERS 2
-#define RXE_NUM_RSM_INSTANCES 4
-#define RXE_NUM_TID_FLOWS 32
-#define RXE_PER_CONTEXT_OFFSET 0x0300000
-#define SEND_DATA_PKT_CNT 0
-#define SEND_DATA_PKT_VL0_CNT 12
-#define SEND_DATA_VL0_CNT 3
-#define SEND_DROPPED_PKT_CNT 5
-#define SEND_DWORD_CNT 1
-#define SEND_FLOW_STALL_CNT 4
-#define SEND_HEADERS_ERR_CNT 6
-#define SEND_LEN_ERR_CNT 1
-#define SEND_MAX_MIN_LEN_ERR_CNT 2
-#define SEND_UNDERRUN_CNT 3
-#define SEND_UNSUP_VL_ERR_CNT 0
-#define SEND_WAIT_CNT 2
-#define SEND_WAIT_VL0_CNT 21
-#define TXE_PIO_SEND_OFFSET 0x0800000
-#define ASIC_CFG_DRV_STR (ASIC + 0x000000000048)
-#define ASIC_CFG_MUTEX (ASIC + 0x000000000040)
-#define ASIC_CFG_SBUS_EXECUTE (ASIC + 0x000000000008)
-#define ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK 0x1ull
-#define ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK 0x2ull
-#define ASIC_CFG_SBUS_REQUEST (ASIC + 0x000000000000)
-#define ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT 16
-#define ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT 8
-#define ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT 32
-#define ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT 0
-#define ASIC_CFG_SCRATCH (ASIC + 0x000000000020)
-#define ASIC_CFG_THERM_POLL_EN (ASIC + 0x000000000050)
-#define ASIC_EEP_ADDR_CMD (ASIC + 0x000000000308)
-#define ASIC_EEP_ADDR_CMD_EP_ADDR_MASK 0xFFFFFFull
-#define ASIC_EEP_CTL_STAT (ASIC + 0x000000000300)
-#define ASIC_EEP_CTL_STAT_EP_RESET_SMASK 0x4ull
-#define ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT 8
-#define ASIC_EEP_CTL_STAT_RESETCSR 0x0000000083818000ull
-#define ASIC_EEP_DATA (ASIC + 0x000000000310)
-#define ASIC_GPIO_CLEAR (ASIC + 0x000000000230)
-#define ASIC_GPIO_FORCE (ASIC + 0x000000000238)
-#define ASIC_GPIO_IN (ASIC + 0x000000000200)
-#define ASIC_GPIO_INVERT (ASIC + 0x000000000210)
-#define ASIC_GPIO_MASK (ASIC + 0x000000000220)
-#define ASIC_GPIO_OE (ASIC + 0x000000000208)
-#define ASIC_GPIO_OUT (ASIC + 0x000000000218)
-#define ASIC_PCIE_SD_HOST_CMD (ASIC + 0x000000000100)
-#define ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT 0
-#define ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK 0x400ull
-#define ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT 2
-#define ASIC_PCIE_SD_HOST_CMD_TIMER_MASK 0xFFFFFull
-#define ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT 12
-#define ASIC_PCIE_SD_HOST_STATUS (ASIC + 0x000000000108)
-#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK 0x7ull
-#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT 2
-#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK 0x3ull
-#define ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT 0
-#define ASIC_PCIE_SD_INTRPT_DATA_CODE (ASIC + 0x000000000110)
-#define ASIC_PCIE_SD_INTRPT_ENABLE (ASIC + 0x000000000118)
-#define ASIC_PCIE_SD_INTRPT_LIST (ASIC + 0x000000000180)
-#define ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT 16
-#define ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT 0
-#define ASIC_PCIE_SD_INTRPT_STATUS (ASIC + 0x000000000128)
-#define ASIC_QSFP1_CLEAR (ASIC + 0x000000000270)
-#define ASIC_QSFP1_FORCE (ASIC + 0x000000000278)
-#define ASIC_QSFP1_IN (ASIC + 0x000000000240)
-#define ASIC_QSFP1_INVERT (ASIC + 0x000000000250)
-#define ASIC_QSFP1_MASK (ASIC + 0x000000000260)
-#define ASIC_QSFP1_OE (ASIC + 0x000000000248)
-#define ASIC_QSFP1_OUT (ASIC + 0x000000000258)
-#define ASIC_QSFP1_STATUS (ASIC + 0x000000000268)
-#define ASIC_QSFP2_CLEAR (ASIC + 0x0000000002B0)
-#define ASIC_QSFP2_FORCE (ASIC + 0x0000000002B8)
-#define ASIC_QSFP2_IN (ASIC + 0x000000000280)
-#define ASIC_QSFP2_INVERT (ASIC + 0x000000000290)
-#define ASIC_QSFP2_MASK (ASIC + 0x0000000002A0)
-#define ASIC_QSFP2_OE (ASIC + 0x000000000288)
-#define ASIC_QSFP2_OUT (ASIC + 0x000000000298)
-#define ASIC_QSFP2_STATUS (ASIC + 0x0000000002A8)
-#define ASIC_STS_SBUS_COUNTERS (ASIC + 0x000000000018)
-#define ASIC_STS_SBUS_COUNTERS_EXECUTE_CNT_MASK 0xFFFFull
-#define ASIC_STS_SBUS_COUNTERS_EXECUTE_CNT_SHIFT 0
-#define ASIC_STS_SBUS_COUNTERS_RCV_DATA_VALID_CNT_MASK 0xFFFFull
-#define ASIC_STS_SBUS_COUNTERS_RCV_DATA_VALID_CNT_SHIFT 16
-#define ASIC_STS_SBUS_RESULT (ASIC + 0x000000000010)
-#define ASIC_STS_SBUS_RESULT_DONE_SMASK 0x1ull
-#define ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK 0x2ull
-#define ASIC_STS_THERM (ASIC + 0x000000000058)
-#define ASIC_STS_THERM_CRIT_TEMP_MASK 0x7FFull
-#define ASIC_STS_THERM_CRIT_TEMP_SHIFT 18
-#define ASIC_STS_THERM_CURR_TEMP_MASK 0x7FFull
-#define ASIC_STS_THERM_CURR_TEMP_SHIFT 2
-#define ASIC_STS_THERM_HI_TEMP_MASK 0x7FFull
-#define ASIC_STS_THERM_HI_TEMP_SHIFT 50
-#define ASIC_STS_THERM_LO_TEMP_MASK 0x7FFull
-#define ASIC_STS_THERM_LO_TEMP_SHIFT 34
-#define ASIC_STS_THERM_LOW_SHIFT 13
-#define CCE_COUNTER_ARRAY32 (CCE + 0x000000000060)
-#define CCE_CTRL (CCE + 0x000000000010)
-#define CCE_CTRL_RXE_RESUME_SMASK 0x800ull
-#define CCE_CTRL_SPC_FREEZE_SMASK 0x100ull
-#define CCE_CTRL_SPC_UNFREEZE_SMASK 0x200ull
-#define CCE_CTRL_TXE_RESUME_SMASK 0x2000ull
-#define CCE_DC_CTRL (CCE + 0x0000000000B8)
-#define CCE_DC_CTRL_DC_RESET_SMASK 0x1ull
-#define CCE_DC_CTRL_RESETCSR 0x0000000000000001ull
-#define CCE_ERR_CLEAR (CCE + 0x000000000050)
-#define CCE_ERR_MASK (CCE + 0x000000000048)
-#define CCE_ERR_STATUS (CCE + 0x000000000040)
-#define CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK 0x40ull
-#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK 0x1000ull
-#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK \
- 0x200ull
-#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK \
- 0x800ull
-#define CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK \
- 0x400ull
-#define CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK 0x100ull
-#define CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK 0x80ull
-#define CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK 0x1ull
-#define CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
-#define CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
-#define CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK 0x4000000000ull
-#define CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK 0x8000000000ull
-#define CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK 0x10000000000ull
-#define CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK 0x1000000000ull
-#define CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK 0x2000000000ull
-#define CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK 0x400000000ull
-#define CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK 0x20ull
-#define CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK 0x800000000ull
-#define CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK 0x100000000ull
-#define CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK 0x200000000ull
-#define CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK 0x10ull
-#define CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK 0x8ull
-#define CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK 0x40000000ull
-#define CCE_ERR_STATUS_LA_TRIGGERED_SMASK 0x80000000ull
-#define CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK 0x40000ull
-#define CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK 0x4000000ull
-#define CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK 0x20000ull
-#define CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK 0x2000000ull
-#define CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK 0x100000ull
-#define CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK 0x80000ull
-#define CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK 0x10000ull
-#define CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK 0x1000000ull
-#define CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK 0x8000ull
-#define CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK 0x800000ull
-#define CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK 0x20000000ull
-#define CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK 0x2000ull
-#define CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK 0x200000ull
-#define CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK 0x4000ull
-#define CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK 0x400000ull
-#define CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK 0x10000000ull
-#define CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK 0x8000000ull
-#define CCE_INT_CLEAR (CCE + 0x000000110A00)
-#define CCE_INT_COUNTER_ARRAY32 (CCE + 0x000000110D00)
-#define CCE_INT_FORCE (CCE + 0x000000110B00)
-#define CCE_INT_MAP (CCE + 0x000000110500)
-#define CCE_INT_MASK (CCE + 0x000000110900)
-#define CCE_INT_STATUS (CCE + 0x000000110800)
-#define CCE_MSIX_INT_GRANTED (CCE + 0x000000110200)
-#define CCE_MSIX_TABLE_LOWER (CCE + 0x000000100000)
-#define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008)
-#define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull
-#define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400)
-#define CCE_PCIE_CTRL (CCE + 0x0000000000C0)
-#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK 0x3ull
-#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT 0
-#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK 0xFull
-#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT 2
-#define CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT 8
-#define CCE_PCIE_CTRL_XMT_MARGIN_SHIFT 9
-#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK 0x1ull
-#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT 12
-#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK 0x7ull
-#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT 13
-#define CCE_REVISION (CCE + 0x000000000000)
-#define CCE_REVISION2 (CCE + 0x000000000008)
-#define CCE_REVISION2_HFI_ID_MASK 0x1ull
-#define CCE_REVISION2_HFI_ID_SHIFT 0
-#define CCE_REVISION2_IMPL_CODE_SHIFT 8
-#define CCE_REVISION2_IMPL_REVISION_SHIFT 16
-#define CCE_REVISION_BOARD_ID_LOWER_NIBBLE_MASK 0xFull
-#define CCE_REVISION_BOARD_ID_LOWER_NIBBLE_SHIFT 32
-#define CCE_REVISION_CHIP_REV_MAJOR_MASK 0xFFull
-#define CCE_REVISION_CHIP_REV_MAJOR_SHIFT 8
-#define CCE_REVISION_CHIP_REV_MINOR_MASK 0xFFull
-#define CCE_REVISION_CHIP_REV_MINOR_SHIFT 0
-#define CCE_REVISION_SW_MASK 0xFFull
-#define CCE_REVISION_SW_SHIFT 24
-#define CCE_SCRATCH (CCE + 0x000000000020)
-#define CCE_STATUS (CCE + 0x000000000018)
-#define CCE_STATUS_RXE_FROZE_SMASK 0x2ull
-#define CCE_STATUS_RXE_PAUSED_SMASK 0x20ull
-#define CCE_STATUS_SDMA_FROZE_SMASK 0x1ull
-#define CCE_STATUS_SDMA_PAUSED_SMASK 0x10ull
-#define CCE_STATUS_TXE_FROZE_SMASK 0x4ull
-#define CCE_STATUS_TXE_PAUSED_SMASK 0x40ull
-#define CCE_STATUS_TXE_PIO_FROZE_SMASK 0x8ull
-#define CCE_STATUS_TXE_PIO_PAUSED_SMASK 0x80ull
-#define MISC_CFG_FW_CTRL (MISC + 0x000000001000)
-#define MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK 0x2ull
-#define MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT 2
-#define MISC_CFG_FW_CTRL_RSA_STATUS_SMASK 0xCull
-#define MISC_CFG_RSA_CMD (MISC + 0x000000000A08)
-#define MISC_CFG_RSA_MODULUS (MISC + 0x000000000400)
-#define MISC_CFG_RSA_MU (MISC + 0x000000000A10)
-#define MISC_CFG_RSA_R2 (MISC + 0x000000000000)
-#define MISC_CFG_RSA_SIGNATURE (MISC + 0x000000000200)
-#define MISC_CFG_SHA_PRELOAD (MISC + 0x000000000A00)
-#define MISC_ERR_CLEAR (MISC + 0x000000002010)
-#define MISC_ERR_MASK (MISC + 0x000000002008)
-#define MISC_ERR_STATUS (MISC + 0x000000002000)
-#define MISC_ERR_STATUS_MISC_PLL_LOCK_FAIL_ERR_SMASK 0x1000ull
-#define MISC_ERR_STATUS_MISC_MBIST_FAIL_ERR_SMASK 0x800ull
-#define MISC_ERR_STATUS_MISC_INVALID_EEP_CMD_ERR_SMASK 0x400ull
-#define MISC_ERR_STATUS_MISC_EFUSE_DONE_PARITY_ERR_SMASK 0x200ull
-#define MISC_ERR_STATUS_MISC_EFUSE_WRITE_ERR_SMASK 0x100ull
-#define MISC_ERR_STATUS_MISC_EFUSE_READ_BAD_ADDR_ERR_SMASK 0x80ull
-#define MISC_ERR_STATUS_MISC_EFUSE_CSR_PARITY_ERR_SMASK 0x40ull
-#define MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK 0x20ull
-#define MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK 0x10ull
-#define MISC_ERR_STATUS_MISC_SBUS_WRITE_FAILED_ERR_SMASK 0x8ull
-#define MISC_ERR_STATUS_MISC_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
-#define MISC_ERR_STATUS_MISC_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
-#define MISC_ERR_STATUS_MISC_CSR_PARITY_ERR_SMASK 0x1ull
-#define PCI_CFG_MSIX0 (PCIE + 0x0000000000B0)
-#define PCI_CFG_REG1 (PCIE + 0x000000000004)
-#define PCI_CFG_REG11 (PCIE + 0x00000000002C)
-#define PCIE_CFG_SPCIE1 (PCIE + 0x00000000014C)
-#define PCIE_CFG_SPCIE2 (PCIE + 0x000000000150)
-#define PCIE_CFG_TPH2 (PCIE + 0x000000000180)
-#define RCV_ARRAY (RXE + 0x000000200000)
-#define RCV_ARRAY_CNT (RXE + 0x000000000018)
-#define RCV_ARRAY_RT_ADDR_MASK 0xFFFFFFFFFull
-#define RCV_ARRAY_RT_ADDR_SHIFT 0
-#define RCV_ARRAY_RT_BUF_SIZE_SHIFT 36
-#define RCV_ARRAY_RT_WRITE_ENABLE_SMASK 0x8000000000000000ull
-#define RCV_AVAIL_TIME_OUT (RXE + 0x000000100050)
-#define RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK 0xFFull
-#define RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT 0
-#define RCV_BTH_QP (RXE + 0x000000000028)
-#define RCV_BTH_QP_KDETH_QP_MASK 0xFFull
-#define RCV_BTH_QP_KDETH_QP_SHIFT 16
-#define RCV_BYPASS (RXE + 0x000000000038)
-#define RCV_CONTEXTS (RXE + 0x000000000010)
-#define RCV_COUNTER_ARRAY32 (RXE + 0x000000000400)
-#define RCV_COUNTER_ARRAY64 (RXE + 0x000000000500)
-#define RCV_CTRL (RXE + 0x000000000000)
-#define RCV_CTRL_RCV_BYPASS_ENABLE_SMASK 0x10ull
-#define RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK 0x40ull
-#define RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK 0x4ull
-#define RCV_CTRL_RCV_PORT_ENABLE_SMASK 0x1ull
-#define RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK 0x2ull
-#define RCV_CTRL_RCV_RSM_ENABLE_SMASK 0x20ull
-#define RCV_CTRL_RX_RBUF_INIT_SMASK 0x200ull
-#define RCV_CTXT_CTRL (RXE + 0x000000100000)
-#define RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK 0x4ull
-#define RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK 0x8ull
-#define RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK 0x7ull
-#define RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT 8
-#define RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK 0x700ull
-#define RCV_CTXT_CTRL_ENABLE_SMASK 0x1ull
-#define RCV_CTXT_CTRL_INTR_AVAIL_SMASK 0x20ull
-#define RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK 0x2ull
-#define RCV_CTXT_CTRL_TAIL_UPD_SMASK 0x40ull
-#define RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK 0x10ull
-#define RCV_CTXT_STATUS (RXE + 0x000000100008)
-#define RCV_EGR_CTRL (RXE + 0x000000100010)
-#define RCV_EGR_CTRL_EGR_BASE_INDEX_MASK 0x1FFFull
-#define RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT 0
-#define RCV_EGR_CTRL_EGR_CNT_MASK 0x1FFull
-#define RCV_EGR_CTRL_EGR_CNT_SHIFT 32
-#define RCV_EGR_INDEX_HEAD (RXE + 0x000000300018)
-#define RCV_EGR_INDEX_HEAD_HEAD_MASK 0x7FFull
-#define RCV_EGR_INDEX_HEAD_HEAD_SHIFT 0
-#define RCV_ERR_CLEAR (RXE + 0x000000000070)
-#define RCV_ERR_INFO (RXE + 0x000000000050)
-#define RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK 0x1Full
-#define RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK 0x20ull
-#define RCV_ERR_MASK (RXE + 0x000000000068)
-#define RCV_ERR_STATUS (RXE + 0x000000000060)
-#define RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK 0x8000000000000000ull
-#define RCV_ERR_STATUS_RX_CSR_READ_BAD_ADDR_ERR_SMASK 0x2000000000000000ull
-#define RCV_ERR_STATUS_RX_CSR_WRITE_BAD_ADDR_ERR_SMASK \
- 0x4000000000000000ull
-#define RCV_ERR_STATUS_RX_DC_INTF_PARITY_ERR_SMASK 0x2ull
-#define RCV_ERR_STATUS_RX_DC_SOP_EOP_PARITY_ERR_SMASK 0x200ull
-#define RCV_ERR_STATUS_RX_DMA_CSR_COR_ERR_SMASK 0x1ull
-#define RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK 0x200000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK 0x1000000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_COR_ERR_SMASK \
- 0x40000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
- 0x20000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
- 0x800000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
- 0x400000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_FLAG_COR_ERR_SMASK 0x800ull
-#define RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK 0x400ull
-#define RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_COR_ERR_SMASK 0x10000000000000ull
-#define RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK 0x8000000000000ull
-#define RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK 0x200000000000ull
-#define RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK 0x400000000000ull
-#define RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK 0x100000000000ull
-#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
- 0x10000000000ull
-#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK 0x8000000000ull
-#define RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
- 0x20000000000ull
-#define RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_COR_ERR_SMASK 0x80000000000ull
-#define RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK 0x40000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK 0x40000000ull
-#define RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_COR_ERR_SMASK 0x100000ull
-#define RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK 0x80000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK 0x400000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK 0x10000000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK 0x2000000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
- 0x200000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK 0x800000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
- 0x8000000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK 0x4000000ull
-#define RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK 0x1000000ull
-#define RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK 0x20000000ull
-#define RCV_ERR_STATUS_RX_RBUF_DATA_COR_ERR_SMASK 0x100000000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK 0x80000000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK 0x1000000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK 0x800000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_DESC_PART2_COR_ERR_SMASK 0x4000000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK 0x2000000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK 0x100000000ull
-#define RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK 0x800000000ull
-#define RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
- 0x1000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK 0x200000000ull
-#define RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK 0x400000000ull
-#define RCV_ERR_STATUS_RX_RBUF_FREE_LIST_COR_ERR_SMASK 0x4000ull
-#define RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK 0x2000ull
-#define RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK 0x80000000ull
-#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_COR_ERR_SMASK 0x40000ull
-#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK 0x10000ull
-#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK 0x8000ull
-#define RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK 0x20000ull
-#define RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_COR_ERR_SMASK 0x4000000000ull
-#define RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK 0x2000000000ull
-#define RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK 0x100ull
-#define RCV_ERR_STATUS_RX_RCV_DATA_COR_ERR_SMASK 0x20ull
-#define RCV_ERR_STATUS_RX_RCV_DATA_UNC_ERR_SMASK 0x10ull
-#define RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK 0x1000ull
-#define RCV_ERR_STATUS_RX_RCV_HDR_COR_ERR_SMASK 0x8ull
-#define RCV_ERR_STATUS_RX_RCV_HDR_UNC_ERR_SMASK 0x4ull
-#define RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_COR_ERR_SMASK 0x80ull
-#define RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK 0x40ull
-#define RCV_HDR_ADDR (RXE + 0x000000100028)
-#define RCV_HDR_CNT (RXE + 0x000000100030)
-#define RCV_HDR_CNT_CNT_MASK 0x1FFull
-#define RCV_HDR_CNT_CNT_SHIFT 0
-#define RCV_HDR_ENT_SIZE (RXE + 0x000000100038)
-#define RCV_HDR_ENT_SIZE_ENT_SIZE_MASK 0x7ull
-#define RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT 0
-#define RCV_HDR_HEAD (RXE + 0x000000300008)
-#define RCV_HDR_HEAD_COUNTER_MASK 0xFFull
-#define RCV_HDR_HEAD_COUNTER_SHIFT 32
-#define RCV_HDR_HEAD_HEAD_MASK 0x7FFFFull
-#define RCV_HDR_HEAD_HEAD_SHIFT 0
-#define RCV_HDR_HEAD_HEAD_SMASK 0x7FFFFull
-#define RCV_HDR_OVFL_CNT (RXE + 0x000000100058)
-#define RCV_HDR_SIZE (RXE + 0x000000100040)
-#define RCV_HDR_SIZE_HDR_SIZE_MASK 0x1Full
-#define RCV_HDR_SIZE_HDR_SIZE_SHIFT 0
-#define RCV_HDR_TAIL (RXE + 0x000000300000)
-#define RCV_HDR_TAIL_ADDR (RXE + 0x000000100048)
-#define RCV_KEY_CTRL (RXE + 0x000000100020)
-#define RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK 0x200000000ull
-#define RCV_KEY_CTRL_JOB_KEY_VALUE_MASK 0xFFFFull
-#define RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT 0
-#define RCV_MULTICAST (RXE + 0x000000000030)
-#define RCV_PARTITION_KEY (RXE + 0x000000000200)
-#define RCV_PARTITION_KEY_PARTITION_KEY_A_MASK 0xFFFFull
-#define RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT 16
-#define RCV_QP_MAP_TABLE (RXE + 0x000000000100)
-#define RCV_RSM_CFG (RXE + 0x000000000600)
-#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK 0x1ull
-#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT 0
-#define RCV_RSM_CFG_PACKET_TYPE_SHIFT 60
-#define RCV_RSM_MAP_TABLE (RXE + 0x000000000900)
-#define RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK 0xFFull
-#define RCV_RSM_MATCH (RXE + 0x000000000800)
-#define RCV_RSM_MATCH_MASK1_SHIFT 0
-#define RCV_RSM_MATCH_MASK2_SHIFT 16
-#define RCV_RSM_MATCH_VALUE1_SHIFT 8
-#define RCV_RSM_MATCH_VALUE2_SHIFT 24
-#define RCV_RSM_SELECT (RXE + 0x000000000700)
-#define RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT 0
-#define RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT 16
-#define RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT 32
-#define RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT 44
-#define RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT 48
-#define RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT 60
-#define RCV_STATUS (RXE + 0x000000000008)
-#define RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK 0x1ull
-#define RCV_STATUS_RX_RBUF_INIT_DONE_SMASK 0x200ull
-#define RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK 0x40ull
-#define RCV_TID_CTRL (RXE + 0x000000100018)
-#define RCV_TID_CTRL_TID_BASE_INDEX_MASK 0x1FFFull
-#define RCV_TID_CTRL_TID_BASE_INDEX_SHIFT 0
-#define RCV_TID_CTRL_TID_PAIR_CNT_MASK 0x1FFull
-#define RCV_TID_CTRL_TID_PAIR_CNT_SHIFT 32
-#define RCV_TID_FLOW_TABLE (RXE + 0x000000300800)
-#define RCV_VL15 (RXE + 0x000000000048)
-#define SEND_BTH_QP (TXE + 0x0000000000A0)
-#define SEND_BTH_QP_KDETH_QP_MASK 0xFFull
-#define SEND_BTH_QP_KDETH_QP_SHIFT 16
-#define SEND_CM_CREDIT_USED_STATUS (TXE + 0x000000000510)
-#define SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK \
- 0x1000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK \
- 0x8000000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK \
- 0x2000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK \
- 0x4000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK \
- 0x8000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK \
- 0x10000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK \
- 0x20000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK \
- 0x40000000000000ull
-#define SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK \
- 0x80000000000000ull
-#define SEND_CM_CREDIT_VL (TXE + 0x000000000600)
-#define SEND_CM_CREDIT_VL15 (TXE + 0x000000000678)
-#define SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT 0
-#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK 0xFFFFull
-#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT 0
-#define SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK 0xFFFFull
-#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK 0xFFFFull
-#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT 16
-#define SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK 0xFFFF0000ull
-#define SEND_CM_CTRL (TXE + 0x000000000500)
-#define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
-#define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
-#define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
-#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
-#define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
-#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
-#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
-#define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK 0xFFFFull
-#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK 0xFFFFull
-#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT 32
-#define SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK 0xFFFF00000000ull
-#define SEND_CM_LOCAL_AU_TABLE0_TO3 (TXE + 0x000000000520)
-#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT 0
-#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT 16
-#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT 32
-#define SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT 48
-#define SEND_CM_LOCAL_AU_TABLE4_TO7 (TXE + 0x000000000528)
-#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT 0
-#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT 16
-#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT 32
-#define SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT 48
-#define SEND_CM_REMOTE_AU_TABLE0_TO3 (TXE + 0x000000000530)
-#define SEND_CM_REMOTE_AU_TABLE4_TO7 (TXE + 0x000000000538)
-#define SEND_CM_TIMER_CTRL (TXE + 0x000000000518)
-#define SEND_CONTEXTS (TXE + 0x000000000010)
-#define SEND_CONTEXT_SET_CTRL (TXE + 0x000000000200)
-#define SEND_COUNTER_ARRAY32 (TXE + 0x000000000300)
-#define SEND_COUNTER_ARRAY64 (TXE + 0x000000000400)
-#define SEND_CTRL (TXE + 0x000000000000)
-#define SEND_CTRL_CM_RESET_SMASK 0x4ull
-#define SEND_CTRL_SEND_ENABLE_SMASK 0x1ull
-#define SEND_CTRL_VL_ARBITER_ENABLE_SMASK 0x2ull
-#define SEND_CTXT_CHECK_ENABLE (TXE + 0x000000100080)
-#define SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK 0x1ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 0x4ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK 0x20ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK 0x8ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK 0x10ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 0x40ull
-#define SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK 0x2ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 0x20000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK \
- 0x200000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK 0x800ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK 0x400ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK 0x1000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK 0x2000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK \
- 0x100000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK 0x10000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK 0x200ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK 0x100ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK \
- 0x80000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK \
- 0x40000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK \
- 0x8000ull
-#define SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK \
- 0x4000ull
-#define SEND_CTXT_CHECK_JOB_KEY (TXE + 0x000000100090)
-#define SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK 0x100000000ull
-#define SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK 0xFFFF0000ull
-#define SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK 0xFFFFull
-#define SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT 0
-#define SEND_CTXT_CHECK_OPCODE (TXE + 0x0000001000A8)
-#define SEND_CTXT_CHECK_OPCODE_MASK_SHIFT 8
-#define SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT 0
-#define SEND_CTXT_CHECK_PARTITION_KEY (TXE + 0x000000100098)
-#define SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK 0xFFFFull
-#define SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT 0
-#define SEND_CTXT_CHECK_SLID (TXE + 0x0000001000A0)
-#define SEND_CTXT_CHECK_SLID_MASK_MASK 0xFFFFull
-#define SEND_CTXT_CHECK_SLID_MASK_SHIFT 16
-#define SEND_CTXT_CHECK_SLID_VALUE_MASK 0xFFFFull
-#define SEND_CTXT_CHECK_SLID_VALUE_SHIFT 0
-#define SEND_CTXT_CHECK_VL (TXE + 0x000000100088)
-#define SEND_CTXT_CREDIT_CTRL (TXE + 0x000000100010)
-#define SEND_CTXT_CREDIT_CTRL_CREDIT_INTR_SMASK 0x20000ull
-#define SEND_CTXT_CREDIT_CTRL_EARLY_RETURN_SMASK 0x10000ull
-#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_MASK 0x7FFull
-#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SHIFT 0
-#define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SMASK 0x7FFull
-#define SEND_CTXT_CREDIT_FORCE (TXE + 0x000000100028)
-#define SEND_CTXT_CREDIT_FORCE_FORCE_RETURN_SMASK 0x1ull
-#define SEND_CTXT_CREDIT_RETURN_ADDR (TXE + 0x000000100020)
-#define SEND_CTXT_CREDIT_RETURN_ADDR_ADDRESS_SMASK 0xFFFFFFFFFFC0ull
-#define SEND_CTXT_CTRL (TXE + 0x000000100000)
-#define SEND_CTXT_CTRL_CTXT_BASE_MASK 0x3FFFull
-#define SEND_CTXT_CTRL_CTXT_BASE_SHIFT 32
-#define SEND_CTXT_CTRL_CTXT_DEPTH_MASK 0x7FFull
-#define SEND_CTXT_CTRL_CTXT_DEPTH_SHIFT 48
-#define SEND_CTXT_CTRL_CTXT_ENABLE_SMASK 0x1ull
-#define SEND_CTXT_ERR_CLEAR (TXE + 0x000000100050)
-#define SEND_CTXT_ERR_MASK (TXE + 0x000000100048)
-#define SEND_CTXT_ERR_STATUS (TXE + 0x000000100040)
-#define SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK 0x2ull
-#define SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK 0x1ull
-#define SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK 0x4ull
-#define SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK 0x10ull
-#define SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK 0x8ull
-#define SEND_CTXT_STATUS (TXE + 0x000000100008)
-#define SEND_CTXT_STATUS_CTXT_HALTED_SMASK 0x1ull
-#define SEND_DMA_BASE_ADDR (TXE + 0x000000200010)
-#define SEND_DMA_CHECK_ENABLE (TXE + 0x000000200080)
-#define SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK 0x1ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK 0x4ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK 0x20ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK 0x8ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK 0x10ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 0x40ull
-#define SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK 0x2ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 0x20000ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 0x200000ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK \
- 0x100000ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK 0x200ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK 0x100ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK \
- 0x80000ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK 0x40000ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK \
- 0x8000ull
-#define SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK 0x4000ull
-#define SEND_DMA_CHECK_JOB_KEY (TXE + 0x000000200090)
-#define SEND_DMA_CHECK_OPCODE (TXE + 0x0000002000A8)
-#define SEND_DMA_CHECK_PARTITION_KEY (TXE + 0x000000200098)
-#define SEND_DMA_CHECK_SLID (TXE + 0x0000002000A0)
-#define SEND_DMA_CHECK_SLID_MASK_MASK 0xFFFFull
-#define SEND_DMA_CHECK_SLID_MASK_SHIFT 16
-#define SEND_DMA_CHECK_SLID_VALUE_MASK 0xFFFFull
-#define SEND_DMA_CHECK_SLID_VALUE_SHIFT 0
-#define SEND_DMA_CHECK_VL (TXE + 0x000000200088)
-#define SEND_DMA_CTRL (TXE + 0x000000200000)
-#define SEND_DMA_CTRL_SDMA_CLEANUP_SMASK 0x4ull
-#define SEND_DMA_CTRL_SDMA_ENABLE_SMASK 0x1ull
-#define SEND_DMA_CTRL_SDMA_HALT_SMASK 0x2ull
-#define SEND_DMA_CTRL_SDMA_INT_ENABLE_SMASK 0x8ull
-#define SEND_DMA_DESC_CNT (TXE + 0x000000200050)
-#define SEND_DMA_DESC_CNT_CNT_MASK 0xFFFFull
-#define SEND_DMA_DESC_CNT_CNT_SHIFT 0
-#define SEND_DMA_ENG_ERR_CLEAR (TXE + 0x000000200070)
-#define SEND_DMA_ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK 0x1ull
-#define SEND_DMA_ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT 18
-#define SEND_DMA_ENG_ERR_MASK (TXE + 0x000000200068)
-#define SEND_DMA_ENG_ERR_STATUS (TXE + 0x000000200060)
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK 0x8000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK 0x4000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK 0x10ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK 0x2ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK 0x40ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK 0x800ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK 0x1000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK \
- 0x40000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK 0x400ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK \
- 0x20000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK 0x80ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK 0x20ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK \
- 0x100ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK \
- 0x10000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK 0x8ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK 0x2000ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK 0x4ull
-#define SEND_DMA_ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK 0x1ull
-#define SEND_DMA_ENGINES (TXE + 0x000000000018)
-#define SEND_DMA_ERR_CLEAR (TXE + 0x000000000070)
-#define SEND_DMA_ERR_MASK (TXE + 0x000000000068)
-#define SEND_DMA_ERR_STATUS (TXE + 0x000000000060)
-#define SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK 0x2ull
-#define SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK 0x8ull
-#define SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK 0x4ull
-#define SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK 0x1ull
-#define SEND_DMA_HEAD (TXE + 0x000000200028)
-#define SEND_DMA_HEAD_ADDR (TXE + 0x000000200030)
-#define SEND_DMA_LEN_GEN (TXE + 0x000000200018)
-#define SEND_DMA_LEN_GEN_GENERATION_SHIFT 16
-#define SEND_DMA_LEN_GEN_LENGTH_SHIFT 6
-#define SEND_DMA_MEMORY (TXE + 0x0000002000B0)
-#define SEND_DMA_MEMORY_SDMA_MEMORY_CNT_SHIFT 16
-#define SEND_DMA_MEMORY_SDMA_MEMORY_INDEX_SHIFT 0
-#define SEND_DMA_MEM_SIZE (TXE + 0x000000000028)
-#define SEND_DMA_PRIORITY_THLD (TXE + 0x000000200038)
-#define SEND_DMA_RELOAD_CNT (TXE + 0x000000200048)
-#define SEND_DMA_STATUS (TXE + 0x000000200008)
-#define SEND_DMA_STATUS_ENG_CLEANED_UP_SMASK 0x200000000000000ull
-#define SEND_DMA_STATUS_ENG_HALTED_SMASK 0x100000000000000ull
-#define SEND_DMA_TAIL (TXE + 0x000000200020)
-#define SEND_EGRESS_CTXT_STATUS (TXE + 0x000000000800)
-#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK 0x10000ull
-#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT 0
-#define SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK \
- 0x3FFFull
-#define SEND_EGRESS_ERR_CLEAR (TXE + 0x000000000090)
-#define SEND_EGRESS_ERR_INFO (TXE + 0x000000000F00)
-#define SEND_EGRESS_ERR_INFO_BAD_PKT_LEN_ERR_SMASK 0x20000ull
-#define SEND_EGRESS_ERR_INFO_BYPASS_ERR_SMASK 0x800ull
-#define SEND_EGRESS_ERR_INFO_GRH_ERR_SMASK 0x400ull
-#define SEND_EGRESS_ERR_INFO_JOB_KEY_ERR_SMASK 0x4ull
-#define SEND_EGRESS_ERR_INFO_KDETH_PACKETS_ERR_SMASK 0x1000ull
-#define SEND_EGRESS_ERR_INFO_NON_KDETH_PACKETS_ERR_SMASK 0x2000ull
-#define SEND_EGRESS_ERR_INFO_OPCODE_ERR_SMASK 0x20ull
-#define SEND_EGRESS_ERR_INFO_PARTITION_KEY_ERR_SMASK 0x8ull
-#define SEND_EGRESS_ERR_INFO_PBC_STATIC_RATE_CONTROL_ERR_SMASK 0x100000ull
-#define SEND_EGRESS_ERR_INFO_PBC_TEST_ERR_SMASK 0x10000ull
-#define SEND_EGRESS_ERR_INFO_RAW_ERR_SMASK 0x100ull
-#define SEND_EGRESS_ERR_INFO_RAW_IPV6_ERR_SMASK 0x200ull
-#define SEND_EGRESS_ERR_INFO_SLID_ERR_SMASK 0x10ull
-#define SEND_EGRESS_ERR_INFO_TOO_LONG_BYPASS_PACKETS_ERR_SMASK 0x80000ull
-#define SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK 0x40000ull
-#define SEND_EGRESS_ERR_INFO_TOO_SMALL_BYPASS_PACKETS_ERR_SMASK 0x8000ull
-#define SEND_EGRESS_ERR_INFO_TOO_SMALL_IB_PACKETS_ERR_SMASK 0x4000ull
-#define SEND_EGRESS_ERR_INFO_VL_ERR_SMASK 0x2ull
-#define SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK 0x40ull
-#define SEND_EGRESS_ERR_MASK (TXE + 0x000000000088)
-#define SEND_EGRESS_ERR_SOURCE (TXE + 0x000000000F08)
-#define SEND_EGRESS_ERR_STATUS (TXE + 0x000000000080)
-#define SEND_EGRESS_ERR_STATUS_TX_CONFIG_PARITY_ERR_SMASK 0x8000ull
-#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_OVERRUN_ERR_SMASK \
- 0x200000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_PARITY_ERR_SMASK \
- 0x20000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK \
- 0x800000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_COR_ERR_SMASK \
- 0x2000000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_UNC_ERR_SMASK \
- 0x200000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR_SMASK \
- 0x8ull
-#define SEND_EGRESS_ERR_STATUS_TX_HCRC_INSERTION_ERR_SMASK \
- 0x400000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_ILLEGAL_VL_ERR_SMASK 0x1000ull
-#define SEND_EGRESS_ERR_STATUS_TX_INCORRECT_LINK_STATE_ERR_SMASK 0x20ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_CSR_PARITY_ERR_SMASK 0x2000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO0_COR_ERR_SMASK \
- 0x1000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR_SMASK \
- 0x100000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO1_COR_ERR_SMASK \
- 0x2000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR_SMASK \
- 0x200000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO2_COR_ERR_SMASK \
- 0x4000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR_SMASK \
- 0x400000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO3_COR_ERR_SMASK \
- 0x8000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR_SMASK \
- 0x800000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO4_COR_ERR_SMASK \
- 0x10000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR_SMASK \
- 0x1000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO5_COR_ERR_SMASK \
- 0x20000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR_SMASK \
- 0x2000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO6_COR_ERR_SMASK \
- 0x40000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR_SMASK \
- 0x4000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO7_COR_ERR_SMASK \
- 0x80000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR_SMASK \
- 0x8000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO8_COR_ERR_SMASK \
- 0x100000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR_SMASK \
- 0x10000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_LINKDOWN_ERR_SMASK 0x10ull
-#define SEND_EGRESS_ERR_STATUS_TX_PIO_LAUNCH_INTF_PARITY_ERR_SMASK 0x80ull
-#define SEND_EGRESS_ERR_STATUS_TX_PKT_INTEGRITY_MEM_COR_ERR_SMASK 0x1ull
-#define SEND_EGRESS_ERR_STATUS_TX_PKT_INTEGRITY_MEM_UNC_ERR_SMASK 0x2ull
-#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_COR_ERR_SMASK \
- 0x1000000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_CSR_UNC_ERR_SMASK \
- 0x8000000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_READ_PIO_MEMORY_UNC_ERR_SMASK \
- 0x100000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_COR_ERR_SMASK \
- 0x800000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_CSR_UNC_ERR_SMASK \
- 0x4000000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_READ_SDMA_MEMORY_UNC_ERR_SMASK \
- 0x80000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SB_HDR_COR_ERR_SMASK 0x400000000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SB_HDR_UNC_ERR_SMASK 0x40000000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SBRD_CTL_CSR_PARITY_ERR_SMASK 0x4000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR_SMASK \
- 0x800ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA0_DISALLOWED_PACKET_ERR_SMASK \
- 0x10000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA10_DISALLOWED_PACKET_ERR_SMASK \
- 0x4000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA11_DISALLOWED_PACKET_ERR_SMASK \
- 0x8000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA12_DISALLOWED_PACKET_ERR_SMASK \
- 0x10000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA13_DISALLOWED_PACKET_ERR_SMASK \
- 0x20000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA14_DISALLOWED_PACKET_ERR_SMASK \
- 0x40000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA15_DISALLOWED_PACKET_ERR_SMASK \
- 0x80000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA1_DISALLOWED_PACKET_ERR_SMASK \
- 0x20000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA2_DISALLOWED_PACKET_ERR_SMASK \
- 0x40000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA3_DISALLOWED_PACKET_ERR_SMASK \
- 0x80000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA4_DISALLOWED_PACKET_ERR_SMASK \
- 0x100000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA5_DISALLOWED_PACKET_ERR_SMASK \
- 0x200000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA6_DISALLOWED_PACKET_ERR_SMASK \
- 0x400000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA7_DISALLOWED_PACKET_ERR_SMASK \
- 0x800000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA8_DISALLOWED_PACKET_ERR_SMASK \
- 0x1000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA9_DISALLOWED_PACKET_ERR_SMASK \
- 0x2000000ull
-#define SEND_EGRESS_ERR_STATUS_TX_SDMA_LAUNCH_INTF_PARITY_ERR_SMASK \
- 0x100ull
-#define SEND_EGRESS_SEND_DMA_STATUS (TXE + 0x000000000E00)
-#define SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT 0
-#define SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
- 0x3FFFull
-#define SEND_ERR_CLEAR (TXE + 0x0000000000F0)
-#define SEND_ERR_MASK (TXE + 0x0000000000E8)
-#define SEND_ERR_STATUS (TXE + 0x0000000000E0)
-#define SEND_ERR_STATUS_SEND_CSR_PARITY_ERR_SMASK 0x1ull
-#define SEND_ERR_STATUS_SEND_CSR_READ_BAD_ADDR_ERR_SMASK 0x2ull
-#define SEND_ERR_STATUS_SEND_CSR_WRITE_BAD_ADDR_ERR_SMASK 0x4ull
-#define SEND_HIGH_PRIORITY_LIMIT (TXE + 0x000000000030)
-#define SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK 0x3FFFull
-#define SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT 0
-#define SEND_HIGH_PRIORITY_LIST (TXE + 0x000000000180)
-#define SEND_LEN_CHECK0 (TXE + 0x0000000000D0)
-#define SEND_LEN_CHECK0_LEN_VL0_MASK 0xFFFull
-#define SEND_LEN_CHECK0_LEN_VL1_SHIFT 12
-#define SEND_LEN_CHECK1 (TXE + 0x0000000000D8)
-#define SEND_LEN_CHECK1_LEN_VL15_MASK 0xFFFull
-#define SEND_LEN_CHECK1_LEN_VL15_SHIFT 48
-#define SEND_LEN_CHECK1_LEN_VL4_MASK 0xFFFull
-#define SEND_LEN_CHECK1_LEN_VL5_SHIFT 12
-#define SEND_LOW_PRIORITY_LIST (TXE + 0x000000000100)
-#define SEND_LOW_PRIORITY_LIST_VL_MASK 0x7ull
-#define SEND_LOW_PRIORITY_LIST_VL_SHIFT 16
-#define SEND_LOW_PRIORITY_LIST_WEIGHT_MASK 0xFFull
-#define SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT 0
-#define SEND_PIO_ERR_CLEAR (TXE + 0x000000000050)
-#define SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK 0x20000ull
-#define SEND_PIO_ERR_MASK (TXE + 0x000000000048)
-#define SEND_PIO_ERR_STATUS (TXE + 0x000000000040)
-#define SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
- 0x1000000ull
-#define SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK 0x8000ull
-#define SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK 0x4ull
-#define SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
- 0x100000000ull
-#define SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK 0x100000ull
-#define SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK 0x80000ull
-#define SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK 0x20000ull
-#define SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
- 0x200000000ull
-#define SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK 0x20ull
-#define SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
- 0x400000000ull
-#define SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK 0x40ull
-#define SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK \
- 0x800000000ull
-#define SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK 0x200ull
-#define SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK 0x40000ull
-#define SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK 0x10000000ull
-#define SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK 0x10000ull
-#define SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK 0x20000000ull
-#define SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK 0x8ull
-#define SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK 0x10ull
-#define SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK 0x80ull
-#define SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
- 0x100ull
-#define SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK 0x400ull
-#define SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK 0x400000ull
-#define SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK 0x8000000ull
-#define SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK 0x4000000ull
-#define SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK 0x2000000ull
-#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK 0x2000ull
-#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK 0x800ull
-#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK 0x4000ull
-#define SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK 0x1000ull
-#define SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK 0x2ull
-#define SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK 0x1ull
-#define SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK 0x200000ull
-#define SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK 0x800000ull
-#define SEND_PIO_INIT_CTXT (TXE + 0x000000000038)
-#define SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK 0x1ull
-#define SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK 0xFFull
-#define SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT 8
-#define SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK 0x8ull
-#define SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK 0x4ull
-#define SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK 0x2ull
-#define SEND_PIO_MEM_SIZE (TXE + 0x000000000020)
-#define SEND_SC2VLT0 (TXE + 0x0000000000B0)
-#define SEND_SC2VLT0_SC0_SHIFT 0
-#define SEND_SC2VLT0_SC1_SHIFT 8
-#define SEND_SC2VLT0_SC2_SHIFT 16
-#define SEND_SC2VLT0_SC3_SHIFT 24
-#define SEND_SC2VLT0_SC4_SHIFT 32
-#define SEND_SC2VLT0_SC5_SHIFT 40
-#define SEND_SC2VLT0_SC6_SHIFT 48
-#define SEND_SC2VLT0_SC7_SHIFT 56
-#define SEND_SC2VLT1 (TXE + 0x0000000000B8)
-#define SEND_SC2VLT1_SC10_SHIFT 16
-#define SEND_SC2VLT1_SC11_SHIFT 24
-#define SEND_SC2VLT1_SC12_SHIFT 32
-#define SEND_SC2VLT1_SC13_SHIFT 40
-#define SEND_SC2VLT1_SC14_SHIFT 48
-#define SEND_SC2VLT1_SC15_SHIFT 56
-#define SEND_SC2VLT1_SC8_SHIFT 0
-#define SEND_SC2VLT1_SC9_SHIFT 8
-#define SEND_SC2VLT2 (TXE + 0x0000000000C0)
-#define SEND_SC2VLT2_SC16_SHIFT 0
-#define SEND_SC2VLT2_SC17_SHIFT 8
-#define SEND_SC2VLT2_SC18_SHIFT 16
-#define SEND_SC2VLT2_SC19_SHIFT 24
-#define SEND_SC2VLT2_SC20_SHIFT 32
-#define SEND_SC2VLT2_SC21_SHIFT 40
-#define SEND_SC2VLT2_SC22_SHIFT 48
-#define SEND_SC2VLT2_SC23_SHIFT 56
-#define SEND_SC2VLT3 (TXE + 0x0000000000C8)
-#define SEND_SC2VLT3_SC24_SHIFT 0
-#define SEND_SC2VLT3_SC25_SHIFT 8
-#define SEND_SC2VLT3_SC26_SHIFT 16
-#define SEND_SC2VLT3_SC27_SHIFT 24
-#define SEND_SC2VLT3_SC28_SHIFT 32
-#define SEND_SC2VLT3_SC29_SHIFT 40
-#define SEND_SC2VLT3_SC30_SHIFT 48
-#define SEND_SC2VLT3_SC31_SHIFT 56
-#define SEND_STATIC_RATE_CONTROL (TXE + 0x0000000000A8)
-#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT 0
-#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK 0xFFFFull
-#define PCIE_CFG_REG_PL2 (PCIE + 0x000000000708)
-#define PCIE_CFG_REG_PL3 (PCIE + 0x00000000070C)
-#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT 27
-#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK 0x38000000
-#define PCIE_CFG_REG_PL102 (PCIE + 0x000000000898)
-#define PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT 12
-#define PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT 6
-#define PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT 0
-#define PCIE_CFG_REG_PL103 (PCIE + 0x00000000089C)
-#define PCIE_CFG_REG_PL105 (PCIE + 0x0000000008A4)
-#define PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK 0x1ull
-#define PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT 24
-#define PCIE_CFG_REG_PL100 (PCIE + 0x000000000890)
-#define PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK 0x400ull
-#define PCIE_CFG_REG_PL101 (PCIE + 0x000000000894)
-#define PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT 6
-#define PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT 0
-#define PCIE_CFG_REG_PL106 (PCIE + 0x0000000008A8)
-#define PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT 8
-#define PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK 0x20ull
-#define PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK 0x10ull
-#define CCE_INT_BLOCKED (CCE + 0x000000110C00)
-#define SEND_DMA_IDLE_CNT (TXE + 0x000000200040)
-#define SEND_DMA_DESC_FETCHED_CNT (TXE + 0x000000200058)
-#define CCE_MSIX_PBA_OFFSET 0X0110000
-
-#endif /* DEF_CHIP_REG */
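
A note on the naming convention running through the deleted chip_registers.h above: a bare name such as CCE_REVISION is a CSR offset, a *_MASK/*_SHIFT pair describes a field in unshifted form, and *_SMASK is the same mask already shifted into bit position. The following is a minimal sketch of how such triplets are conventionally consumed; read_csr()/write_csr() and struct hfi1_devdata are assumed here to match the driver's usual CSR accessors and per-device state, not guaranteed exact signatures.

/* MASK is unshifted: shift the field down first, then mask it off. */
static u8 chip_rev_major(struct hfi1_devdata *dd)
{
	u64 reg = read_csr(dd, CCE_REVISION);	/* assumed 64-bit CSR read helper */

	return (reg >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) &
	       CCE_REVISION_CHIP_REV_MAJOR_MASK;
}

/* SMASK is pre-shifted: it can be OR'd into the register directly. */
static void send_enable(struct hfi1_devdata *dd)
{
	u64 reg = read_csr(dd, SEND_CTRL);	/* read-modify-write of the control CSR */

	write_csr(dd, SEND_CTRL, reg | SEND_CTRL_SEND_ENABLE_SMASK);	/* assumed write helper */
}

The same read-modify-write pattern applies to every *_MASK/*_SHIFT/*_SMASK triplet defined above.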
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h
deleted file mode 100644
index e9b6bb322..000000000
--- a/drivers/staging/rdma/hfi1/common.h
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef _COMMON_H
-#define _COMMON_H
-
-#include <rdma/hfi/hfi1_user.h>
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * HFI1_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fast path code
- * HFI1_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fast path code
- * _HFI1_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- */
-
-/*
- * If a packet's QP[23:16] bits match this value, then it is
- * a PSM packet and the hardware will expect a KDETH header
- * following the BTH.
- */
-#define DEFAULT_KDETH_QP 0x80
-
-/* driver/hw feature set bitmask */
-#define HFI1_CAP_USER_SHIFT 24
-#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
-/* locked flag - if set, only HFI1_CAP_WRITABLE_MASK bits can be set */
-#define HFI1_CAP_LOCKED_SHIFT 63
-#define HFI1_CAP_LOCKED_MASK 0x1ULL
-#define HFI1_CAP_LOCKED_SMASK (HFI1_CAP_LOCKED_MASK << HFI1_CAP_LOCKED_SHIFT)
-/* extra bits used between kernel and user processes */
-#define HFI1_CAP_MISC_SHIFT (HFI1_CAP_USER_SHIFT * 2)
-#define HFI1_CAP_MISC_MASK ((1ULL << (HFI1_CAP_LOCKED_SHIFT - \
- HFI1_CAP_MISC_SHIFT)) - 1)
-
-#define HFI1_CAP_KSET(cap) ({ hfi1_cap_mask |= HFI1_CAP_##cap; hfi1_cap_mask; })
-#define HFI1_CAP_KCLEAR(cap) \
- ({ \
- hfi1_cap_mask &= ~HFI1_CAP_##cap; \
- hfi1_cap_mask; \
- })
-#define HFI1_CAP_USET(cap) \
- ({ \
- hfi1_cap_mask |= (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
- hfi1_cap_mask; \
- })
-#define HFI1_CAP_UCLEAR(cap) \
- ({ \
- hfi1_cap_mask &= ~(HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT); \
- hfi1_cap_mask; \
- })
-#define HFI1_CAP_SET(cap) \
- ({ \
- hfi1_cap_mask |= (HFI1_CAP_##cap | (HFI1_CAP_##cap << \
- HFI1_CAP_USER_SHIFT)); \
- hfi1_cap_mask; \
- })
-#define HFI1_CAP_CLEAR(cap) \
- ({ \
- hfi1_cap_mask &= ~(HFI1_CAP_##cap | \
- (HFI1_CAP_##cap << HFI1_CAP_USER_SHIFT)); \
- hfi1_cap_mask; \
- })
-#define HFI1_CAP_LOCK() \
- ({ hfi1_cap_mask |= HFI1_CAP_LOCKED_SMASK; hfi1_cap_mask; })
-#define HFI1_CAP_LOCKED() (!!(hfi1_cap_mask & HFI1_CAP_LOCKED_SMASK))
-/*
- * The set of capability bits that can be changed after initial load.
- * This set is the same for kernel and user contexts. However, for
- * user contexts, the set can be further filtered by using the
- * HFI1_CAP_RESERVED_MASK bits.
- */
-#define HFI1_CAP_WRITABLE_MASK (HFI1_CAP_SDMA_AHG | \
- HFI1_CAP_HDRSUPP | \
- HFI1_CAP_MULTI_PKT_EGR | \
- HFI1_CAP_NODROP_RHQ_FULL | \
- HFI1_CAP_NODROP_EGR_FULL | \
- HFI1_CAP_ALLOW_PERM_JKEY | \
- HFI1_CAP_STATIC_RATE_CTRL | \
- HFI1_CAP_PRINT_UNIMPL | \
- HFI1_CAP_TID_UNMAP)
-/*
- * A set of capability bits that are "global" and are not allowed to be
- * set in the user bitmask.
- */
-#define HFI1_CAP_RESERVED_MASK ((HFI1_CAP_SDMA | \
- HFI1_CAP_USE_SDMA_HEAD | \
- HFI1_CAP_EXTENDED_PSN | \
- HFI1_CAP_PRINT_UNIMPL | \
- HFI1_CAP_NO_INTEGRITY | \
- HFI1_CAP_PKEY_CHECK) << \
- HFI1_CAP_USER_SHIFT)
-/*
- * Set of capabilities that must be enabled for the kernel context in
- * order to be allowed for user contexts as well.
- */
-#define HFI1_CAP_MUST_HAVE_KERN (HFI1_CAP_STATIC_RATE_CTRL)
-/* Default enabled capabilities (both kernel and user) */
-#define HFI1_CAP_MASK_DEFAULT (HFI1_CAP_HDRSUPP | \
- HFI1_CAP_NODROP_RHQ_FULL | \
- HFI1_CAP_NODROP_EGR_FULL | \
- HFI1_CAP_SDMA | \
- HFI1_CAP_PRINT_UNIMPL | \
- HFI1_CAP_STATIC_RATE_CTRL | \
- HFI1_CAP_PKEY_CHECK | \
- HFI1_CAP_MULTI_PKT_EGR | \
- HFI1_CAP_EXTENDED_PSN | \
- ((HFI1_CAP_HDRSUPP | \
- HFI1_CAP_MULTI_PKT_EGR | \
- HFI1_CAP_STATIC_RATE_CTRL | \
- HFI1_CAP_PKEY_CHECK | \
- HFI1_CAP_EARLY_CREDIT_RETURN) << \
- HFI1_CAP_USER_SHIFT))
-/*
- * A bitmask of kernel/global capabilities that should be communicated
- * to user level processes.
- */
-#define HFI1_CAP_K2U (HFI1_CAP_SDMA | \
- HFI1_CAP_EXTENDED_PSN | \
- HFI1_CAP_PKEY_CHECK | \
- HFI1_CAP_NO_INTEGRITY)
-
-#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR)
-
-#ifndef HFI1_KERN_TYPE
-#define HFI1_KERN_TYPE 0
-#endif
-
-/*
- * Similarly, this is the kernel version going back to the user. It's
- * slightly different, in that we want to tell if the driver was built as
- * part of an Intel release, or from a driver from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-Intel and 1 for Intel-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of hfi1_base_info, so the user code can in turn
- * check for compatibility with the kernel.
- */
-#define HFI1_KERN_SWVERSION ((HFI1_KERN_TYPE << 31) | HFI1_USER_SWVERSION)
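For illustration, a user-side decode of this packed value might look like the
sketch below (the helper name is assumed; the field widths follow the
definitions above: bit 31 is the Intel-built flag, bits 30:16 the major, and
bits 15:0 the minor version):

#include <stdint.h>
#include <stdio.h>

/* hypothetical decode of a spi_sw_version value reported to user code */
static void ex_decode_sw_version(uint32_t v)
{
	printf("intel-built: %u  major: %u  minor: %u\n",
	       v >> 31, (v >> 16) & 0x7fff, v & 0xffff);
}

int main(void)
{
	ex_decode_sw_version((1u << 31) | (6u << 16) | 3u);
	return 0;
}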
-
-/*
- * Define the driver version number. This is something that refers only
- * to the driver itself, not the software interfaces it supports.
- */
-#ifndef HFI1_DRIVER_VERSION_BASE
-#define HFI1_DRIVER_VERSION_BASE "0.9-294"
-#endif
-
-/* create the final driver version string */
-#ifdef HFI1_IDSTR
-#define HFI1_DRIVER_VERSION HFI1_DRIVER_VERSION_BASE " " HFI1_IDSTR
-#else
-#define HFI1_DRIVER_VERSION HFI1_DRIVER_VERSION_BASE
-#endif
-
-/*
- * Diagnostics can send a packet by writing the following
- * struct to the diag packet special file.
- *
- * This allows a custom PBC qword, so that special modes and deliberate
- * changes to CRCs can be used.
- */
-#define _DIAG_PKT_VERS 1
-struct diag_pkt {
- __u16 version; /* structure version */
- __u16 unit; /* which device */
- __u16 sw_index; /* send sw index to use */
- __u16 len; /* data length, in bytes */
- __u16 port; /* port number */
- __u16 unused;
- __u32 flags; /* call flags */
- __u64 data; /* user data pointer */
- __u64 pbc; /* PBC for the packet */
-};
-
-/* diag_pkt flags */
-#define F_DIAGPKT_WAIT 0x1 /* wait until packet is sent */
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software.
- */
-
-/*
- * Receive Header Flags
- */
-#define RHF_PKT_LEN_SHIFT 0
-#define RHF_PKT_LEN_MASK 0xfffull
-#define RHF_PKT_LEN_SMASK (RHF_PKT_LEN_MASK << RHF_PKT_LEN_SHIFT)
-
-#define RHF_RCV_TYPE_SHIFT 12
-#define RHF_RCV_TYPE_MASK 0x7ull
-#define RHF_RCV_TYPE_SMASK (RHF_RCV_TYPE_MASK << RHF_RCV_TYPE_SHIFT)
-
-#define RHF_USE_EGR_BFR_SHIFT 15
-#define RHF_USE_EGR_BFR_MASK 0x1ull
-#define RHF_USE_EGR_BFR_SMASK (RHF_USE_EGR_BFR_MASK << RHF_USE_EGR_BFR_SHIFT)
-
-#define RHF_EGR_INDEX_SHIFT 16
-#define RHF_EGR_INDEX_MASK 0x7ffull
-#define RHF_EGR_INDEX_SMASK (RHF_EGR_INDEX_MASK << RHF_EGR_INDEX_SHIFT)
-
-#define RHF_DC_INFO_SHIFT 27
-#define RHF_DC_INFO_MASK 0x1ull
-#define RHF_DC_INFO_SMASK (RHF_DC_INFO_MASK << RHF_DC_INFO_SHIFT)
-
-#define RHF_RCV_SEQ_SHIFT 28
-#define RHF_RCV_SEQ_MASK 0xfull
-#define RHF_RCV_SEQ_SMASK (RHF_RCV_SEQ_MASK << RHF_RCV_SEQ_SHIFT)
-
-#define RHF_EGR_OFFSET_SHIFT 32
-#define RHF_EGR_OFFSET_MASK 0xfffull
-#define RHF_EGR_OFFSET_SMASK (RHF_EGR_OFFSET_MASK << RHF_EGR_OFFSET_SHIFT)
-#define RHF_HDRQ_OFFSET_SHIFT 44
-#define RHF_HDRQ_OFFSET_MASK 0x1ffull
-#define RHF_HDRQ_OFFSET_SMASK (RHF_HDRQ_OFFSET_MASK << RHF_HDRQ_OFFSET_SHIFT)
-#define RHF_K_HDR_LEN_ERR (0x1ull << 53)
-#define RHF_DC_UNC_ERR (0x1ull << 54)
-#define RHF_DC_ERR (0x1ull << 55)
-#define RHF_RCV_TYPE_ERR_SHIFT 56
-#define RHF_RCV_TYPE_ERR_MASK 0x7ul
-#define RHF_RCV_TYPE_ERR_SMASK (RHF_RCV_TYPE_ERR_MASK << RHF_RCV_TYPE_ERR_SHIFT)
-#define RHF_TID_ERR (0x1ull << 59)
-#define RHF_LEN_ERR (0x1ull << 60)
-#define RHF_ECC_ERR (0x1ull << 61)
-#define RHF_VCRC_ERR (0x1ull << 62)
-#define RHF_ICRC_ERR (0x1ull << 63)
-
-#define RHF_ERROR_SMASK 0xffe0000000000000ull /* bits 63:53 */
-
-/* RHF receive types */
-#define RHF_RCV_TYPE_EXPECTED 0
-#define RHF_RCV_TYPE_EAGER 1
-#define RHF_RCV_TYPE_IB 2 /* normal IB, IB Raw, or IPv6 */
-#define RHF_RCV_TYPE_ERROR 3
-#define RHF_RCV_TYPE_BYPASS 4
-#define RHF_RCV_TYPE_INVALID5 5
-#define RHF_RCV_TYPE_INVALID6 6
-#define RHF_RCV_TYPE_INVALID7 7
-
-/* RHF receive type error - expected packet errors */
-#define RHF_RTE_EXPECTED_FLOW_SEQ_ERR 0x2
-#define RHF_RTE_EXPECTED_FLOW_GEN_ERR 0x4
-
-/* RHF receive type error - eager packet errors */
-#define RHF_RTE_EAGER_NO_ERR 0x0
-
-/* RHF receive type error - IB packet errors */
-#define RHF_RTE_IB_NO_ERR 0x0
-
-/* RHF receive type error - error packet errors */
-#define RHF_RTE_ERROR_NO_ERR 0x0
-#define RHF_RTE_ERROR_OP_CODE_ERR 0x1
-#define RHF_RTE_ERROR_KHDR_MIN_LEN_ERR 0x2
-#define RHF_RTE_ERROR_KHDR_HCRC_ERR 0x3
-#define RHF_RTE_ERROR_KHDR_KVER_ERR 0x4
-#define RHF_RTE_ERROR_CONTEXT_ERR 0x5
-#define RHF_RTE_ERROR_KHDR_TID_ERR 0x6
-
-/* RHF receive type error - bypass packet errors */
-#define RHF_RTE_BYPASS_NO_ERR 0x0
-
-/*
- * This structure contains the first field common to all protocols
- * that employ this chip.
- */
-struct hfi1_message_header {
- __be16 lrh[4];
-};
-
-/* IB - LRH header constants */
-#define HFI1_LRH_GRH 0x0003 /* first word of IB LRH - next header: GRH */
-#define HFI1_LRH_BTH 0x0002 /* first word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define LIM_MGMT_P_KEY 0x7FFF
-#define FULL_MGMT_P_KEY 0xFFFF
-
-#define DEFAULT_P_KEY LIM_MGMT_P_KEY
-#define HFI1_AETH_CREDIT_SHIFT 24
-#define HFI1_AETH_CREDIT_MASK 0x1F
-#define HFI1_AETH_CREDIT_INVAL 0x1F
-#define HFI1_MSN_MASK 0xFFFFFF
-#define HFI1_FECN_SHIFT 31
-#define HFI1_FECN_MASK 1
-#define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT)
-#define HFI1_BECN_SHIFT 30
-#define HFI1_BECN_MASK 1
-#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
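The FECN/BECN fields above extract with single-bit shifts; a sketch, assuming
the bits arrive in a host-order 32-bit word and using hypothetical helper
names:

#include <stdint.h>

#define EX_FECN_SHIFT 31	/* mirrors HFI1_FECN_SHIFT */
#define EX_BECN_SHIFT 30	/* mirrors HFI1_BECN_SHIFT */

/* hypothetical helpers: extract the congestion bits from a 32-bit word */
static inline int ex_fecn(uint32_t w) { return (w >> EX_FECN_SHIFT) & 1; }
static inline int ex_becn(uint32_t w) { return (w >> EX_BECN_SHIFT) & 1; }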
-
-static inline __u64 rhf_to_cpu(const __le32 *rbuf)
-{
- return __le64_to_cpu(*((__le64 *)rbuf));
-}
-
-static inline u64 rhf_err_flags(u64 rhf)
-{
- return rhf & RHF_ERROR_SMASK;
-}
-
-static inline u32 rhf_rcv_type(u64 rhf)
-{
- return (rhf >> RHF_RCV_TYPE_SHIFT) & RHF_RCV_TYPE_MASK;
-}
-
-static inline u32 rhf_rcv_type_err(u64 rhf)
-{
- return (rhf >> RHF_RCV_TYPE_ERR_SHIFT) & RHF_RCV_TYPE_ERR_MASK;
-}
-
-/* return size is in bytes, not DWORDs */
-static inline u32 rhf_pkt_len(u64 rhf)
-{
- return ((rhf & RHF_PKT_LEN_SMASK) >> RHF_PKT_LEN_SHIFT) << 2;
-}
-
-static inline u32 rhf_egr_index(u64 rhf)
-{
- return (rhf >> RHF_EGR_INDEX_SHIFT) & RHF_EGR_INDEX_MASK;
-}
-
-static inline u32 rhf_rcv_seq(u64 rhf)
-{
- return (rhf >> RHF_RCV_SEQ_SHIFT) & RHF_RCV_SEQ_MASK;
-}
-
-/* returned offset is in DWORDS */
-static inline u32 rhf_hdrq_offset(u64 rhf)
-{
- return (rhf >> RHF_HDRQ_OFFSET_SHIFT) & RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline u64 rhf_use_egr_bfr(u64 rhf)
-{
- return rhf & RHF_USE_EGR_BFR_SMASK;
-}
-
-static inline u64 rhf_dc_info(u64 rhf)
-{
- return rhf & RHF_DC_INFO_SMASK;
-}
-
-static inline u32 rhf_egr_buf_offset(u64 rhf)
-{
- return (rhf >> RHF_EGR_OFFSET_SHIFT) & RHF_EGR_OFFSET_MASK;
-}
-#endif /* _COMMON_H */
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c
deleted file mode 100644
index dbab9d9cc..000000000
--- a/drivers/staging/rdma/hfi1/debugfs.c
+++ /dev/null
@@ -1,1145 +0,0 @@
-#ifdef CONFIG_DEBUG_FS
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/module.h>
-
-#include "hfi.h"
-#include "debugfs.h"
-#include "device.h"
-#include "qp.h"
-#include "sdma.h"
-
-static struct dentry *hfi1_dbg_root;
-
-#define private2dd(file) (file_inode(file)->i_private)
-#define private2ppd(file) (file_inode(file)->i_private)
-
-#define DEBUGFS_SEQ_FILE_OPS(name) \
-static const struct seq_operations _##name##_seq_ops = { \
- .start = _##name##_seq_start, \
- .next = _##name##_seq_next, \
- .stop = _##name##_seq_stop, \
- .show = _##name##_seq_show \
-}
-
-#define DEBUGFS_SEQ_FILE_OPEN(name) \
-static int _##name##_open(struct inode *inode, struct file *s) \
-{ \
- struct seq_file *seq; \
- int ret; \
- ret = seq_open(s, &_##name##_seq_ops); \
- if (ret) \
- return ret; \
- seq = s->private_data; \
- seq->private = inode->i_private; \
- return 0; \
-}
-
-#define DEBUGFS_FILE_OPS(name) \
-static const struct file_operations _##name##_file_ops = { \
- .owner = THIS_MODULE, \
- .open = _##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = seq_release \
-}
-
-#define DEBUGFS_FILE_CREATE(name, parent, data, ops, mode) \
-do { \
- struct dentry *ent; \
- ent = debugfs_create_file(name, mode, parent, \
- data, ops); \
- if (!ent) \
- pr_warn("create of %s failed\n", name); \
-} while (0)
-
-#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
- DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
-
-static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
-{
- struct hfi1_opcode_stats_perctx *opstats;
-
- rcu_read_lock();
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct hfi1_opcode_stats_perctx *opstats;
-
- ++*pos;
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int _opcode_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- loff_t i = *spos, j;
- u64 n_packets = 0, n_bytes = 0;
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
-
- for (j = 0; j < dd->first_user_ctxt; j++) {
- if (!dd->rcd[j])
- continue;
- n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
- n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
- }
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long)n_packets,
- (unsigned long long)n_bytes);
-
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(opcode_stats);
-DEBUGFS_SEQ_FILE_OPEN(opcode_stats)
-DEBUGFS_FILE_OPS(opcode_stats);
-
-static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
-
- if (!*pos)
- return SEQ_START_TOKEN;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN)
- return pos;
-
- ++*pos;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _ctx_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos;
- loff_t i, j;
- u64 n_packets = 0;
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN) {
- seq_puts(s, "Ctx:npkts\n");
- return 0;
- }
-
- spos = v;
- i = *spos;
-
- if (!dd->rcd[i])
- return SEQ_SKIP;
-
- for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
- n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
-
- if (!n_packets)
- return SEQ_SKIP;
-
- seq_printf(s, " %llu:%llu\n", i, n_packets);
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(ctx_stats);
-DEBUGFS_SEQ_FILE_OPEN(ctx_stats)
-DEBUGFS_FILE_OPS(ctx_stats);
-
-static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
-{
- struct qp_iter *iter;
- loff_t n = *pos;
-
- rcu_read_lock();
- iter = qp_iter_init(s->private);
- if (!iter)
- return NULL;
-
- while (n--) {
- if (qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
- }
-
- return iter;
-}
-
-static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
- loff_t *pos)
-{
- struct qp_iter *iter = iter_ptr;
-
- (*pos)++;
-
- if (qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
-
- return iter;
-}
-
-static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
-__releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
-{
- struct qp_iter *iter = iter_ptr;
-
- if (!iter)
- return 0;
-
- qp_iter_print(s, iter);
-
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(qp_stats);
-DEBUGFS_SEQ_FILE_OPEN(qp_stats)
-DEBUGFS_FILE_OPS(qp_stats);
-
-static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
-{
- struct hfi1_ibdev *ibd;
- struct hfi1_devdata *dd;
-
- rcu_read_lock();
- ibd = (struct hfi1_ibdev *)s->private;
- dd = dd_from_dev(ibd);
- if (!dd->per_sdma || *pos >= dd->num_sdma)
- return NULL;
- return pos;
-}
-
-static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
-
- ++*pos;
- if (!dd->per_sdma || *pos >= dd->num_sdma)
- return NULL;
- return pos;
-}
-
-static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int _sdes_seq_show(struct seq_file *s, void *v)
-{
- struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
- struct hfi1_devdata *dd = dd_from_dev(ibd);
- loff_t *spos = v;
- loff_t i = *spos;
-
- sdma_seqfile_dump_sde(s, &dd->per_sdma[i]);
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(sdes);
-DEBUGFS_SEQ_FILE_OPEN(sdes)
-DEBUGFS_FILE_OPS(sdes);
-
-/* read the per-device counters */
-static ssize_t dev_counters_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct hfi1_devdata *dd;
- ssize_t rval;
-
- rcu_read_lock();
- dd = private2dd(file);
- avail = hfi1_read_cntrs(dd, NULL, &counters);
- rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
- rcu_read_unlock();
- return rval;
-}
-
-/* read the per-device counter names */
-static ssize_t dev_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct hfi1_devdata *dd;
- ssize_t rval;
-
- rcu_read_lock();
- dd = private2dd(file);
- avail = hfi1_read_cntrs(dd, &names, NULL);
- rval = simple_read_from_buffer(buf, count, ppos, names, avail);
- rcu_read_unlock();
- return rval;
-}
-
-struct counter_info {
- char *name;
- const struct file_operations ops;
-};
-
-/*
- * Could use file_inode(file)->i_ino to figure out which file,
- * instead of a separate routine for each, but for now, this works...
- */
-
-/* read the per-port names (same for each port) */
-static ssize_t portnames_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct hfi1_devdata *dd;
- ssize_t rval;
-
- rcu_read_lock();
- dd = private2dd(file);
- avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
- rval = simple_read_from_buffer(buf, count, ppos, names, avail);
- rcu_read_unlock();
- return rval;
-}
-
-/* read the per-port counters */
-static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct hfi1_pportdata *ppd;
- ssize_t rval;
-
- rcu_read_lock();
- ppd = private2ppd(file);
- avail = hfi1_read_portcntrs(ppd, NULL, &counters);
- rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
- rcu_read_unlock();
- return rval;
-}
-
-static void check_dyn_flag(u64 scratch0, char *p, int size, int *used,
- int this_hfi, int hfi, u32 flag, const char *what)
-{
- u32 mask;
-
- mask = flag << (hfi ? CR_DYN_SHIFT : 0);
- if (scratch0 & mask) {
- *used += scnprintf(p + *used, size - *used,
- " 0x%08x - HFI%d %s in use, %s device\n",
- mask, hfi, what,
- this_hfi == hfi ? "this" : "other");
- }
-}
-
-static ssize_t asic_flags_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd;
- u64 scratch0;
- char *tmp;
- int ret = 0;
- int size;
- int used;
- int i;
-
- rcu_read_lock();
- ppd = private2ppd(file);
- dd = ppd->dd;
- size = PAGE_SIZE;
- used = 0;
- tmp = kmalloc(size, GFP_KERNEL);
- if (!tmp) {
- rcu_read_unlock();
- return -ENOMEM;
- }
-
- scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
- used += scnprintf(tmp + used, size - used,
- "Resource flags: 0x%016llx\n", scratch0);
-
- /* check permanent flag */
- if (scratch0 & CR_THERM_INIT) {
- used += scnprintf(tmp + used, size - used,
- " 0x%08x - thermal monitoring initialized\n",
- (u32)CR_THERM_INIT);
- }
-
- /* check each dynamic flag on each HFI */
- for (i = 0; i < 2; i++) {
- check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
- CR_SBUS, "SBus");
- check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
- CR_EPROM, "EPROM");
- check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
- CR_I2C1, "i2c chain 1");
- check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
- CR_I2C2, "i2c chain 2");
- }
- used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
-
- ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
- rcu_read_unlock();
- kfree(tmp);
- return ret;
-}
-
-static ssize_t asic_flags_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd;
- char *buff;
- int ret;
- unsigned long long value;
- u64 scratch0;
- u64 clear;
-
- rcu_read_lock();
- ppd = private2ppd(file);
- dd = ppd->dd;
-
- buff = kmalloc(count + 1, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto do_return;
- }
-
- ret = copy_from_user(buff, buf, count);
- if (ret > 0) {
- ret = -EFAULT;
- goto do_free;
- }
-
- /* zero terminate and read the expected integer */
- buff[count] = 0;
- ret = kstrtoull(buff, 0, &value);
- if (ret)
- goto do_free;
- clear = value;
-
- /* obtain exclusive access */
- mutex_lock(&dd->asic_data->asic_resource_mutex);
- acquire_hw_mutex(dd);
-
- scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
- scratch0 &= ~clear;
- write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
- /* force write to be visible to other HFI on another OS */
- (void)read_csr(dd, ASIC_CFG_SCRATCH);
-
- release_hw_mutex(dd);
- mutex_unlock(&dd->asic_data->asic_resource_mutex);
-
- /* return the number of bytes written */
- ret = count;
-
- do_free:
- kfree(buff);
- do_return:
- rcu_read_unlock();
- return ret;
-}
-
-/*
- * read the per-port QSFP data for ppd
- */
-static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct hfi1_pportdata *ppd;
- char *tmp;
- int ret;
-
- rcu_read_lock();
- ppd = private2ppd(file);
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp) {
- rcu_read_unlock();
- return -ENOMEM;
- }
-
- ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- rcu_read_unlock();
- kfree(tmp);
- return ret;
-}
-
-/* Do an i2c write operation on the chain for the given HFI. */
-static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos, u32 target)
-{
- struct hfi1_pportdata *ppd;
- char *buff;
- int ret;
- int i2c_addr;
- int offset;
- int total_written;
-
- rcu_read_lock();
- ppd = private2ppd(file);
-
- /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
- i2c_addr = (*ppos >> 16) & 0xffff;
- offset = *ppos & 0xffff;
-
- /* explicitly reject invalid address 0 to catch cp and cat */
- if (i2c_addr == 0) {
- ret = -EINVAL;
- goto _return;
- }
-
- buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
-
- ret = copy_from_user(buff, buf, count);
- if (ret > 0) {
- ret = -EFAULT;
- goto _free;
- }
-
- total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count);
- if (total_written < 0) {
- ret = total_written;
- goto _free;
- }
-
- *ppos += total_written;
-
- ret = total_written;
-
- _free:
- kfree(buff);
- _return:
- rcu_read_unlock();
- return ret;
-}
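From user space, the offset encoding above means a caller packs the i2c
address and the on-device offset into the file position, for example with
pwrite(2). A sketch, with a hypothetical debugfs path and a zero offset-size
byte:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint8_t data[2] = { 0xab, 0xcd };
	/* [offsetSize=0][i2cAddr=0x50][offset=0x0100] */
	off_t pos = ((off_t)0x50 << 16) | 0x0100;
	/* hypothetical path for the i2c1 file created by this driver */
	int fd = open("/sys/kernel/debug/hfi1/hfi1_0/i2c1", O_WRONLY);

	if (fd < 0)
		return 1;
	pwrite(fd, data, sizeof(data), pos);
	close(fd);
	return 0;
}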
-
-/* Do an i2c write operation on chain for HFI 0. */
-static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __i2c_debugfs_write(file, buf, count, ppos, 0);
-}
-
-/* Do an i2c write operation on chain for HFI 1. */
-static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __i2c_debugfs_write(file, buf, count, ppos, 1);
-}
-
-/* Do an i2c read operation on the chain for the given HFI. */
-static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos, u32 target)
-{
- struct hfi1_pportdata *ppd;
- char *buff;
- int ret;
- int i2c_addr;
- int offset;
- int total_read;
-
- rcu_read_lock();
- ppd = private2ppd(file);
-
- /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
- i2c_addr = (*ppos >> 16) & 0xffff;
- offset = *ppos & 0xffff;
-
- /* explicitly reject invalid address 0 to catch cp and cat */
- if (i2c_addr == 0) {
- ret = -EINVAL;
- goto _return;
- }
-
- buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
-
- total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
- if (total_read < 0) {
- ret = total_read;
- goto _free;
- }
-
- *ppos += total_read;
-
- ret = copy_to_user(buf, buff, total_read);
- if (ret > 0) {
- ret = -EFAULT;
- goto _free;
- }
-
- ret = total_read;
-
- _free:
- kfree(buff);
- _return:
- rcu_read_unlock();
- return ret;
-}
-
-/* Do an i2c read operation on chain for HFI 0. */
-static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __i2c_debugfs_read(file, buf, count, ppos, 0);
-}
-
-/* Do an i2c read operation on chain for HFI 1. */
-static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __i2c_debugfs_read(file, buf, count, ppos, 1);
-}
-
-/* Do a QSFP write operation on the i2c chain for the given HFI. */
-static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos, u32 target)
-{
- struct hfi1_pportdata *ppd;
- char *buff;
- int ret;
- int total_written;
-
- rcu_read_lock();
- if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
- ret = -EINVAL;
- goto _return;
- }
-
- ppd = private2ppd(file);
-
- buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
-
- ret = copy_from_user(buff, buf, count);
- if (ret > 0) {
- ret = -EFAULT;
- goto _free;
- }
-
- total_written = qsfp_write(ppd, target, *ppos, buff, count);
- if (total_written < 0) {
- ret = total_written;
- goto _free;
- }
-
- *ppos += total_written;
-
- ret = total_written;
-
- _free:
- kfree(buff);
- _return:
- rcu_read_unlock();
- return ret;
-}
-
-/* Do a QSFP write operation on i2c chain for HFI 0. */
-static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __qsfp_debugfs_write(file, buf, count, ppos, 0);
-}
-
-/* Do a QSFP write operation on i2c chain for HFI 1. */
-static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __qsfp_debugfs_write(file, buf, count, ppos, 1);
-}
-
-/* Do a QSFP read operation on the i2c chain for the given HFI. */
-static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos, u32 target)
-{
- struct hfi1_pportdata *ppd;
- char *buff;
- int ret;
- int total_read;
-
- rcu_read_lock();
- if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
- ret = -EINVAL;
- goto _return;
- }
-
- ppd = private2ppd(file);
-
- buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
-
- total_read = qsfp_read(ppd, target, *ppos, buff, count);
- if (total_read < 0) {
- ret = total_read;
- goto _free;
- }
-
- *ppos += total_read;
-
- ret = copy_to_user(buf, buff, total_read);
- if (ret > 0) {
- ret = -EFAULT;
- goto _free;
- }
-
- ret = total_read;
-
- _free:
- kfree(buff);
- _return:
- rcu_read_unlock();
- return ret;
-}
-
-/* Do a QSFP read operation on i2c chain for HFI 0. */
-static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __qsfp_debugfs_read(file, buf, count, ppos, 0);
-}
-
-/* Do a QSFP read operation on i2c chain for HFI 1. */
-static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return __qsfp_debugfs_read(file, buf, count, ppos, 1);
-}
-
-static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
-{
- struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- ppd = private2ppd(fp);
-
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
-}
-
-static int i2c1_debugfs_open(struct inode *in, struct file *fp)
-{
- return __i2c_debugfs_open(in, fp, 0);
-}
-
-static int i2c2_debugfs_open(struct inode *in, struct file *fp)
-{
- return __i2c_debugfs_open(in, fp, 1);
-}
-
-static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
-{
- struct hfi1_pportdata *ppd;
-
- ppd = private2ppd(fp);
-
- release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
-
- return 0;
-}
-
-static int i2c1_debugfs_release(struct inode *in, struct file *fp)
-{
- return __i2c_debugfs_release(in, fp, 0);
-}
-
-static int i2c2_debugfs_release(struct inode *in, struct file *fp)
-{
- return __i2c_debugfs_release(in, fp, 1);
-}
-
-static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
-{
- struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- ppd = private2ppd(fp);
-
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
-}
-
-static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
-{
- return __qsfp_debugfs_open(in, fp, 0);
-}
-
-static int qsfp2_debugfs_open(struct inode *in, struct file *fp)
-{
- return __qsfp_debugfs_open(in, fp, 1);
-}
-
-static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
-{
- struct hfi1_pportdata *ppd;
-
- ppd = private2ppd(fp);
-
- release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
-
- return 0;
-}
-
-static int qsfp1_debugfs_release(struct inode *in, struct file *fp)
-{
- return __qsfp_debugfs_release(in, fp, 0);
-}
-
-static int qsfp2_debugfs_release(struct inode *in, struct file *fp)
-{
- return __qsfp_debugfs_release(in, fp, 1);
-}
-
-#define DEBUGFS_OPS(nm, readroutine, writeroutine) \
-{ \
- .name = nm, \
- .ops = { \
- .read = readroutine, \
- .write = writeroutine, \
- .llseek = generic_file_llseek, \
- }, \
-}
-
-#define DEBUGFS_XOPS(nm, readf, writef, openf, releasef) \
-{ \
- .name = nm, \
- .ops = { \
- .read = readf, \
- .write = writef, \
- .llseek = generic_file_llseek, \
- .open = openf, \
- .release = releasef \
- }, \
-}
-
-static const struct counter_info cntr_ops[] = {
- DEBUGFS_OPS("counter_names", dev_names_read, NULL),
- DEBUGFS_OPS("counters", dev_counters_read, NULL),
- DEBUGFS_OPS("portcounter_names", portnames_read, NULL),
-};
-
-static const struct counter_info port_cntr_ops[] = {
- DEBUGFS_OPS("port%dcounters", portcntrs_debugfs_read, NULL),
- DEBUGFS_XOPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write,
- i2c1_debugfs_open, i2c1_debugfs_release),
- DEBUGFS_XOPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write,
- i2c2_debugfs_open, i2c2_debugfs_release),
- DEBUGFS_OPS("qsfp_dump%d", qsfp_debugfs_dump, NULL),
- DEBUGFS_XOPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write,
- qsfp1_debugfs_open, qsfp1_debugfs_release),
- DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
- qsfp2_debugfs_open, qsfp2_debugfs_release),
- DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
-};
-
-void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
-{
- char name[sizeof("port0counters") + 1];
- char link[10];
- struct hfi1_devdata *dd = dd_from_dev(ibd);
- struct hfi1_pportdata *ppd;
- int unit = dd->unit;
- int i, j;
-
- if (!hfi1_dbg_root)
- return;
- snprintf(name, sizeof(name), "%s_%d", class_name(), unit);
- snprintf(link, sizeof(link), "%d", unit);
- ibd->hfi1_ibdev_dbg = debugfs_create_dir(name, hfi1_dbg_root);
- if (!ibd->hfi1_ibdev_dbg) {
- pr_warn("create of %s failed\n", name);
- return;
- }
- ibd->hfi1_ibdev_link =
- debugfs_create_symlink(link, hfi1_dbg_root, name);
- if (!ibd->hfi1_ibdev_link) {
- pr_warn("create of %s symlink failed\n", name);
- return;
- }
- DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
- DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
- /* dev counter files */
- for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
- DEBUGFS_FILE_CREATE(cntr_ops[i].name,
- ibd->hfi1_ibdev_dbg,
- dd,
- &cntr_ops[i].ops, S_IRUGO);
- /* per port files */
- for (ppd = dd->pport, j = 0; j < dd->num_pports; j++, ppd++)
- for (i = 0; i < ARRAY_SIZE(port_cntr_ops); i++) {
- snprintf(name,
- sizeof(name),
- port_cntr_ops[i].name,
- j + 1);
- DEBUGFS_FILE_CREATE(name,
- ibd->hfi1_ibdev_dbg,
- ppd,
- &port_cntr_ops[i].ops,
- !port_cntr_ops[i].ops.write ?
- S_IRUGO : S_IRUGO | S_IWUSR);
- }
-}
-
-void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
-{
- if (!hfi1_dbg_root)
- goto out;
- debugfs_remove(ibd->hfi1_ibdev_link);
- debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
-out:
- ibd->hfi1_ibdev_dbg = NULL;
- synchronize_rcu();
-}
-
-/*
- * driver stats field names, one line per stat, single string. Used by
- * programs like hfistats to print the stats in a way which works for
- * different versions of drivers, without changing program source.
- * If hfi1_ib_stats changes, this needs to change. Names need to be
- * 12 chars or less (w/o newline), for proper display by hfistats utility.
- */
-static const char * const hfi1_statnames[] = {
- /* must be element 0*/
- "KernIntr",
- "ErrorIntr",
- "Tx_Errs",
- "Rcv_Errs",
- "H/W_Errs",
- "NoPIOBufs",
- "CtxtsOpen",
- "RcvLen_Errs",
- "EgrBufFull",
- "EgrHdrFull"
-};
-
-static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
-{
- rcu_read_lock();
- if (*pos >= ARRAY_SIZE(hfi1_statnames))
- return NULL;
- return pos;
-}
-
-static void *_driver_stats_names_seq_next(
- struct seq_file *s,
- void *v,
- loff_t *pos)
-{
- ++*pos;
- if (*pos >= ARRAY_SIZE(hfi1_statnames))
- return NULL;
- return pos;
-}
-
-static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
-
- seq_printf(s, "%s\n", hfi1_statnames[*spos]);
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(driver_stats_names);
-DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
-DEBUGFS_FILE_OPS(driver_stats_names);
-
-static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
-{
- rcu_read_lock();
- if (*pos >= ARRAY_SIZE(hfi1_statnames))
- return NULL;
- return pos;
-}
-
-static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- ++*pos;
- if (*pos >= ARRAY_SIZE(hfi1_statnames))
- return NULL;
- return pos;
-}
-
-static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static u64 hfi1_sps_ints(void)
-{
- unsigned long flags;
- struct hfi1_devdata *dd;
- u64 sps_ints = 0;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- list_for_each_entry(dd, &hfi1_dev_list, list) {
- sps_ints += get_all_cpu_total(dd->int_counter);
- }
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- return sps_ints;
-}
-
-static int _driver_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- char *buffer;
- u64 *stats = (u64 *)&hfi1_stats;
- size_t sz = seq_get_buf(s, &buffer);
-
- if (sz < sizeof(u64))
- return SEQ_SKIP;
- /* special case for interrupts */
- if (*spos == 0)
- *(u64 *)buffer = hfi1_sps_ints();
- else
- *(u64 *)buffer = stats[*spos];
- seq_commit(s, sizeof(u64));
- return 0;
-}
-
-DEBUGFS_SEQ_FILE_OPS(driver_stats);
-DEBUGFS_SEQ_FILE_OPEN(driver_stats)
-DEBUGFS_FILE_OPS(driver_stats);
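Because driver_stats emits one raw u64 per stat while driver_stats_names
emits one name per line, a user-space reader can zip the two files together.
A sketch, assuming the default debugfs mount point and the directory name
created in hfi1_dbg_init() below:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical debugfs paths for the two files */
	FILE *names = fopen("/sys/kernel/debug/hfi1/driver_stats_names", "r");
	FILE *stats = fopen("/sys/kernel/debug/hfi1/driver_stats", "rb");
	char name[64];
	uint64_t val;

	if (!names || !stats)
		return 1;
	while (fgets(name, sizeof(name), names) &&
	       fread(&val, sizeof(val), 1, stats) == 1) {
		name[strcspn(name, "\n")] = '\0';
		printf("%-12s %llu\n", name, (unsigned long long)val);
	}
	fclose(names);
	fclose(stats);
	return 0;
}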
-
-void hfi1_dbg_init(void)
-{
- hfi1_dbg_root = debugfs_create_dir(DRIVER_NAME, NULL);
- if (!hfi1_dbg_root)
- pr_warn("init of debugfs failed\n");
- DEBUGFS_SEQ_FILE_CREATE(driver_stats_names, hfi1_dbg_root, NULL);
- DEBUGFS_SEQ_FILE_CREATE(driver_stats, hfi1_dbg_root, NULL);
-}
-
-void hfi1_dbg_exit(void)
-{
- debugfs_remove_recursive(hfi1_dbg_root);
- hfi1_dbg_root = NULL;
-}
-
-#endif
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/staging/rdma/hfi1/debugfs.h
deleted file mode 100644
index b6fb6814f..000000000
--- a/drivers/staging/rdma/hfi1/debugfs.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _HFI1_DEBUGFS_H
-#define _HFI1_DEBUGFS_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-struct hfi1_ibdev;
-#ifdef CONFIG_DEBUG_FS
-void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
-void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
-void hfi1_dbg_init(void);
-void hfi1_dbg_exit(void);
-#else
-static inline void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
-{
-}
-
-static inline void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
-{
-}
-
-static inline void hfi1_dbg_init(void)
-{
-}
-
-static inline void hfi1_dbg_exit(void)
-{
-}
-
-#endif
-
-#endif /* _HFI1_DEBUGFS_H */
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c
deleted file mode 100644
index c05c39da8..000000000
--- a/drivers/staging/rdma/hfi1/device.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/cdev.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-
-#include "hfi.h"
-#include "device.h"
-
-static struct class *class;
-static struct class *user_class;
-static dev_t hfi1_dev;
-
-int hfi1_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev *cdev, struct device **devp,
- bool user_accessible)
-{
- const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
- struct device *device = NULL;
- int ret;
-
- cdev_init(cdev, fops);
- cdev->owner = THIS_MODULE;
- kobject_set_name(&cdev->kobj, name);
-
- ret = cdev_add(cdev, dev, 1);
- if (ret < 0) {
- pr_err("Could not add cdev for minor %d, %s (err %d)\n",
- minor, name, -ret);
- goto done;
- }
-
- if (user_accessible)
- device = device_create(user_class, NULL, dev, NULL, "%s", name);
- else
- device = device_create(class, NULL, dev, NULL, "%s", name);
-
- if (!IS_ERR(device))
- goto done;
- ret = PTR_ERR(device);
- device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
- cdev_del(cdev);
-done:
- *devp = device;
- return ret;
-}
-
-void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp)
-{
- struct device *device = *devp;
-
- if (device) {
- device_unregister(device);
- *devp = NULL;
-
- cdev_del(cdev);
- }
-}
-
-static const char *hfi1_class_name = "hfi1";
-
-const char *class_name(void)
-{
- return hfi1_class_name;
-}
-
-static char *hfi1_devnode(struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0600;
- return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
-}
-
-static const char *hfi1_class_name_user = "hfi1_user";
-static const char *class_name_user(void)
-{
- return hfi1_class_name_user;
-}
-
-static char *hfi1_user_devnode(struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
-}
-
-int __init dev_init(void)
-{
- int ret;
-
- ret = alloc_chrdev_region(&hfi1_dev, 0, HFI1_NMINORS, DRIVER_NAME);
- if (ret < 0) {
- pr_err("Could not allocate chrdev region (err %d)\n", -ret);
- goto done;
- }
-
- class = class_create(THIS_MODULE, class_name());
- if (IS_ERR(class)) {
- ret = PTR_ERR(class);
- pr_err("Could not create device class (err %d)\n", -ret);
- unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
- goto done;
- }
- class->devnode = hfi1_devnode;
-
- user_class = class_create(THIS_MODULE, class_name_user());
- if (IS_ERR(user_class)) {
- ret = PTR_ERR(user_class);
- pr_err("Could not create device class for user accessible files (err %d)\n",
- -ret);
- class_destroy(class);
- class = NULL;
- user_class = NULL;
- unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
- goto done;
- }
- user_class->devnode = hfi1_user_devnode;
-
-done:
- return ret;
-}
-
-void dev_cleanup(void)
-{
- class_destroy(class);
- class = NULL;
-
- class_destroy(user_class);
- user_class = NULL;
-
- unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
-}
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h
deleted file mode 100644
index 5bb3e83cf..000000000
--- a/drivers/staging/rdma/hfi1/device.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef _HFI1_DEVICE_H
-#define _HFI1_DEVICE_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-int hfi1_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev *cdev, struct device **devp,
- bool user_accessible);
-void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
-const char *class_name(void);
-int __init dev_init(void);
-void dev_cleanup(void);
-
-#endif /* _HFI1_DEVICE_H */
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
deleted file mode 100644
index c5b520bf6..000000000
--- a/drivers/staging/rdma/hfi1/diag.c
+++ /dev/null
@@ -1,1924 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the hfi1_diag device, normally minor number 129. Diagnostic use
- * of the chip may render the chip or board unusable until the driver
- * is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are unlike accesses made
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <rdma/ib_smi.h>
-#include "hfi.h"
-#include "device.h"
-#include "common.h"
-#include "verbs_txreq.h"
-#include "trace.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-#define snoop_dbg(fmt, ...) \
- hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
-
-/* Snoop option mask */
-#define SNOOP_DROP_SEND BIT(0)
-#define SNOOP_USE_METADATA BIT(1)
-#define SNOOP_SET_VL0TOVL15 BIT(2)
-
-static u8 snoop_flags;
-
-/*
- * Extract packet length from LRH header.
- * This is in Dwords so multiply by 4 to get size in bytes
- */
-#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
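A worked instance of the macro above, assuming the value is already in host
byte order: lrh[2] = 0x0012 keeps 0x012 after masking, i.e. 18 dwords, which
shifts to 72 bytes.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t lrh2 = 0x0012;			/* host order for the example */
	uint32_t bytes = (lrh2 & 0xFFF) << 2;	/* what HFI1_GET_PKT_LEN computes */

	assert(bytes == 72);
	return 0;
}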
-
-enum hfi1_filter_status {
- HFI1_FILTER_HIT,
- HFI1_FILTER_ERR,
- HFI1_FILTER_MISS
-};
-
-/* snoop processing functions */
-rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
- [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
- [RHF_RCV_TYPE_EAGER] = snoop_recv_handler,
- [RHF_RCV_TYPE_IB] = snoop_recv_handler,
- [RHF_RCV_TYPE_ERROR] = snoop_recv_handler,
- [RHF_RCV_TYPE_BYPASS] = snoop_recv_handler,
- [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
- [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
- [RHF_RCV_TYPE_INVALID7] = process_receive_invalid
-};
-
-/* Snoop packet structure */
-struct snoop_packet {
- struct list_head list;
- u32 total_len;
- u8 data[];
-};
-
-/* Do not make these an enum or it will blow up the capture_md */
-#define PKT_DIR_EGRESS 0x0
-#define PKT_DIR_INGRESS 0x1
-
-/* Packet capture metadata returned to the user with the packet. */
-struct capture_md {
- u8 port;
- u8 dir;
- u8 reserved[6];
- union {
- u64 pbc;
- u64 rhf;
- } u;
-};
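When metadata is enabled via the SNOOP_USE_METADATA flag, a snoop reader sees
each packet prefixed by this header. A parsing sketch with hypothetical names,
mirroring the layout of struct capture_md above:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors struct capture_md above */
struct ex_capture_md {
	uint8_t  port;
	uint8_t  dir;		/* 0 = egress, 1 = ingress */
	uint8_t  reserved[6];
	union {
		uint64_t pbc;	/* egress: PBC used to send the packet */
		uint64_t rhf;	/* ingress: receive header flags */
	} u;
};

/* hypothetical parser for one read() worth of snooped data */
static void ex_parse(const uint8_t *buf, size_t len)
{
	const struct ex_capture_md *md = (const void *)buf;

	if (len < sizeof(*md))
		return;
	printf("port %u %s, %zu payload bytes\n", md->port,
	       md->dir ? "ingress" : "egress", len - sizeof(*md));
}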
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = diagpkt_write,
- .llseek = noop_llseek,
-};
-
-/*
- * This is used to communicate with user space via the snoop extended IOCTLs.
- */
-struct hfi1_link_info {
- __be64 node_guid;
- u8 port_mode;
- u8 port_state;
- u16 link_speed_active;
- u16 link_width_active;
- u16 vl15_init;
- u8 port_number;
- /*
- * Add padding to make this a full IB SMP payload. Note: changing the
- * size of this structure will make the IOCTLs created with _IOWR
- * change.
- * Be sure to run tests on all IOCTLs when making changes to this
- * structure.
- */
- u8 res[47];
-};
-
-/*
- * This starts our ioctl sequence numbers *way* off from the ones
- * defined in ib_core.
- */
-#define SNOOP_CAPTURE_VERSION 0x1
-
-#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */
-#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
-#define HFI1_SNOOP_IOC_BASE_SEQ 0x80
-
-#define HFI1_SNOOP_IOCGETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
-#define HFI1_SNOOP_IOCSETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
-#define HFI1_SNOOP_IOCCLEARQUEUE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
-#define HFI1_SNOOP_IOCCLEARFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
-#define HFI1_SNOOP_IOCSETFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
-#define HFI1_SNOOP_IOCGETVERSION \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
-#define HFI1_SNOOP_IOCSET_OPTS \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
-
-/*
- * These offsets +6/+7 could change, but these are already known and used
- * IOCTL numbers so don't change them without a good reason.
- */
-#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
- struct hfi1_link_info)
-#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
- struct hfi1_link_info)
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp);
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off);
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait);
-static int hfi1_snoop_release(struct inode *in, struct file *fp);
-
-struct hfi1_packet_filter_command {
- int opcode;
- int length;
- void *value_ptr;
-};
-
-/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
-#define HFI1_SNOOP_INGRESS 0x1
-#define HFI1_SNOOP_EGRESS 0x2
-
-enum hfi1_packet_filter_opcodes {
- FILTER_BY_LID,
- FILTER_BY_DLID,
- FILTER_BY_MAD_MGMT_CLASS,
- FILTER_BY_QP_NUMBER,
- FILTER_BY_PKT_TYPE,
- FILTER_BY_SERVICE_LEVEL,
- FILTER_BY_PKEY,
- FILTER_BY_DIRECTION,
-};
-
-static const struct file_operations snoop_file_ops = {
- .owner = THIS_MODULE,
- .open = hfi1_snoop_open,
- .read = hfi1_snoop_read,
- .unlocked_ioctl = hfi1_ioctl,
- .poll = hfi1_snoop_poll,
- .write = hfi1_snoop_write,
- .release = hfi1_snoop_release
-};
-
-struct hfi1_filter_array {
- int (*filter)(void *, void *, void *);
-};
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
-
-static const struct hfi1_filter_array hfi1_filters[] = {
- { hfi1_filter_lid },
- { hfi1_filter_dlid },
- { hfi1_filter_mad_mgmt_class },
- { hfi1_filter_qp_number },
- { hfi1_filter_ibpacket_type },
- { hfi1_filter_ib_service_level },
- { hfi1_filter_ib_pkey },
- { hfi1_filter_direction },
-};
-
-#define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters)
-#define HFI1_DIAG_MINOR_BASE 129
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
-
-int hfi1_diag_add(struct hfi1_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
- dd->unit);
- /*
- * Do this for each device as opposed to the normal diagpkt
- * interface which is one per host
- */
- ret = hfi1_snoop_add(dd, name);
- if (ret)
- dd_dev_err(dd, "Unable to init snoop/capture device");
-
- snprintf(name, sizeof(name), "%s_diagpkt", class_name());
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
- &diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device, false);
- }
-
- return ret;
-}
-
-/* this must be called with dd->hfi1_snoop.snoop_lock held */
-static void drain_snoop_list(struct list_head *queue)
-{
- struct list_head *pos, *q;
- struct snoop_packet *packet;
-
- list_for_each_safe(pos, q, queue) {
- packet = list_entry(pos, struct snoop_packet, list);
- list_del(pos);
- kfree(packet);
- }
-}
-
-static void hfi1_snoop_remove(struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-}
-
-void hfi1_diag_remove(struct hfi1_devdata *dd)
-{
- hfi1_snoop_remove(dd);
- if (atomic_dec_and_test(&diagpkt_count))
- hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
- hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-}
-
-/*
- * Allocated structure shared between the credit return mechanism and
- * diagpkt_send().
- */
-struct diagpkt_wait {
- struct completion credits_returned;
- int code;
- atomic_t count;
-};
-
-/*
- * When each side is finished with the structure, they call this.
- * The last user frees the structure.
- */
-static void put_diagpkt_wait(struct diagpkt_wait *wait)
-{
- if (atomic_dec_and_test(&wait->count))
- kfree(wait);
-}
-
-/*
- * Callback from the credit return code. Sets the completion, which
- * will let diagpkt_send() continue.
- */
-static void diagpkt_complete(void *arg, int code)
-{
- struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
-
- wait->code = code;
- complete(&wait->credits_returned);
- put_diagpkt_wait(wait); /* finished with the structure */
-}
-
-/**
- * diagpkt_send - send a packet
- * @dp: diag packet descriptor
- */
-static ssize_t diagpkt_send(struct diag_pkt *dp)
-{
- struct hfi1_devdata *dd;
- struct send_context *sc;
- struct pio_buf *pbuf;
- u32 *tmpbuf = NULL;
- ssize_t ret = 0;
- u32 pkt_len, total_len;
- pio_release_cb credit_cb = NULL;
- void *credit_arg = NULL;
- struct diagpkt_wait *wait = NULL;
- int trycount = 0;
-
- dd = hfi1_lookup(dp->unit);
- if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
- if (!(dd->flags & HFI1_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -ENODEV;
- goto bail;
- }
-
- if (dp->version != _DIAG_PKT_VERS) {
- dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp->version);
- ret = -EINVAL;
- goto bail;
- }
-
- /* send count must be an exact number of dwords */
- if (dp->len & 3) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* there is only port 1 */
- if (dp->port != 1) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* need a valid context */
- if (dp->sw_index >= dd->num_send_contexts) {
- ret = -EINVAL;
- goto bail;
- }
- /* can only use kernel contexts */
- if (dd->send_contexts[dp->sw_index].type != SC_KERNEL) {
- ret = -EINVAL;
- goto bail;
- }
- /* must be allocated */
- sc = dd->send_contexts[dp->sw_index].sc;
- if (!sc) {
- ret = -EINVAL;
- goto bail;
- }
- /* must be enabled */
- if (!(sc->flags & SCF_ENABLED)) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* allocate a buffer and copy the data in */
- tmpbuf = vmalloc(dp->len);
- if (!tmpbuf) {
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- (const void __user *)(unsigned long)dp->data,
- dp->len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- /*
- * pkt_len is how much data we have to write, in dwords; it includes
- * the header and data. total_len is the length of the packet in
- * dwords plus the PBC, and does not include the CRC.
- */
- pkt_len = dp->len >> 2;
- total_len = pkt_len + 2; /* PBC + packet */
-
- /* if 0, fill in a default */
- if (dp->pbc == 0) {
- struct hfi1_pportdata *ppd = dd->pport;
-
- hfi1_cdbg(PKT, "Generating PBC");
- dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
- } else {
- hfi1_cdbg(PKT, "Using passed in PBC");
- }
-
- hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);
-
- /*
- * The caller wants to wait until the packet is sent and to
- * check for errors. The best we can do is wait until
- * the buffer credits are returned and check if any packet
- * error has occurred. If there are any late errors, this
- * could miss them. If other senders generate an error, this
- * may report it instead. In general, however, it should
- * catch most errors.
- */
- if (dp->flags & F_DIAGPKT_WAIT) {
- /* always force a credit return */
- dp->pbc |= PBC_CREDIT_RETURN;
- /* turn on credit return interrupts */
- sc_add_credit_return_intr(sc);
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- ret = -ENOMEM;
- goto bail;
- }
- init_completion(&wait->credits_returned);
- atomic_set(&wait->count, 2);
- wait->code = PRC_OK;
-
- credit_cb = diagpkt_complete;
- credit_arg = wait;
- }
-
-retry:
- pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
- if (!pbuf) {
- if (trycount == 0) {
- /* force a credit return and try again */
- sc_return_credits(sc);
- trycount = 1;
- goto retry;
- }
- /*
- * No send buffer means no credit callback. Undo
- * the wait set-up that was done above. We free wait
- * because the callback will never be called.
- */
- if (dp->flags & F_DIAGPKT_WAIT) {
- sc_del_credit_return_intr(sc);
- kfree(wait);
- wait = NULL;
- }
- ret = -ENOSPC;
- goto bail;
- }
-
- pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
- /* no flush needed as the HW knows the packet size */
-
- ret = sizeof(*dp);
-
- if (dp->flags & F_DIAGPKT_WAIT) {
- /* wait for credit return */
- ret = wait_for_completion_interruptible(
- &wait->credits_returned);
- /*
- * If the wait returns an error, the wait was interrupted,
- * e.g. with a ^C in the user program. The callback is
- * still pending. This is OK as the wait structure is
- * kmalloc'ed and the structure will free itself when
- * all users are done with it.
- *
- * A context disable occurs on a send context restart, so
- * include that in the list of errors below to check for.
- * NOTE: PRC_FILL_ERR is at best informational and cannot
- * be depended on.
- */
- if (!ret && (((wait->code & PRC_STATUS_ERR) ||
- (wait->code & PRC_FILL_ERR) ||
- (wait->code & PRC_SC_DISABLE))))
- ret = -EIO;
-
- put_diagpkt_wait(wait); /* finished with the structure */
- sc_del_credit_return_intr(sc);
- }
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
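-
-/*
- * Editor's illustration, not part of the driver: a minimal sketch of the
- * dword arithmetic used in diagpkt_send() above. The helper name and the
- * 64-byte example are hypothetical.
- */
-static inline u32 example_diagpkt_total_dwords(u32 len_bytes)
-{
- u32 pkt_len = len_bytes >> 2; /* 64 bytes -> 16 dwords */
-
- return pkt_len + 2; /* plus the 2-dword PBC -> 18 dwords */
-}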
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct hfi1_devdata *dd;
- struct send_context *sc;
- u8 vl;
-
- struct diag_pkt dp;
-
- if (count != sizeof(dp))
- return -EINVAL;
-
- if (copy_from_user(&dp, data, sizeof(dp)))
- return -EFAULT;
-
- /*
- * The Send Context is derived from the PbcVL value
- * if PBC is populated
- */
- if (dp.pbc) {
- dd = hfi1_lookup(dp.unit);
- if (!dd)
- return -ENODEV;
- vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
- sc = dd->vld[vl].sc;
- if (sc) {
- dp.sw_index = sc->sw_index;
- hfi1_cdbg(
- PKT,
- "Packet sent over VL %d via Send Context %u(%u)",
- vl, sc->sw_index, sc->hw_context);
- }
- }
-
- return diagpkt_send(&dp);
-}
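-
-/*
- * Editor's sketch (illustrative only; the helper name is hypothetical):
- * how diagpkt_write() above recovers the VL from a user-supplied PBC,
- * using the driver's existing PBC_VL_SHIFT/PBC_VL_MASK constants.
- */
-static inline u8 example_vl_from_pbc(u64 pbc)
-{
- return (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
-}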
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
-{
- int ret = 0;
-
- dd->hfi1_snoop.mode_flag = 0;
- spin_lock_init(&dd->hfi1_snoop.snoop_lock);
- INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
- init_waitqueue_head(&dd->hfi1_snoop.waitq);
-
- ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
- &snoop_file_ops,
- &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
- false);
-
- if (ret) {
- dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
- hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
- &dd->hfi1_snoop.class_dev);
- }
-
- return ret;
-}
-
-static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
-{
- int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
- struct hfi1_devdata *dd;
-
- dd = hfi1_lookup(unit);
- return dd;
-}
-
-/* clear or restore send context integrity checks */
-static void adjust_integrity_checks(struct hfi1_devdata *dd)
-{
- struct send_context *sc;
- unsigned long sc_flags;
- int i;
-
- spin_lock_irqsave(&dd->sc_lock, sc_flags);
- for (i = 0; i < dd->num_send_contexts; i++) {
- int enable;
-
- sc = dd->send_contexts[i].sc;
-
- if (!sc)
- continue; /* not allocated */
-
- enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;
-
- set_pio_integrity(sc);
-
- if (enable) /* take HFI_CAP_* flags into account */
- hfi1_init_ctxt(sc);
- }
- spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
-}
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp)
-{
- int ret;
- int mode_flag = 0;
- unsigned long flags = 0;
- struct hfi1_devdata *dd;
- struct list_head *queue;
-
- mutex_lock(&hfi1_mutex);
-
- dd = hfi1_dd_from_sc_inode(in);
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- /*
- * File mode determines snoop or capture. Some existing user
- * applications expect to be able to open the capture device RDWR
- * because they expect a dedicated capture device. For this reason we
- * support a module param to force capture mode even if the file open
- * mode matches snoop.
- */
- if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
- snoop_dbg("Capture Enabled");
- mode_flag = HFI1_PORT_CAPTURE_MODE;
- } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
- snoop_dbg("Snoop Enabled");
- mode_flag = HFI1_PORT_SNOOP_MODE;
- } else {
- snoop_dbg("Invalid");
- ret = -EINVAL;
- goto bail;
- }
- queue = &dd->hfi1_snoop.queue;
-
- /*
- * We are not supporting snoop and capture at the same time.
- */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (dd->hfi1_snoop.mode_flag) {
- ret = -EBUSY;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- goto bail;
- }
-
- dd->hfi1_snoop.mode_flag = mode_flag;
- drain_snoop_list(queue);
-
- dd->hfi1_snoop.filter_callback = NULL;
- dd->hfi1_snoop.filter_value = NULL;
-
- /*
- * Send side packet integrity checks are not helpful when snooping, so
- * disable them here and re-enable them when we stop snooping.
- */
- if (mode_flag == HFI1_PORT_SNOOP_MODE) {
- /* clear after snoop mode is on */
- adjust_integrity_checks(dd); /* clear */
-
- /*
- * We also do not want to be doing the DLID LMC check for
- * ingressed packets.
- */
- dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
- write_csr(dd, DCC_CFG_PORT_CONFIG1,
- (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
- }
-
- /*
- * As soon as we set these function pointers the recv and send handlers
- * are active. This is a race condition, so we must make sure to drain
- * the queue and init the filter values above. Technically we should add
- * locking here, but the worst that happens is that on recv a packet
- * gets allocated and blocks on the snoop_lock before being added to
- * the queue. The same goes for send.
- */
- dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
- dd->process_pio_send = snoop_send_pio_handler;
- dd->process_dma_send = snoop_send_pio_handler;
- dd->pio_inline_send = snoop_inline_pio_send;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- ret = 0;
-
-bail:
- mutex_unlock(&hfi1_mutex);
-
- return ret;
-}
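-
-/*
- * Editor's sketch of the user-space side of the open-mode decision above
- * (illustrative only; the device path is an assumption based on the
- * "%s_diagpkt%d" name built in hfi1_diag_add()).
- */
-#if 0 /* user-space illustration, not driver code */
-#include <fcntl.h>
-
-static int open_snoop_or_capture(int snoop)
-{
- /* O_RDONLY selects capture mode, O_RDWR selects snoop mode */
- return open("/dev/hfi1_diagpkt0", snoop ? O_RDWR : O_RDONLY);
-}
-#endif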
-
-static int hfi1_snoop_release(struct inode *in, struct file *fp)
-{
- unsigned long flags = 0;
- struct hfi1_devdata *dd;
- int mode_flag;
-
- dd = hfi1_dd_from_sc_inode(in);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- /* clear the snoop mode before re-adjusting send context CSRs */
- mode_flag = dd->hfi1_snoop.mode_flag;
- dd->hfi1_snoop.mode_flag = 0;
-
- /*
- * Drain the queue and clear the filters; we are done with them. Don't
- * forget to restore the packet integrity checks.
- */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- if (mode_flag == HFI1_PORT_SNOOP_MODE) {
- /* restore after snoop mode is clear */
- adjust_integrity_checks(dd); /* restore */
-
- /*
- * Also reset the DCC_CFG_PORT_CONFIG1 register so DLID checking on
- * incoming packets resumes, using the value saved when the snoop
- * device was opened.
- */
- write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
- }
-
- dd->hfi1_snoop.filter_callback = NULL;
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
-
- /*
- * User is done snooping and capturing, return control to the normal
- * handler. Re-enable SDMA handling.
- */
- dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
- dd->process_pio_send = hfi1_verbs_send_pio;
- dd->process_dma_send = hfi1_verbs_send_dma;
- dd->pio_inline_send = pio_copy;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
- snoop_dbg("snoop/capture device released");
-
- return 0;
-}
-
-static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait)
-{
- int ret = 0;
- unsigned long flags = 0;
-
- struct hfi1_devdata *dd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
- if (!list_empty(&dd->hfi1_snoop.queue))
- ret |= POLLIN | POLLRDNORM;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- return ret;
-}
-
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct diag_pkt dpkt;
- struct hfi1_devdata *dd;
- ssize_t ret;
- u8 byte_two, sl, sc5, sc4, vl, byte_one;
- struct send_context *sc;
- u32 len;
- u64 pbc;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- ppd = dd->pport;
- snoop_dbg("received %lu bytes from user", count);
-
- memset(&dpkt, 0, sizeof(struct diag_pkt));
- dpkt.version = _DIAG_PKT_VERS;
- dpkt.unit = dd->unit;
- dpkt.port = 1;
-
- if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
- /*
- * We need to generate the PBC ourselves rather than let
- * diagpkt_send() do it; to do this we need the VL and the length
- * in dwords. The VL is determined by taking the SL and looking
- * up the SC, which is then converted into a VL. The exception
- * is packets from an SMI queue pair: since we can't detect
- * anything about the QP here, we rely on the SC alone. If it
- * is 0xF we assume SMI and do not look at the SL. (See the
- * sketch after this function.)
- */
- if (copy_from_user(&byte_one, data, 1))
- return -EINVAL;
-
- if (copy_from_user(&byte_two, data + 1, 1))
- return -EINVAL;
-
- sc4 = (byte_one >> 4) & 0xf;
- if (sc4 == 0xF) {
- snoop_dbg("Detected VL15 packet ignoring SL in packet");
- vl = sc4;
- } else {
- sl = (byte_two >> 4) & 0xf;
- ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
- sc5 = ibp->sl_to_sc[sl];
- vl = sc_to_vlt(dd, sc5);
- if (vl != sc4) {
- snoop_dbg("VL %d does not match SC %d of packet",
- vl, sc4);
- return -EINVAL;
- }
- }
-
- sc = dd->vld[vl].sc; /* Look up the context based on VL */
- if (sc) {
- dpkt.sw_index = sc->sw_index;
- snoop_dbg("Sending on context %u(%u)", sc->sw_index,
- sc->hw_context);
- } else {
- snoop_dbg("Could not find context for vl %d", vl);
- return -EINVAL;
- }
-
- len = (count >> 2) + 2; /* Add in PBC */
- pbc = create_pbc(ppd, 0, 0, vl, len);
- } else {
- if (copy_from_user(&pbc, data, sizeof(pbc)))
- return -EINVAL;
- vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
- sc = dd->vld[vl].sc; /* Look up the context based on VL */
- if (sc) {
- dpkt.sw_index = sc->sw_index;
- } else {
- snoop_dbg("Could not find context for vl %d", vl);
- return -EINVAL;
- }
- data += sizeof(pbc);
- count -= sizeof(pbc);
- }
- dpkt.len = count;
- dpkt.data = (unsigned long)data;
-
- snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
- (pbc >> 12) & 0xf,
- (pbc & 0xfff));
-
- dpkt.pbc = pbc;
- ret = diagpkt_send(&dpkt);
- /*
- * diagpkt_send only returns number of bytes in the diagpkt so patch
- * that up here before returning.
- */
- if (ret == sizeof(dpkt))
- return count;
-
- return ret;
-}
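-
-/*
- * Editor's sketch (illustrative only, hypothetical helper names) of the
- * header-byte decoding in hfi1_snoop_write() above: the low four SC bits
- * come from the high nibble of the first header byte, and the SL from
- * the high nibble of the second.
- */
-static inline u8 example_sc4_from_hdr(u8 byte_one)
-{
- return (byte_one >> 4) & 0xf;
-}
-
-static inline u8 example_sl_from_hdr(u8 byte_two)
-{
- return (byte_two >> 4) & 0xf;
-}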
-
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off)
-{
- ssize_t ret = 0;
- unsigned long flags = 0;
- struct snoop_packet *packet = NULL;
- struct hfi1_devdata *dd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- while (list_empty(&dd->hfi1_snoop.queue)) {
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
- if (fp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- if (wait_event_interruptible(
- dd->hfi1_snoop.waitq,
- !list_empty(&dd->hfi1_snoop.queue)))
- return -EINTR;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- }
-
- if (!list_empty(&dd->hfi1_snoop.queue)) {
- packet = list_entry(dd->hfi1_snoop.queue.next,
- struct snoop_packet, list);
- list_del(&packet->list);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- if (pkt_len >= packet->total_len) {
- if (copy_to_user(data, packet->data,
- packet->total_len))
- ret = -EFAULT;
- else
- ret = packet->total_len;
- } else {
- ret = -EINVAL;
- }
-
- kfree(packet);
- } else {
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- }
-
- return ret;
-}
-
-/**
- * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others
- * @ppd: ptr to hfi1 port data
- * @value: options from user space
- *
- * Assumes the rest of the CM credit registers are zero from a
- * previous global or credit reset.
- * Leave the shared count at zero for both global and all VLs;
- * in snoop mode we ideally don't use shared credits.
- * Reserve 8.5KB for VL15; if the total credits are less than that,
- * return an error.
- * Divide the rest of the credits across VL0 to VL7 and, if any of
- * these VLs would get fewer than 34 credits (at least 2048 + 128
- * bytes), return an error.
- * The credit registers are reset to zero on link negotiation or link
- * up, so this function should be activated from user space only after
- * the port has gone past link negotiation and link up.
- *
- * Return -- 0 if successful, else an error condition
- *
- */
-static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
- int value)
-{
-#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */
- struct buffer_control t;
- int i;
- struct hfi1_devdata *dd = ppd->dd;
- u16 total_credits = (value >> 16) & 0xffff;
- u16 vl15_credits = dd->vl15_init / 2;
- u16 per_vl_credits;
- __be16 be_per_vl_credits;
-
- if (!(ppd->host_link_state & HLS_UP))
- goto err_exit;
- if (total_credits < vl15_credits)
- goto err_exit;
-
- per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
-
- if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
- goto err_exit;
-
- memset(&t, 0, sizeof(t));
- be_per_vl_credits = cpu_to_be16(per_vl_credits);
-
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- t.vl[i].dedicated = be_per_vl_credits;
-
- t.vl[15].dedicated = cpu_to_be16(vl15_credits);
- return set_buffer_control(ppd, &t);
-
-err_exit:
- snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
- ppd->host_link_state, total_credits, vl15_credits);
-
- return -EINVAL;
-}
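-
-/*
- * Editor's worked example for the credit math above (the input numbers
- * are hypothetical): with total_credits = 1000 and vl15_credits = 64,
- *
- * per_vl_credits = (1000 - 64) / TXE_NUM_DATA_VL; // 936 / 8 = 117
- *
- * 117 >= OPA_MIN_PER_VL_CREDITS (34), so each of VL0-VL7 gets 117
- * dedicated credits and VL15 keeps its reserved 64.
- */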
-
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- struct hfi1_devdata *dd;
- void *filter_value = NULL;
- long ret = 0;
- int value = 0;
- u8 phys_state = 0;
- u8 link_state = 0;
- u16 dev_state = 0;
- unsigned long flags = 0;
- unsigned long *argp = NULL;
- struct hfi1_packet_filter_command filter_cmd = {0};
- int mode_flag = 0;
- struct hfi1_pportdata *ppd = NULL;
- unsigned int index;
- struct hfi1_link_info link_info;
- int read_cmd, write_cmd, read_ok, write_ok;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- mode_flag = dd->hfi1_snoop.mode_flag;
- read_cmd = _IOC_DIR(cmd) & _IOC_READ;
- write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
- write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
- read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
-
- if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
- return -EFAULT;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
- (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
- (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
- (cmd != HFI1_SNOOP_IOCSETFILTER))
- /* Capture devices are allowed only three operations:
- * 1. Clear capture queue
- * 2. Clear capture filter
- * 3. Set capture filter
- * All others are invalid.
- */
- return -EINVAL;
-
- switch (cmd) {
- case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
- memset(&link_info, 0, sizeof(link_info));
-
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- return -EFAULT;
-
- value = link_info.port_state;
- index = link_info.port_number;
- if (index > dd->num_pports - 1)
- return -EINVAL;
-
- ppd = &dd->pport[index];
- if (!ppd)
- return -EINVAL;
-
- /* What we want to transition to */
- phys_state = (value >> 4) & 0xF;
- link_state = value & 0xF;
- snoop_dbg("Setting link state 0x%x", value);
-
- switch (link_state) {
- case IB_PORT_NOP:
- if (phys_state == 0)
- break;
- /* fall through */
- case IB_PORT_DOWN:
- switch (phys_state) {
- case 0:
- dev_state = HLS_DN_DOWNDEF;
- break;
- case 2:
- dev_state = HLS_DN_POLL;
- break;
- case 3:
- dev_state = HLS_DN_DISABLE;
- break;
- default:
- return -EINVAL;
- }
- ret = set_link_state(ppd, dev_state);
- break;
- case IB_PORT_ARMED:
- ret = set_link_state(ppd, HLS_UP_ARMED);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ARM);
- break;
- case IB_PORT_ACTIVE:
- ret = set_link_state(ppd, HLS_UP_ACTIVE);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ACTIVE);
- break;
- default:
- return -EINVAL;
- }
-
- if (ret)
- break;
- /* fall through */
- case HFI1_SNOOP_IOCGETLINKSTATE:
- case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
- if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
- memset(&link_info, 0, sizeof(link_info));
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- return -EFAULT;
- index = link_info.port_number;
- } else {
- ret = __get_user(index, (int __user *)arg);
- if (ret != 0)
- break;
- }
-
- if (index > dd->num_pports - 1)
- return -EINVAL;
-
- ppd = &dd->pport[index];
- if (!ppd)
- return -EINVAL;
-
- value = hfi1_ibphys_portstate(ppd);
- value <<= 4;
- value |= driver_lstate(ppd);
-
- snoop_dbg("Link port | Link State: %d", value);
-
- if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
- (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
- link_info.port_state = value;
- link_info.node_guid = cpu_to_be64(ppd->guid);
- link_info.link_speed_active =
- ppd->link_speed_active;
- link_info.link_width_active =
- ppd->link_width_active;
- if (copy_to_user((struct hfi1_link_info __user *)arg,
- &link_info, sizeof(link_info)))
- return -EFAULT;
- } else {
- ret = __put_user(value, (int __user *)arg);
- }
- break;
-
- case HFI1_SNOOP_IOCCLEARQUEUE:
- snoop_dbg("Clearing snoop queue");
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
-
- case HFI1_SNOOP_IOCCLEARFILTER:
- snoop_dbg("Clearing filter");
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (dd->hfi1_snoop.filter_callback) {
- /* Drain packets first */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback = NULL;
- }
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
-
- case HFI1_SNOOP_IOCSETFILTER:
- snoop_dbg("Setting filter");
- /* just copy command structure */
- argp = (unsigned long *)arg;
- if (copy_from_user(&filter_cmd, (void __user *)argp,
- sizeof(filter_cmd)))
- return -EFAULT;
-
- if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
- pr_alert("Invalid opcode in request\n");
- return -EINVAL;
- }
-
- snoop_dbg("Opcode %d Len %d Ptr %p",
- filter_cmd.opcode, filter_cmd.length,
- filter_cmd.value_ptr);
-
- filter_value = kcalloc(filter_cmd.length, sizeof(u8),
- GFP_KERNEL);
- if (!filter_value)
- return -ENOMEM;
-
- /* copy remaining data from userspace */
- if (copy_from_user((u8 *)filter_value,
- (void __user *)filter_cmd.value_ptr,
- filter_cmd.length)) {
- kfree(filter_value);
- return -EFAULT;
- }
- /* Drain packets first */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback =
- hfi1_filters[filter_cmd.opcode].filter;
- /* just in case we see back to back sets */
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = filter_value;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
- case HFI1_SNOOP_IOCGETVERSION:
- value = SNOOP_CAPTURE_VERSION;
- snoop_dbg("Getting version: %d", value);
- ret = __put_user(value, (int __user *)arg);
- break;
- case HFI1_SNOOP_IOCSET_OPTS:
- snoop_flags = 0;
- ret = __get_user(value, (int __user *)arg);
- if (ret != 0)
- break;
-
- snoop_dbg("Setting snoop option %d", value);
- if (value & SNOOP_DROP_SEND)
- snoop_flags |= SNOOP_DROP_SEND;
- if (value & SNOOP_USE_METADATA)
- snoop_flags |= SNOOP_USE_METADATA;
- if (value & (SNOOP_SET_VL0TOVL15)) {
- ppd = &dd->pport[0]; /* first port will do */
- ret = hfi1_assign_snoop_link_credits(ppd, value);
- }
- break;
- default:
- return -ENOTTY;
- }
-
- return ret;
-}
-
-static void snoop_list_add_tail(struct snoop_packet *packet,
- struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
- (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
- list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
- snoop_dbg("Added packet to list");
- }
-
- /*
- * Technically the snoop device could have been closed while we were
- * waiting on the above lock and be gone now. The snoop mode_flag will
- * prevent us from adding the packet to the queue, though.
- */
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- wake_up_interruptible(&dd->hfi1_snoop.waitq);
-}
-
-static inline int hfi1_filter_check(void *val, const char *msg)
-{
- if (!val) {
- snoop_dbg("Error invalid %s value for filter", msg);
- return HFI1_FILTER_ERR;
- }
- return 0;
-}
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
- return HFI1_FILTER_HIT; /* matched */
-
- return HFI1_FILTER_MISS; /* Not matched */
-}
-
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-/* Not valid for outgoing packets; the send handler passes NULL for data */
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- struct ib_smp *smp = NULL;
- u32 qpn = 0;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(packet_data, "packet_data");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- /* Check for GRH */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
- else
- ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
-
- qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
- if (qpn <= 1) {
- smp = (struct ib_smp *)packet_data;
- if (*((u8 *)value) == smp->mgmt_class)
- return HFI1_FILTER_HIT;
- else
- return HFI1_FILTER_MISS;
- }
- return HFI1_FILTER_ERR;
-}
-
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- /* Check for GRH */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
- else
- ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
- if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
- void *value)
-{
- u32 lnh = 0;
- u8 opcode = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
-
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
- if (*((u8 *)value) == ((opcode >> 5) & 0x7))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
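-
-/*
- * Editor's sketch (illustrative only; the helper name is hypothetical):
- * the opcode is the top byte of bth[0], and its top three bits select
- * the transport type that the filter above compares against.
- */
-static inline u8 example_transport_from_bth0(u32 bth0)
-{
- u8 opcode = bth0 >> 24;
-
- return (opcode >> 5) & 0x7;
-}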
-
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
-{
- u32 lnh = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
- /* The P_Key is a 16-bit entity; however, its top-most bit indicates
- * the type of membership: 0 for limited and 1 for full.
- * Limited members cannot accept information from other
- * limited members, but communication is allowed between
- * every other combination of memberships.
- * Hence we omit the top-most bit while filtering.
- */
-
- if ((*(u16 *)value & 0x7FFF) ==
- ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
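-
-/*
- * Editor's sketch (illustrative only; the helper name is hypothetical):
- * the membership bit (bit 15) is masked off both P_Keys before the
- * comparison above, for the reason given in the comment.
- */
-static inline int example_pkey_match(u16 user_pkey, u16 pkt_pkey)
-{
- return (user_pkey & 0x7FFF) == (pkt_pkey & 0x7FFF);
-}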
-
-/*
- * If packet_data is NULL then this is coming from one of the send functions.
- * Thus we know whether it is an ingress or an egress packet.
- */
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
-{
- u8 user_dir;
- int ret;
-
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- /* only dereference value once we know it is non-NULL */
- user_dir = *(u8 *)value;
-
- if (packet_data) {
- /* Incoming packet */
- if (user_dir & HFI1_SNOOP_INGRESS)
- return HFI1_FILTER_HIT;
- } else {
- /* Outgoing packet */
- if (user_dir & HFI1_SNOOP_EGRESS)
- return HFI1_FILTER_HIT;
- }
-
- return HFI1_FILTER_MISS;
-}
-
-/*
- * Allocate a snoop packet: the structure that is stored in the ring buffer,
- * not to be confused with an HFI packet type.
- */
-static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
- u32 data_len,
- u32 md_len)
-{
- struct snoop_packet *packet;
-
- packet = kzalloc(sizeof(*packet) + hdr_len + data_len
- + md_len,
- GFP_ATOMIC | __GFP_NOWARN);
- if (likely(packet))
- INIT_LIST_HEAD(&packet->list);
-
- return packet;
-}
-
-/*
- * Instead of having snoop and capture code intermixed with the recv
- * functions (both the interrupt handler and hfi1_ib_rcv()), we hijack the
- * call and land in here for snoop/capture; if it is not enabled the call
- * goes through as before. This gives us a single point to constrain all of
- * the snoop recv logic. There is nothing special that needs to happen for
- * bypass packets: this routine should not try to look into the packet, it
- * just copies it. There is no guarantee for filters when it comes to
- * bypass packets as there is no specific support. Bottom line: this
- * routine does not even know what a bypass packet is.
- */
-int snoop_recv_handler(struct hfi1_packet *packet)
-{
- struct hfi1_pportdata *ppd = packet->rcd->ppd;
- struct hfi1_ib_header *hdr = packet->hdr;
- int header_size = packet->hlen;
- void *data = packet->ebuf;
- u32 tlen = packet->tlen;
- struct snoop_packet *s_packet = NULL;
- int ret;
- int snoop_mode = 0;
- u32 md_len = 0;
- struct capture_md md;
-
- snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
- data);
-
- trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
- data);
-
- if (!ppd->dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
- ppd->dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- break;
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- break;
- case HFI1_FILTER_HIT:
-
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- s_packet = allocate_snoop_packet(header_size,
- tlen - header_size,
- md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- break;
- }
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_INGRESS;
- md.u.rhf = packet->rhf;
- memcpy(s_packet->data, &md, md_len);
- }
-
- /* We should always have a header */
- if (hdr) {
- memcpy(s_packet->data + md_len, hdr, header_size);
- } else {
- dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- break;
- }
-
- /*
- * Packets with no data are possible. If there is no data, we still
- * need to take care of the last 4 bytes, which are normally included
- * with data buffers and are counted in tlen. Since we kzalloc the
- * buffer we do not need to set any values, but if we ever decide
- * not to use kzalloc we should zero them.
- */
- if (data)
- memcpy(s_packet->data + header_size + md_len, data,
- tlen - header_size);
-
- s_packet->total_len = tlen + md_len;
- snoop_list_add_tail(s_packet, ppd->dd);
-
- /*
- * If we are snooping the packet, not capturing, then throw it
- * away after adding it to the list.
- */
- snoop_dbg("Capturing packet");
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
- snoop_dbg("Throwing packet away");
- /*
- * If we are dropping the packet we still may need to
- * handle the case where error flags are set, this is
- * normally done by the type specific handler but that
- * won't be called in this case.
- */
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
-
- /* throw the packet on the floor */
- return RHF_RCV_CONTINUE;
- }
- break;
- default:
- break;
- }
-
- /*
- * We do not care what type of packet came in here - just pass it off
- * to the normal handler.
- */
- return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
- (packet);
-}
-
-/*
- * Handle snooping and capturing packets when sdma is being used.
- */
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
- snoop_dbg("Unsupported Operation");
- return hfi1_verbs_send_dma(qp, ps, 0);
-}
-
-/*
- * Handle snooping and capturing packets when pio is being used. Does not
- * handle bypass packets. The only way to send a bypass packet currently is
- * to use the diagpkt interface; when that interface is enabled,
- * snoop/capture is not.
- */
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
- u32 dwords = (len + 3) >> 2;
- u32 plen = hdrwords + dwords + 2; /* includes pbc */
- struct hfi1_pportdata *ppd = ps->ppd;
- struct snoop_packet *s_packet = NULL;
- u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
- u32 length = 0;
- struct rvt_sge_state temp_ss;
- void *data = NULL;
- void *data_start = NULL;
- int ret;
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- u32 vl;
- u32 hdr_len = hdrwords << 2;
- u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
-
- md.u.pbc = 0;
-
- snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
- hdrwords, len, plen, dwords, tlen);
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- /* not using ss->total_len as arg 2 b/c that does not count CRC */
- s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- goto out;
- }
-
- s_packet->total_len = tlen + md_len;
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- if (likely(pbc == 0)) {
- vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
- md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
- } else {
- md.u.pbc = 0;
- }
- memcpy(s_packet->data, &md, md_len);
- } else {
- md.u.pbc = pbc;
- }
-
- /* Copy header */
- if (likely(hdr)) {
- memcpy(s_packet->data + md_len, hdr, hdr_len);
- } else {
- dd_dev_err(ppd->dd,
- "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- goto out;
- }
-
- if (ss) {
- data = s_packet->data + hdr_len + md_len;
- data_start = data;
-
- /*
- * Copy the SGE state.
- * The update_sge() function below will not modify the
- * individual SGEs in the array. It will make a copy each time
- * and operate on that. So we only need to copy this instance
- * and it won't impact PIO.
- */
- temp_ss = *ss;
- length = len;
-
- snoop_dbg("Need to copy %d bytes", length);
- while (length) {
- void *addr = temp_ss.sge.vaddr;
- u32 slen = temp_ss.sge.length;
-
- if (slen > length) {
- slen = length;
- snoop_dbg("slen %d > len %d", slen, length);
- }
- snoop_dbg("copy %d to %p", slen, addr);
- memcpy(data, addr, slen);
- update_sge(&temp_ss, slen);
- length -= slen;
- data += slen;
- snoop_dbg("data is now %p bytes left %d", data, length);
- }
- snoop_dbg("Completed SGE copy");
- }
-
- /*
- * Why do the filter check down here? Because the event tracing has its
- * own filtering and we need to have walked the SGE list first.
- */
- if (!ppd->dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set\n");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = ppd->dd->hfi1_snoop.filter_callback(
- &ps->s_txreq->phdr.hdr,
- NULL,
- ppd->dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- /* fall through */
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- kfree(s_packet);
- break;
- case HFI1_FILTER_HIT:
- snoop_dbg("Capturing packet");
- snoop_list_add_tail(s_packet, ppd->dd);
-
- if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
- (ppd->dd->hfi1_snoop.mode_flag &
- HFI1_PORT_SNOOP_MODE))) {
- unsigned long flags;
-
- snoop_dbg("Dropping packet");
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(
- qp,
- qp->s_wqe,
- IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_rc_send_complete(qp,
- &ps->s_txreq->phdr.hdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
-
- /*
- * If snoop is dropping the packet we need to put the
- * txreq back because no one else will.
- */
- hfi1_put_txreq(ps->s_txreq);
- return 0;
- }
- break;
- default:
- kfree(s_packet);
- break;
- }
-out:
- return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
-}
-
-/*
- * Callers of this must pass an hfi1_ib_header type for the from pointer.
- * Currently this can be used anywhere, but the intention is for inline ACKs
- * for RC and CCA packets. We don't restrict this usage, though.
- */
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count)
-{
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- struct snoop_packet *s_packet = NULL;
-
- /*
- * count is in dwords so we need to convert to bytes.
- * We also need to account for CRC which would be tacked on by hardware.
- */
- int packet_len = (count << 2) + 4;
- int ret;
-
- snoop_dbg("ACK OUT: len %d", packet_len);
-
- if (!dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = dd->hfi1_snoop.filter_callback(
- (struct hfi1_ib_header *)from,
- NULL,
- dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- /* fall through */
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- break;
- case HFI1_FILTER_HIT:
- snoop_dbg("Capturing packet");
- if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- s_packet = allocate_snoop_packet(packet_len, 0, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
- goto inline_pio_out;
- }
-
- s_packet->total_len = packet_len + md_len;
-
- /* Fill in the metadata for the packet */
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- md.u.pbc = pbc;
- memcpy(s_packet->data, &md, md_len);
- }
-
- /* Add the packet data which is a single buffer */
- memcpy(s_packet->data + md_len, from, packet_len);
-
- snoop_list_add_tail(s_packet, dd);
-
- if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
- snoop_dbg("Dropping packet");
- return;
- }
- break;
- default:
- break;
- }
-
-inline_pio_out:
- pio_copy(dd, pbuf, pbc, from, count);
-}
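-
-/*
- * Editor's illustration of the length math in snoop_inline_pio_send()
- * above, for a hypothetical count of 16 dwords:
- *
- * packet_len = (16 << 2) + 4; // 64 data bytes + 4-byte CRC = 68
- */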
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/staging/rdma/hfi1/dma.c
deleted file mode 100644
index 7e8dab892..000000000
--- a/drivers/staging/rdma/hfi1/dma.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-
-#include "verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64)0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int hfi1_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
- return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction)
-{
- if (WARN_ON(!valid_dma_direction(direction)))
- return BAD_DMA_ADDRESS;
-
- return (u64)cpu_addr;
-}
-
-static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- /* This is a stub, nothing to be done here */
-}
-
-static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- u64 addr;
-
- if (WARN_ON(!valid_dma_direction(direction)))
- return BAD_DMA_ADDRESS;
-
- if (offset + size > PAGE_SIZE)
- return BAD_DMA_ADDRESS;
-
- addr = (u64)page_address(page);
- if (addr)
- addr += offset;
-
- return addr;
-}
-
-static void hfi1_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
- enum dma_data_direction direction)
-{
- /* This is a stub, nothing to be done here */
-}
-
-static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- u64 addr;
- int i;
- int ret = nents;
-
- if (WARN_ON(!valid_dma_direction(direction)))
- return BAD_DMA_ADDRESS;
-
- for_each_sg(sgl, sg, nents, i) {
- addr = (u64)page_address(sg_page(sg));
- if (!addr) {
- ret = 0;
- break;
- }
- sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- }
- return ret;
-}
-
-static void hfi1_unmap_sg(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- /* This is a stub, nothing to be done here */
-}
-
-static void hfi1_sync_single_for_cpu(struct ib_device *dev, u64 addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static void hfi1_sync_single_for_device(struct ib_device *dev, u64 addr,
- size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size,
- u64 *dma_handle, gfp_t flag)
-{
- struct page *p;
- void *addr = NULL;
-
- p = alloc_pages(flag, get_order(size));
- if (p)
- addr = page_address(p);
- if (dma_handle)
- *dma_handle = (u64)addr;
- return addr;
-}
-
-static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size,
- void *cpu_addr, u64 dma_handle)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops hfi1_dma_mapping_ops = {
- .mapping_error = hfi1_mapping_error,
- .map_single = hfi1_dma_map_single,
- .unmap_single = hfi1_dma_unmap_single,
- .map_page = hfi1_dma_map_page,
- .unmap_page = hfi1_dma_unmap_page,
- .map_sg = hfi1_map_sg,
- .unmap_sg = hfi1_unmap_sg,
- .sync_single_for_cpu = hfi1_sync_single_for_cpu,
- .sync_single_for_device = hfi1_sync_single_for_device,
- .alloc_coherent = hfi1_dma_alloc_coherent,
- .free_coherent = hfi1_dma_free_coherent
-};
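-
-/*
- * Editor's note (illustrative): because these ops return kernel virtual
- * addresses rather than bus addresses, "mapping" is effectively a cast:
- *
- * u64 addr = hfi1_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
- * // addr == (u64)buf unless the direction was invalid
- */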
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c
deleted file mode 100644
index 34511e5df..000000000
--- a/drivers/staging/rdma/hfi1/driver.c
+++ /dev/null
@@ -1,1403 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/prefetch.h>
-#include <rdma/ib_verbs.h>
-
-#include "hfi.h"
-#include "trace.h"
-#include "qp.h"
-#include "sdma.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-
-/*
- * The buffer has to be longer than this string, so we can append
- * board/chip information to it in the initialization code.
- */
-const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";
-
-DEFINE_SPINLOCK(hfi1_devs_lock);
-LIST_HEAD(hfi1_dev_list);
-DEFINE_MUTEX(hfi1_mutex); /* general driver use */
-
-unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
-module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
-MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");
-
-unsigned int hfi1_cu = 1;
-module_param_named(cu, hfi1_cu, uint, S_IRUGO);
-MODULE_PARM_DESC(cu, "Credit return units");
-
-unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
-static int hfi1_caps_set(const char *, const struct kernel_param *);
-static int hfi1_caps_get(char *, const struct kernel_param *);
-static const struct kernel_param_ops cap_ops = {
- .set = hfi1_caps_set,
- .get = hfi1_caps_get
-};
-module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
-MODULE_VERSION(HFI1_DRIVER_VERSION);
-
-/*
- * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
- */
-#define MAX_PKT_RECV 64
-#define EGR_HEAD_UPDATE_THRESHOLD 16
-
-struct hfi1_ib_stats hfi1_stats;
-
-static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
-{
- int ret = 0;
- unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
- cap_mask = *cap_mask_ptr, value, diff,
- write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
- HFI1_CAP_WRITABLE_MASK);
-
- ret = kstrtoul(val, 0, &value);
- if (ret) {
- pr_warn("Invalid module parameter value for 'cap_mask'\n");
- goto done;
- }
- /* Get the changed bits (except the locked bit) */
- diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);
-
- /* Remove any bits that are not allowed to change after driver load */
- if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
- pr_warn("Ignoring non-writable capability bits %#lx\n",
- diff & ~write_mask);
- diff &= write_mask;
- }
-
- /* Mask off any reserved bits */
- diff &= ~HFI1_CAP_RESERVED_MASK;
- /* Clear any previously set and changing bits */
- cap_mask &= ~diff;
- /* Update the bits with the new capability */
- cap_mask |= (value & diff);
- /* Check for any kernel/user restrictions */
- diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
- ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
- cap_mask &= ~diff;
- /* Set the bitmask to the final set */
- *cap_mask_ptr = cap_mask;
-done:
- return ret;
-}
-
-static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
-{
- unsigned long cap_mask = *(unsigned long *)kp->arg;
-
- cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
- cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);
-
- return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
-}
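-
-/*
- * Editor's illustration (hypothetical mask value): cap_mask can be set at
- * module load and, for writable bits, changed at runtime via sysfs:
- *
- * modprobe hfi1 cap_mask=<mask>
- * echo <mask> > /sys/module/hfi1/parameters/cap_mask
- */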
-
-const char *get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
- return iname;
-}
-
-const char *get_card_name(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
- struct hfi1_devdata *dd = container_of(ibdev,
- struct hfi1_devdata, verbs_dev);
- return get_unit_name(dd->unit);
-}
-
-struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
- struct hfi1_devdata *dd = container_of(ibdev,
- struct hfi1_devdata, verbs_dev);
- return dd->pcidev;
-}
-
-/*
- * Return count of units with at least one port ACTIVE.
- */
-int hfi1_count_active_units(void)
-{
- struct hfi1_devdata *dd;
- struct hfi1_pportdata *ppd;
- unsigned long flags;
- int pidx, nunits_active = 0;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- list_for_each_entry(dd, &hfi1_dev_list, list) {
- if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
- continue;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && ppd->linkup) {
- nunits_active++;
- break;
- }
- }
- }
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- return nunits_active;
-}
-
-/*
- * Return count of all units, optionally return in arguments
- * the number of usable (present) units, and the number of
- * ports that are up.
- */
-int hfi1_count_units(int *npresentp, int *nupp)
-{
- int nunits = 0, npresent = 0, nup = 0;
- struct hfi1_devdata *dd;
- unsigned long flags;
- int pidx;
- struct hfi1_pportdata *ppd;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
-
- list_for_each_entry(dd, &hfi1_dev_list, list) {
- nunits++;
- if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
- npresent++;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && ppd->linkup)
- nup++;
- }
- }
-
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
-
- if (npresentp)
- *npresentp = npresent;
- if (nupp)
- *nupp = nup;
-
- return nunits;
-}
-
-/*
- * Get the address of an eager buffer from its index (allocated in chunks,
- * not contiguous).
- */
-static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
- u8 *update)
-{
- u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);
-
- *update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
- return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
- (offset * RCV_BUF_BLOCK_SIZE));
-}
-
-/*
- * Validate and encode a given RcvArray buffer size.
- * The function checks whether the given size falls within the
- * allowed size ranges for the respective type and, optionally,
- * returns the proper encoding.
- */
-inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
-{
- if (unlikely(!PAGE_ALIGNED(size)))
- return 0;
- if (unlikely(size < MIN_EAGER_BUFFER))
- return 0;
- if (size >
- (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
- return 0;
- if (encoded)
- *encoded = ilog2(size / PAGE_SIZE) + 1;
- return 1;
-}
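-
-/*
- * Encoding example: with 4 KiB pages, a 64 KiB eager buffer encodes as
- * ilog2(65536 / 4096) + 1 = ilog2(16) + 1 = 5; a size that is not page
- * aligned or is outside the allowed range for its type returns 0.
- */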
-
-static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
- struct hfi1_packet *packet)
-{
- struct hfi1_message_header *rhdr = packet->hdr;
- u32 rte = rhf_rcv_type_err(packet->rhf);
- int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
- struct hfi1_ibport *ibp = &ppd->ibport_data;
- struct hfi1_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
-
- if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
- return;
-
- if (packet->rhf & RHF_TID_ERR) {
- /* For TIDERR and RC QPs preemptively schedule a NAK */
- struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
- struct hfi1_other_headers *ohdr = NULL;
- u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
- u16 lid = be16_to_cpu(hdr->lrh[1]);
- u32 qp_num;
- u32 rcv_flags = 0;
-
- /* Sanity check packet */
- if (tlen < 24)
- goto drop;
-
- /* Check for GRH */
- if (lnh == HFI1_LRH_BTH) {
- ohdr = &hdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- rcv_flags |= HFI1_HAS_GRH;
- } else {
- goto drop;
- }
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- struct rvt_qp *qp;
- unsigned long flags;
-
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!qp) {
- rcu_read_unlock();
- goto drop;
- }
-
- /*
- * Handle only RC QPs - for other QP types drop error
- * packet.
- */
- spin_lock_irqsave(&qp->r_lock, flags);
-
- /* Check for valid receive state. */
- if (!(ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
-			hfi1_rc_hdrerr(rcd, hdr, rcv_flags, qp);
- break;
- default:
- /* For now don't handle any other QP types */
- break;
- }
-
- spin_unlock_irqrestore(&qp->r_lock, flags);
- rcu_read_unlock();
- } /* Unicast QP */
- } /* Valid packet with TIDErr */
-
- /* handle "RcvTypeErr" flags */
- switch (rte) {
- case RHF_RTE_ERROR_OP_CODE_ERR:
- {
- u32 opcode;
- void *ebuf = NULL;
- __be32 *bth = NULL;
-
- if (rhf_use_egr_bfr(packet->rhf))
- ebuf = packet->ebuf;
-
- if (!ebuf)
- goto drop; /* this should never happen */
-
- if (lnh == HFI1_LRH_BTH)
- bth = (__be32 *)ebuf;
- else if (lnh == HFI1_LRH_GRH)
- bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
- else
- goto drop;
-
- opcode = be32_to_cpu(bth[0]) >> 24;
- opcode &= 0xff;
-
- if (opcode == IB_OPCODE_CNP) {
- /*
- * Only in pre-B0 h/w is the CNP_OPCODE handled
- * via this code path.
- */
- struct rvt_qp *qp = NULL;
- u32 lqpn, rqpn;
- u16 rlid;
- u8 svc_type, sl, sc5;
-
- sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
- if (rhf_dc_info(packet->rhf))
- sc5 |= 0x10;
- sl = ibp->sc_to_sl[sc5];
-
- lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
- if (!qp) {
- rcu_read_unlock();
- goto drop;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_UD:
- rlid = 0;
- rqpn = 0;
- svc_type = IB_CC_SVCTYPE_UD;
- break;
- case IB_QPT_UC:
- rlid = be16_to_cpu(rhdr->lrh[3]);
- rqpn = qp->remote_qpn;
- svc_type = IB_CC_SVCTYPE_UC;
- break;
- default:
- goto drop;
- }
-
- process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
- rcu_read_unlock();
- }
-
- packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
- break;
- }
- default:
- break;
- }
-
-drop:
- return;
-}
-
-static inline void init_packet(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet *packet)
-{
- packet->rsize = rcd->rcvhdrqentsize; /* words */
- packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
- packet->rcd = rcd;
- packet->updegr = 0;
- packet->etail = -1;
- packet->rhf_addr = get_rhf_addr(rcd);
- packet->rhf = rhf_to_cpu(packet->rhf_addr);
- packet->rhqoff = rcd->head;
- packet->numpkt = 0;
- packet->rcv_flags = 0;
-}
-
-static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
- struct hfi1_other_headers *ohdr,
- u64 rhf, u32 bth1, struct ib_grh *grh)
-{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u32 rqpn = 0;
- u16 rlid;
- u8 sc5, svc_type;
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- rlid = be16_to_cpu(hdr->lrh[3]);
- rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
- svc_type = IB_CC_SVCTYPE_UD;
- break;
- case IB_QPT_UC:
- rlid = qp->remote_ah_attr.dlid;
- rqpn = qp->remote_qpn;
- svc_type = IB_CC_SVCTYPE_UC;
- break;
- case IB_QPT_RC:
- rlid = qp->remote_ah_attr.dlid;
- rqpn = qp->remote_qpn;
- svc_type = IB_CC_SVCTYPE_RC;
- break;
- default:
- return;
- }
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- if (rhf_dc_info(rhf))
- sc5 |= 0x10;
-
- if (bth1 & HFI1_FECN_SMASK) {
- u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
- u16 dlid = be16_to_cpu(hdr->lrh[1]);
-
- return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
- }
-
- if (bth1 & HFI1_BECN_SMASK) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = bth1 & RVT_QPN_MASK;
- u8 sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
- }
-}
-
-struct ps_mdata {
- struct hfi1_ctxtdata *rcd;
- u32 rsize;
- u32 maxcnt;
- u32 ps_head;
- u32 ps_tail;
- u32 ps_seq;
-};
-
-static inline void init_ps_mdata(struct ps_mdata *mdata,
- struct hfi1_packet *packet)
-{
- struct hfi1_ctxtdata *rcd = packet->rcd;
-
- mdata->rcd = rcd;
- mdata->rsize = packet->rsize;
- mdata->maxcnt = packet->maxcnt;
- mdata->ps_head = packet->rhqoff;
-
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
- mdata->ps_tail = get_rcvhdrtail(rcd);
- if (rcd->ctxt == HFI1_CTRL_CTXT)
- mdata->ps_seq = rcd->seq_cnt;
- else
- mdata->ps_seq = 0; /* not used with DMA_RTAIL */
- } else {
-		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
- mdata->ps_seq = rcd->seq_cnt;
- }
-}
-
-static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
- struct hfi1_ctxtdata *rcd)
-{
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
- return mdata->ps_head == mdata->ps_tail;
- return mdata->ps_seq != rhf_rcv_seq(rhf);
-}
-
-static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
- struct hfi1_ctxtdata *rcd)
-{
- /*
- * Control context can potentially receive an invalid rhf.
- * Drop such packets.
- */
- if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
- return mdata->ps_seq != rhf_rcv_seq(rhf);
-
- return 0;
-}
-
-static inline void update_ps_mdata(struct ps_mdata *mdata,
- struct hfi1_ctxtdata *rcd)
-{
- mdata->ps_head += mdata->rsize;
- if (mdata->ps_head >= mdata->maxcnt)
- mdata->ps_head = 0;
-
- /* Control context must do seq counting */
- if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
- (rcd->ctxt == HFI1_CTRL_CTXT)) {
- if (++mdata->ps_seq > 13)
- mdata->ps_seq = 1;
- }
-}
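-
-/*
- * Sequence example: the expected sequence number runs 1..13 and then
- * wraps, so a prescan starting at ps_seq 12 expects 12, 13, 1, 2, ... in
- * successive RHFs; the first mismatch marks the end of valid entries.
- */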
-
-/*
- * prescan_rxq - search through the receive queue looking for packets
- * containing Explicit Congestion Notifications (FECNs or BECNs).
- * When an ECN is found, process the Congestion Notification and toggle
- * it off.
- * This is declared as a macro so the per-port enable flag can be checked
- * quickly, avoiding the overhead of a function call when prescan is off.
- */
-#define prescan_rxq(rcd, packet) \
- do { \
- if (rcd->ppd->cc_prescan) \
- __prescan_rxq(packet); \
- } while (0)
-static void __prescan_rxq(struct hfi1_packet *packet)
-{
- struct hfi1_ctxtdata *rcd = packet->rcd;
- struct ps_mdata mdata;
-
- init_ps_mdata(&mdata, packet);
-
- while (1) {
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
- __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
- dd->rhf_offset;
- struct rvt_qp *qp;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr;
- struct ib_grh *grh = NULL;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- u64 rhf = rhf_to_cpu(rhf_addr);
- u32 etype = rhf_rcv_type(rhf), qpn, bth1;
- int is_ecn = 0;
- u8 lnh;
-
- if (ps_done(&mdata, rhf, rcd))
- break;
-
- if (ps_skip(&mdata, rhf, rcd))
- goto next;
-
- if (etype != RHF_RCV_TYPE_IB)
- goto next;
-
- hdr = (struct hfi1_ib_header *)
- hfi1_get_msgheader(dd, rhf_addr);
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
-
- if (lnh == HFI1_LRH_BTH) {
- ohdr = &hdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
- ohdr = &hdr->u.l.oth;
- grh = &hdr->u.l.grh;
- } else {
- goto next; /* just in case */
- }
- bth1 = be32_to_cpu(ohdr->bth[1]);
- is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
-
- if (!is_ecn)
- goto next;
-
- qpn = bth1 & RVT_QPN_MASK;
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
-
- if (!qp) {
- rcu_read_unlock();
- goto next;
- }
-
- process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
- rcu_read_unlock();
-
- /* turn off BECN, FECN */
- bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
- ohdr->bth[1] = cpu_to_be32(bth1);
-next:
- update_ps_mdata(&mdata, rcd);
- }
-}
-
-static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
-{
- int ret = RCV_PKT_OK;
-
- /* Set up for the next packet */
- packet->rhqoff += packet->rsize;
- if (packet->rhqoff >= packet->maxcnt)
- packet->rhqoff = 0;
-
- packet->numpkt++;
- if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
- if (thread) {
- cond_resched();
- } else {
- ret = RCV_PKT_LIMIT;
- this_cpu_inc(*packet->rcd->dd->rcv_limit);
- }
- }
-
- packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
- packet->rcd->dd->rhf_offset;
- packet->rhf = rhf_to_cpu(packet->rhf_addr);
-
- return ret;
-}
-
-static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
-{
- int ret = RCV_PKT_OK;
-
- packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
- packet->rhf_addr);
- packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
- packet->etype = rhf_rcv_type(packet->rhf);
- /* total length */
- packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
- /* retrieve eager buffer details */
- packet->ebuf = NULL;
- if (rhf_use_egr_bfr(packet->rhf)) {
- packet->etail = rhf_egr_index(packet->rhf);
- packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
- &packet->updegr);
- /*
- * Prefetch the contents of the eager buffer. It is
- * OK to send a negative length to prefetch_range().
- * The +2 is the size of the RHF.
- */
- prefetch_range(packet->ebuf,
- packet->tlen - ((packet->rcd->rcvhdrqentsize -
- (rhf_hdrq_offset(packet->rhf)
- + 2)) * 4));
- }
-
- /*
- * Call a type specific handler for the packet. We
- * should be able to trust that etype won't be beyond
- * the range of valid indexes. If it is, something is really
- * wrong and we can probably just let things come
- * crashing down. There is no need to eat another
- * comparison in this performance critical code.
- */
- packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
- packet->numpkt++;
-
- /* Set up for the next packet */
- packet->rhqoff += packet->rsize;
- if (packet->rhqoff >= packet->maxcnt)
- packet->rhqoff = 0;
-
- if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0)) {
- if (thread) {
- cond_resched();
- } else {
- ret = RCV_PKT_LIMIT;
- this_cpu_inc(*packet->rcd->dd->rcv_limit);
- }
- }
-
- packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
- packet->rcd->dd->rhf_offset;
- packet->rhf = rhf_to_cpu(packet->rhf_addr);
-
- return ret;
-}
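-
-/*
- * Prefetch arithmetic example (hypothetical values): with a 32-dword
- * rcvhdrqentsize, an rhf_hdrq_offset of 2 and a 4096-byte tlen, the
- * header queue entry already holds (32 - (2 + 2)) * 4 = 112 bytes, so
- * prefetch_range() above is asked for 4096 - 112 = 3984 bytes of the
- * eager buffer.
- */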
-
-static inline void process_rcv_update(int last, struct hfi1_packet *packet)
-{
- /*
- * Update head regs etc., every 16 packets, if not last pkt,
- * to help prevent rcvhdrq overflows, when many packets
- * are processed and queue is nearly full.
- * Don't request an interrupt for intermediate updates.
- */
- if (!last && !(packet->numpkt & 0xf)) {
- update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
- packet->etail, 0, 0);
- packet->updegr = 0;
- }
- packet->rcv_flags = 0;
-}
-
-static inline void finish_packet(struct hfi1_packet *packet)
-{
- /*
- * Nothing we need to free for the packet.
- *
- * The only thing we need to do is a final update and call for an
- * interrupt.
- */
- update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
- packet->etail, rcv_intr_dynamic, packet->numpkt);
-}
-
-static inline void process_rcv_qp_work(struct hfi1_packet *packet)
-{
- struct hfi1_ctxtdata *rcd;
- struct rvt_qp *qp, *nqp;
-
- rcd = packet->rcd;
- rcd->head = packet->rhqoff;
-
- /*
- * Iterate over all QPs waiting to respond.
- * The list won't change since the IRQ is only run on one CPU.
- */
- list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
- list_del_init(&qp->rspwait);
- if (qp->r_flags & RVT_R_RSP_NAK) {
- qp->r_flags &= ~RVT_R_RSP_NAK;
- hfi1_send_rc_ack(rcd, qp, 0);
- }
- if (qp->r_flags & RVT_R_RSP_SEND) {
- unsigned long flags;
-
- qp->r_flags &= ~RVT_R_RSP_SEND;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_OR_FLUSH_SEND)
- hfi1_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
-/*
- * Handle receive interrupts when using the no dma rtail option.
- */
-int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
-{
- u32 seq;
- int last = RCV_PKT_OK;
- struct hfi1_packet packet;
-
- init_packet(rcd, &packet);
- seq = rhf_rcv_seq(packet.rhf);
- if (seq != rcd->seq_cnt) {
- last = RCV_PKT_DONE;
- goto bail;
- }
-
- prescan_rxq(rcd, &packet);
-
- while (last == RCV_PKT_OK) {
- last = process_rcv_packet(&packet, thread);
- seq = rhf_rcv_seq(packet.rhf);
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
- last = RCV_PKT_DONE;
- process_rcv_update(last, &packet);
- }
- process_rcv_qp_work(&packet);
-bail:
- finish_packet(&packet);
- return last;
-}
-
-int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
-{
- u32 hdrqtail;
- int last = RCV_PKT_OK;
- struct hfi1_packet packet;
-
- init_packet(rcd, &packet);
- hdrqtail = get_rcvhdrtail(rcd);
- if (packet.rhqoff == hdrqtail) {
- last = RCV_PKT_DONE;
- goto bail;
- }
- smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
-
- prescan_rxq(rcd, &packet);
-
- while (last == RCV_PKT_OK) {
- last = process_rcv_packet(&packet, thread);
- if (packet.rhqoff == hdrqtail)
- last = RCV_PKT_DONE;
- process_rcv_update(last, &packet);
- }
- process_rcv_qp_work(&packet);
-bail:
- finish_packet(&packet);
- return last;
-}
-
-static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
- dd->rcd[i]->do_interrupt =
- &handle_receive_interrupt_nodma_rtail;
-}
-
-static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
- dd->rcd[i]->do_interrupt =
- &handle_receive_interrupt_dma_rtail;
-}
-
-void set_all_slowpath(struct hfi1_devdata *dd)
-{
- int i;
-
- /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
- dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
-}
-
-static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet packet,
- struct hfi1_devdata *dd)
-{
- struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
- struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
- packet.rhf_addr);
-
- if (hdr2sc(hdr, packet.rhf) != 0xf) {
- int hwstate = read_logical_state(dd);
-
- if (hwstate != LSTATE_ACTIVE) {
- dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
- return 0;
- }
-
- queue_work(rcd->ppd->hfi1_wq, lsaw);
- return 1;
- }
- return 0;
-}
-
-/*
- * handle_receive_interrupt - receive a packet
- * @rcd: the context
- * @thread: non-zero when called from a thread that may reschedule,
- *	zero when called from interrupt context
- *
- * Called from interrupt handler for errors or receive interrupt.
- * This is the slow path interrupt handler.
- */
-int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
-{
- struct hfi1_devdata *dd = rcd->dd;
- u32 hdrqtail;
- int needset, last = RCV_PKT_OK;
- struct hfi1_packet packet;
- int skip_pkt = 0;
-
- /* Control context will always use the slow path interrupt handler */
- needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
-
- init_packet(rcd, &packet);
-
- if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (seq != rcd->seq_cnt) {
- last = RCV_PKT_DONE;
- goto bail;
- }
- hdrqtail = 0;
- } else {
- hdrqtail = get_rcvhdrtail(rcd);
- if (packet.rhqoff == hdrqtail) {
- last = RCV_PKT_DONE;
- goto bail;
- }
- smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
-
- /*
- * Control context can potentially receive an invalid
- * rhf. Drop such packets.
- */
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (seq != rcd->seq_cnt)
- skip_pkt = 1;
- }
- }
-
- prescan_rxq(rcd, &packet);
-
- while (last == RCV_PKT_OK) {
- if (unlikely(dd->do_drop &&
- atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
- DROP_PACKET_ON)) {
- dd->do_drop = 0;
-
- /* On to the next packet */
- packet.rhqoff += packet.rsize;
- packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
- packet.rhqoff +
- dd->rhf_offset;
- packet.rhf = rhf_to_cpu(packet.rhf_addr);
-
- } else if (skip_pkt) {
- last = skip_rcv_packet(&packet, thread);
- skip_pkt = 0;
- } else {
- /* Auto activate link on non-SC15 packet receive */
- if (unlikely(rcd->ppd->host_link_state ==
- HLS_UP_ARMED) &&
- set_armed_to_active(rcd, packet, dd))
- goto bail;
- last = process_rcv_packet(&packet, thread);
- }
-
- if (!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
- last = RCV_PKT_DONE;
- if (needset) {
- dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
- set_all_nodma_rtail(dd);
- needset = 0;
- }
- } else {
- if (packet.rhqoff == hdrqtail)
- last = RCV_PKT_DONE;
- /*
- * Control context can potentially receive an invalid
- * rhf. Drop such packets.
- */
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- u32 seq = rhf_rcv_seq(packet.rhf);
-
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (!last && (seq != rcd->seq_cnt))
- skip_pkt = 1;
- }
-
- if (needset) {
- dd_dev_info(dd,
- "Switching to DMA_RTAIL\n");
- set_all_dma_rtail(dd);
- needset = 0;
- }
- }
-
- process_rcv_update(last, &packet);
- }
-
- process_rcv_qp_work(&packet);
-
-bail:
- /*
- * Always write head at end, and setup rcv interrupt, even
- * if no packets were processed.
- */
- finish_packet(&packet);
- return last;
-}
-
-/*
- * We may discover in the interrupt that the hardware link state has
- * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
- * and we need to update the driver's notion of the link state. We cannot
- * run set_link_state from interrupt context, so we queue this function on
- * a workqueue.
- *
- * We delay the regular interrupt processing until after the state changes
- * so that the link will be in the correct state by the time any application
- * we wake up attempts to send a reply to any message it received.
- * (Subsequent receive interrupts may possibly force the wakeup before we
- * update the link state.)
- *
- * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
- * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
- * so we're safe from use-after-free of the rcd.
- */
-void receive_interrupt_work(struct work_struct *work)
-{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- linkstate_active_work);
- struct hfi1_devdata *dd = ppd->dd;
- int i;
-
- /* Received non-SC15 packet implies neighbor_normal */
- ppd->neighbor_normal = 1;
- set_link_state(ppd, HLS_UP_ACTIVE);
-
- /*
- * Interrupt all kernel contexts that could have had an
- * interrupt during auto activation.
- */
- for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++)
- force_recv_intr(dd->rcd[i]);
-}
-
-/*
- * Convert a given MTU size to the on-wire MAD packet enumeration.
- * Return default_if_bad if the size is invalid.
- */
-int mtu_to_enum(u32 mtu, int default_if_bad)
-{
- switch (mtu) {
- case 0: return OPA_MTU_0;
- case 256: return OPA_MTU_256;
- case 512: return OPA_MTU_512;
- case 1024: return OPA_MTU_1024;
- case 2048: return OPA_MTU_2048;
- case 4096: return OPA_MTU_4096;
- case 8192: return OPA_MTU_8192;
- case 10240: return OPA_MTU_10240;
- }
- return default_if_bad;
-}
-
-u16 enum_to_mtu(int mtu)
-{
- switch (mtu) {
- case OPA_MTU_0: return 0;
- case OPA_MTU_256: return 256;
- case OPA_MTU_512: return 512;
- case OPA_MTU_1024: return 1024;
- case OPA_MTU_2048: return 2048;
- case OPA_MTU_4096: return 4096;
- case OPA_MTU_8192: return 8192;
- case OPA_MTU_10240: return 10240;
- default: return 0xffff;
- }
-}
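-
-/*
- * Round-trip example: mtu_to_enum(4096, OPA_MTU_2048) returns
- * OPA_MTU_4096 and enum_to_mtu(OPA_MTU_4096) returns 4096, while an
- * unsupported size such as 3000 falls back to the default_if_bad
- * argument.
- */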
-
-/*
- * set_mtu - set the MTU
- * @ppd: the per port data
- *
- * We can handle "any" incoming size, the issue here is whether we
- * need to restrict our outgoing size. We do not deal with what happens
- * to programs that are already running when the size changes.
- */
-int set_mtu(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- int i, drain, ret = 0, is_up = 0;
-
- ppd->ibmtu = 0;
- for (i = 0; i < ppd->vls_supported; i++)
- if (ppd->ibmtu < dd->vld[i].mtu)
- ppd->ibmtu = dd->vld[i].mtu;
- ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
-
- mutex_lock(&ppd->hls_lock);
- if (ppd->host_link_state == HLS_UP_INIT ||
- ppd->host_link_state == HLS_UP_ARMED ||
- ppd->host_link_state == HLS_UP_ACTIVE)
- is_up = 1;
-
- drain = !is_ax(dd) && is_up;
-
- if (drain)
- /*
- * MTU is specified per-VL. To ensure that no packet gets
- * stuck (due, e.g., to the MTU for the packet's VL being
- * reduced), empty the per-VL FIFOs before adjusting MTU.
- */
- ret = stop_drain_data_vls(dd);
-
- if (ret) {
- dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
- __func__);
- goto err;
- }
-
- hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);
-
- if (drain)
- open_fill_data_vls(dd); /* reopen all VLs */
-
-err:
- mutex_unlock(&ppd->hls_lock);
-
- return ret;
-}
-
-int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- ppd->lid = lid;
- ppd->lmc = lmc;
- hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
-
- dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);
-
- return 0;
-}
-
-void shutdown_led_override(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- /*
- * This pairs with the memory barrier in hfi1_start_led_override to
- * ensure that we read the correct state of LED beaconing represented
- * by led_override_timer_active
- */
- smp_rmb();
- if (atomic_read(&ppd->led_override_timer_active)) {
- del_timer_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- /* Ensure the atomic_set is visible to all CPUs */
- smp_wmb();
- }
-
- /* Hand control of the LED to the DC for normal operation */
- write_csr(dd, DCC_CFG_LED_CNTRL, 0);
-}
-
-static void run_led_override(unsigned long opaque)
-{
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
- struct hfi1_devdata *dd = ppd->dd;
- unsigned long timeout;
- int phase_idx;
-
- if (!(dd->flags & HFI1_INITTED))
- return;
-
- phase_idx = ppd->led_override_phase & 1;
-
- setextled(dd, phase_idx);
-
- timeout = ppd->led_override_vals[phase_idx];
-
- /* Set up for next phase */
- ppd->led_override_phase = !ppd->led_override_phase;
-
- mod_timer(&ppd->led_override_timer, jiffies + timeout);
-}
-
-/*
- * To have the LED blink in a particular pattern, provide timeon and timeoff
- * in milliseconds.
- * To turn off custom blinking and return to normal operation, use
- * shutdown_led_override().
- */
-void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
- unsigned int timeoff)
-{
- if (!(ppd->dd->flags & HFI1_INITTED))
- return;
-
- /* Convert to jiffies for direct use in timer */
- ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
- ppd->led_override_vals[1] = msecs_to_jiffies(timeon);
-
- /* Arbitrarily start from LED on phase */
- ppd->led_override_phase = 1;
-
- /*
- * If the timer has not already been started, do so. Use a "quick"
- * timeout so the handler will be called soon to look at our request.
- */
- if (!timer_pending(&ppd->led_override_timer)) {
- setup_timer(&ppd->led_override_timer, run_led_override,
- (unsigned long)ppd);
- ppd->led_override_timer.expires = jiffies + 1;
- add_timer(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 1);
- /* Ensure the atomic_set is visible to all CPUs */
- smp_wmb();
- }
-}
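-
-/*
- * Usage sketch (hypothetical caller):
- * hfi1_start_led_override(ppd, 500, 500) beacons the LED at roughly
- * 1 Hz (500 ms on, 500 ms off) until shutdown_led_override(ppd) hands
- * control back to the DC.
- */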
-
-/**
- * hfi1_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload). We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize. For
- * now, we only allow this if no user contexts are open that use chip
- * resources.
- */
-int hfi1_reset_device(int unit)
-{
- int ret, i;
- struct hfi1_devdata *dd = hfi1_lookup(unit);
- struct hfi1_pportdata *ppd;
- unsigned long flags;
- int pidx;
-
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- dd_dev_info(dd, "Reset on unit %u requested\n", unit);
-
- if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
- dd_dev_info(dd,
- "Invalid unit number %u or not initialized or not present\n",
- unit);
- ret = -ENXIO;
- goto bail;
- }
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- if (dd->rcd)
- for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
- if (!dd->rcd[i] || !dd->rcd[i]->cnt)
- continue;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- shutdown_led_override(ppd);
- }
- if (dd->flags & HFI1_HAS_SEND_DMA)
- sdma_exit(dd);
-
- hfi1_reset_cpu_counters(dd);
-
- ret = hfi1_init(dd, 1);
-
- if (ret)
- dd_dev_err(dd,
- "Reinitialize unit %u after reset failed with %d\n",
- unit, ret);
- else
- dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
- unit);
-
-bail:
- return ret;
-}
-
-void handle_eflags(struct hfi1_packet *packet)
-{
- struct hfi1_ctxtdata *rcd = packet->rcd;
- u32 rte = rhf_rcv_type_err(packet->rhf);
-
- rcv_hdrerr(rcd, rcd->ppd, packet);
- if (rhf_err_flags(packet->rhf))
- dd_dev_err(rcd->dd,
- "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
- rcd->ctxt, packet->rhf,
- packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
- packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
- packet->rhf & RHF_DC_ERR ? "dc " : "",
- packet->rhf & RHF_TID_ERR ? "tid " : "",
- packet->rhf & RHF_LEN_ERR ? "len " : "",
- packet->rhf & RHF_ECC_ERR ? "ecc " : "",
- packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
- packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
- rte);
-}
-
-/*
- * The following functions are called by the interrupt handler. They are type
- * specific handlers for each packet type.
- */
-int process_receive_ib(struct hfi1_packet *packet)
-{
- trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
- packet->rcd->ctxt,
- rhf_err_flags(packet->rhf),
- RHF_RCV_TYPE_IB,
- packet->hlen,
- packet->tlen,
- packet->updegr,
- rhf_egr_index(packet->rhf));
-
- if (unlikely(rhf_err_flags(packet->rhf))) {
- handle_eflags(packet);
- return RHF_RCV_CONTINUE;
- }
-
- hfi1_ib_rcv(packet);
- return RHF_RCV_CONTINUE;
-}
-
-int process_receive_bypass(struct hfi1_packet *packet)
-{
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
-
- dd_dev_err(packet->rcd->dd,
- "Bypass packets are not supported in normal operation. Dropping\n");
- return RHF_RCV_CONTINUE;
-}
-
-int process_receive_error(struct hfi1_packet *packet)
-{
- handle_eflags(packet);
-
- if (unlikely(rhf_err_flags(packet->rhf)))
- dd_dev_err(packet->rcd->dd,
- "Unhandled error packet received. Dropping.\n");
-
- return RHF_RCV_CONTINUE;
-}
-
-int kdeth_process_expected(struct hfi1_packet *packet)
-{
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
-
- dd_dev_err(packet->rcd->dd,
- "Unhandled expected packet received. Dropping.\n");
- return RHF_RCV_CONTINUE;
-}
-
-int kdeth_process_eager(struct hfi1_packet *packet)
-{
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
-
- dd_dev_err(packet->rcd->dd,
- "Unhandled eager packet received. Dropping.\n");
- return RHF_RCV_CONTINUE;
-}
-
-int process_receive_invalid(struct hfi1_packet *packet)
-{
- dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
- rhf_rcv_type(packet->rhf));
- return RHF_RCV_CONTINUE;
-}
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c
deleted file mode 100644
index 106349fc1..000000000
--- a/drivers/staging/rdma/hfi1/efivar.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "efivar.h"
-
-/* GUID for HFI1 variables in EFI */
-#define HFI1_EFIVAR_GUID EFI_GUID(0xc50a953e, 0xa8b2, 0x42a6, \
- 0xbf, 0x89, 0xd3, 0x33, 0xa6, 0xe9, 0xe6, 0xd4)
-/* largest EFI data size we expect */
-#define EFI_DATA_SIZE 4096
-
-/*
- * Read the named EFI variable. Return the size of the actual data in *size
- * and a kmalloc'ed buffer in *return_data. The caller must free the
- * data. It is guaranteed that *return_data will be NULL and *size = 0
- * if this routine fails.
- *
- * Return 0 on success, -errno on failure.
- */
-static int read_efi_var(const char *name, unsigned long *size,
- void **return_data)
-{
- efi_status_t status;
- efi_char16_t *uni_name;
- efi_guid_t guid;
- unsigned long temp_size;
- void *temp_buffer;
- void *data;
- int i;
- int ret;
-
- /* set failure return values */
- *size = 0;
- *return_data = NULL;
-
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
- return -EOPNOTSUPP;
-
- uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL);
- temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL);
-
- if (!uni_name || !temp_buffer) {
- ret = -ENOMEM;
- goto fail;
- }
-
- /* input: the size of the buffer */
- temp_size = EFI_DATA_SIZE;
-
- /* convert ASCII to unicode - it is a 1:1 mapping */
- for (i = 0; name[i]; i++)
- uni_name[i] = name[i];
-
- /* need a variable for our GUID */
- guid = HFI1_EFIVAR_GUID;
-
- /* call into EFI runtime services */
-	status = efi.get_variable(uni_name, &guid, NULL, &temp_size,
-				  temp_buffer);
-
- /*
- * It would be nice to call efi_status_to_err() here, but that
- * is in the EFIVAR_FS code and may not be compiled in.
- * However, even that is insufficient since it does not cover
- * EFI_BUFFER_TOO_SMALL which could be an important return.
- * For now, just split out success or not found.
- */
- ret = status == EFI_SUCCESS ? 0 :
- status == EFI_NOT_FOUND ? -ENOENT :
- -EINVAL;
- if (ret)
- goto fail;
-
- /*
- * We have successfully read the EFI variable into our
- * temporary buffer. Now allocate a correctly sized
- * buffer.
- */
- data = kmemdup(temp_buffer, temp_size, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto fail;
- }
-
- *size = temp_size;
- *return_data = data;
-
-fail:
- kfree(uni_name);
- kfree(temp_buffer);
-
- return ret;
-}
-
-/*
- * Read an HFI1 EFI variable of the form:
- * <PCIe address>-<kind>
- * Return a kmalloc'ed buffer and the size of the data.
- *
- * Returns 0 on success, -errno on failure.
- */
-int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
- unsigned long *size, void **return_data)
-{
- char name[64];
-
- /* create a common prefix */
- snprintf(name, sizeof(name), "%04x:%02x:%02x.%x-%s",
- pci_domain_nr(dd->pcidev->bus),
- dd->pcidev->bus->number,
- PCI_SLOT(dd->pcidev->devfn),
- PCI_FUNC(dd->pcidev->devfn),
- kind);
-
- return read_efi_var(name, size, return_data);
-}
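-
-/*
- * Name format example: for a device at PCI address 0000:02:00.0 and a
- * (hypothetical) kind of "serial", the variable looked up is
- * "0000:02:00.0-serial" under HFI1_EFIVAR_GUID.
- */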
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/staging/rdma/hfi1/efivar.h
deleted file mode 100644
index 94e9e70de..000000000
--- a/drivers/staging/rdma/hfi1/efivar.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef _HFI1_EFIVAR_H
-#define _HFI1_EFIVAR_H
-
-#include <linux/efi.h>
-
-#include "hfi.h"
-
-int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
- unsigned long *size, void **return_data);
-
-#endif /* _HFI1_EFIVAR_H */
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
deleted file mode 100644
index bd8771570..000000000
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/delay.h>
-#include "hfi.h"
-#include "common.h"
-#include "eprom.h"
-
-/*
- * The EPROM is logically divided into three partitions:
- * partition 0: the first 128K, visible from PCI ROM BAR
- * partition 1: 4K config file (sector size)
- * partition 2: the rest
- */
-#define P0_SIZE (128 * 1024)
-#define P1_SIZE (4 * 1024)
-#define P1_START P0_SIZE
-#define P2_START (P0_SIZE + P1_SIZE)
-
-/* erase sizes supported by the controller */
-#define SIZE_4KB (4 * 1024)
-#define MASK_4KB (SIZE_4KB - 1)
-
-#define SIZE_32KB (32 * 1024)
-#define MASK_32KB (SIZE_32KB - 1)
-
-#define SIZE_64KB (64 * 1024)
-#define MASK_64KB (SIZE_64KB - 1)
-
-/* controller page size, in bytes */
-#define EP_PAGE_SIZE 256
-#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
-
-/* controller commands */
-#define CMD_SHIFT 24
-#define CMD_NOP (0)
-#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr)
-#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
-#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
-#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr)
-#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
-#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
-#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
-#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
-
-/* controller interface speeds */
-#define EP_SPEED_FULL 0x2 /* full speed */
-
-/* controller status register 1 bits */
-#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */
-
-/* sleep length while waiting for controller */
-#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
-#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
-
-/* GPIO pins */
-#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */
-
-/*
- * How long to wait for the EPROM to become available, in ms.
- * A 32 Mb EPROM built to spec takes around 40s to erase then write.
- * Double it for safety.
- */
-#define EPROM_TIMEOUT 80000 /* ms */
-
-/*
- * Turn on external enable line that allows writing on the flash.
- */
-static void write_enable(struct hfi1_devdata *dd)
-{
- /* raise signal */
- write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
- /* raise enable */
- write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
-}
-
-/*
- * Turn off external enable line that allows writing on the flash.
- */
-static void write_disable(struct hfi1_devdata *dd)
-{
- /* lower signal */
- write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
- /* lower enable */
- write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
-}
-
-/*
- * Wait for the device to become not busy. Must be called after all
- * write or erase operations.
- */
-static int wait_for_not_busy(struct hfi1_devdata *dd)
-{
- unsigned long count = 0;
- u64 reg;
- int ret = 0;
-
- /* starts page mode */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
- while (1) {
- usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
- count++;
- reg = read_csr(dd, ASIC_EEP_DATA);
- if ((reg & SR1_BUSY) == 0)
- break;
- /* 200s is the largest time for a 128Mb device */
- if (count > COUNT_DELAY_SEC(200)) {
- dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
- ret = -ETIMEDOUT;
- break; /* break, not goto - must stop page mode */
- }
- }
-
- /* stop page mode with a NOP */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
-
- return ret;
-}
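-
-/*
- * Timing example: with WAIT_SLEEP_US at 100, COUNT_DELAY_SEC(200)
- * expands to 200 * (1000000 / 100) = 2,000,000 polls, so the loop above
- * gives up after roughly 200 seconds of the BUSY bit staying set.
- */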
-
-/*
- * Read the device ID from the SPI controller.
- */
-static u32 read_device_id(struct hfi1_devdata *dd)
-{
- /* read the Manufacture Device ID */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
- return (u32)read_csr(dd, ASIC_EEP_DATA);
-}
-
-/*
- * Erase the whole flash.
- */
-static int erase_chip(struct hfi1_devdata *dd)
-{
- int ret;
-
- write_enable(dd);
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
- ret = wait_for_not_busy(dd);
-
- write_disable(dd);
-
- return ret;
-}
-
-/*
- * Erase a range.
- */
-static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
-{
- u32 end = start + len;
- int ret = 0;
-
- if (end < start)
- return -EINVAL;
-
- /* check the end points for the minimum erase */
- if ((start & MASK_4KB) || (end & MASK_4KB)) {
- dd_dev_err(dd,
- "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
- __func__, start, end);
- return -EINVAL;
- }
-
- write_enable(dd);
-
- while (start < end) {
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- /* check in order of largest to smallest */
- if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_64KB(start));
- start += SIZE_64KB;
- } else if (((start & MASK_32KB) == 0) &&
- (start + SIZE_32KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_32KB(start));
- start += SIZE_32KB;
- } else { /* 4KB will work */
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_4KB(start));
- start += SIZE_4KB;
- }
- ret = wait_for_not_busy(dd);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
-
- return ret;
-}
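-
-/*
- * Erase-selection example: erasing [0x1000, 0x21000) issues seven 4KB
- * erases (0x1000 through 0x7000), one 32KB erase at 0x8000, one 64KB
- * erase at 0x10000 and a final 4KB erase at 0x20000 - always the
- * largest erase whose alignment and remaining length permit it.
- */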
-
-/*
- * Read a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
- for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
- result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
-}
-
-/*
- * Read length bytes starting at offset. Copy to user address addr.
- */
-static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- read_page(dd, start + offset, buffer);
- if (copy_to_user((void __user *)(addr + offset),
- buffer, EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-/*
- * Write a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_DATA, data[0]);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
- for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
- write_csr(dd, ASIC_EEP_DATA, data[i]);
- /* will close the open page */
- return wait_for_not_busy(dd);
-}
-
-/*
- * Write length bytes starting at offset. Read from user address addr.
- */
-static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- write_enable(dd);
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- if (copy_from_user(buffer, (void __user *)(addr + offset),
- EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- ret = write_page(dd, start + offset, buffer);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
- return ret;
-}
-
-/* convert a range composite to a length, in bytes */
-static inline u32 extract_rlen(u32 composite)
-{
- return (composite & 0xffff) * EP_PAGE_SIZE;
-}
-
-/* convert a range composite to a start, in bytes */
-static inline u32 extract_rstart(u32 composite)
-{
- return (composite >> 16) * EP_PAGE_SIZE;
-}
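-
-/*
- * Composite example: a cmd->len of 0x00400010 encodes a start of
- * 0x40 * 256 = 16384 bytes and a length of 0x10 * 256 = 4096 bytes -
- * start in the high 16 bits, length in the low 16 bits, both in
- * EP_PAGE_SIZE units.
- */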
-
-/*
- * Perform the given operation on the EPROM. Called from user space. The
- * user credentials have already been checked.
- *
- * Return 0 on success, -ERRNO on error
- */
-int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
-{
- struct hfi1_devdata *dd;
- u32 dev_id;
- u32 rlen; /* range length */
- u32 rstart; /* range start */
- int i_minor;
- int ret = 0;
-
- /*
- * Map the device file to device data using the relative minor.
- * The device file minor number is the unit number + 1. 0 is
- * the generic device file - reject it.
- */
- i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- if (i_minor <= 0)
- return -EINVAL;
- dd = hfi1_lookup(i_minor - 1);
- if (!dd) {
- pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
- return -EINVAL;
- }
-
- /* some devices do not have an EPROM */
- if (!dd->eprom_available)
- return -EOPNOTSUPP;
-
- ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
- if (ret) {
- dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
- __func__);
- goto done_asic;
- }
-
- dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
- __func__, cmd->type, cmd->len, cmd->addr);
-
- switch (cmd->type) {
- case HFI1_CMD_EP_INFO:
- if (cmd->len != sizeof(u32)) {
- ret = -ERANGE;
- break;
- }
- dev_id = read_device_id(dd);
- /* addr points to a u32 user buffer */
- if (copy_to_user((void __user *)cmd->addr, &dev_id,
- sizeof(u32)))
- ret = -EFAULT;
- break;
-
- case HFI1_CMD_EP_ERASE_CHIP:
- ret = erase_chip(dd);
- break;
-
- case HFI1_CMD_EP_ERASE_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = erase_range(dd, rstart, rlen);
- break;
-
- case HFI1_CMD_EP_READ_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = read_length(dd, rstart, rlen, cmd->addr);
- break;
-
- case HFI1_CMD_EP_WRITE_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = write_length(dd, rstart, rlen, cmd->addr);
- break;
-
- default:
- dd_dev_err(dd, "%s: unexpected command %d\n",
- __func__, cmd->type);
- ret = -EINVAL;
- break;
- }
-
- release_chip_resource(dd, CR_EPROM);
-done_asic:
- return ret;
-}
-
-/*
- * Initialize the EPROM handler.
- */
-int eprom_init(struct hfi1_devdata *dd)
-{
- int ret = 0;
-
- /* only the discrete chip has an EPROM */
- if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
- return 0;
-
- /*
- * It is OK if both HFIs reset the EPROM as long as they don't
- * do it at the same time.
- */
- ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
- if (ret) {
- dd_dev_err(dd,
- "%s: unable to acquire EPROM resource, no EPROM support\n",
- __func__);
- goto done_asic;
- }
-
- /* reset EPROM to be sure it is in a good state */
-
- /* set reset */
- write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
- /* clear reset, set speed */
- write_csr(dd, ASIC_EEP_CTL_STAT,
- EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
-
- /* wake the device with command "release powerdown NoID" */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
-
- dd->eprom_available = true;
- release_chip_resource(dd, CR_EPROM);
-done_asic:
- return ret;
-}
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/staging/rdma/hfi1/eprom.h
deleted file mode 100644
index d41f0b1af..000000000
--- a/drivers/staging/rdma/hfi1/eprom.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-struct hfi1_cmd;
-struct hfi1_devdata;
-
-int eprom_init(struct hfi1_devdata *dd);
-int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd);
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
deleted file mode 100644
index c1c5bf82a..000000000
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ /dev/null
@@ -1,1773 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-
-#include <rdma/ib.h>
-
-#include "hfi.h"
-#include "pio.h"
-#include "device.h"
-#include "common.h"
-#include "trace.h"
-#include "user_sdma.h"
-#include "user_exp_rcv.h"
-#include "eprom.h"
-#include "aspm.h"
-#include "mmu_rb.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-
-#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
-
-/*
- * File operation functions
- */
-static int hfi1_file_open(struct inode *, struct file *);
-static int hfi1_file_close(struct inode *, struct file *);
-static ssize_t hfi1_file_write(struct file *, const char __user *,
- size_t, loff_t *);
-static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
-static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
-
-static u64 kvirt_to_phys(void *);
-static int assign_ctxt(struct file *, struct hfi1_user_info *);
-static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
-static int user_init(struct file *);
-static int get_ctxt_info(struct file *, void __user *, __u32);
-static int get_base_info(struct file *, void __user *, __u32);
-static int setup_ctxt(struct file *);
-static int setup_subctxt(struct hfi1_ctxtdata *);
-static int get_user_context(struct file *, struct hfi1_user_info *,
- int, unsigned);
-static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
-static int allocate_ctxt(struct file *, struct hfi1_devdata *,
- struct hfi1_user_info *);
-static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
-static unsigned int poll_next(struct file *, struct poll_table_struct *);
-static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
-static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
-static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
-static int vma_fault(struct vm_area_struct *, struct vm_fault *);
-
-static const struct file_operations hfi1_file_ops = {
- .owner = THIS_MODULE,
- .write = hfi1_file_write,
- .write_iter = hfi1_write_iter,
- .open = hfi1_file_open,
- .release = hfi1_file_close,
- .poll = hfi1_poll,
- .mmap = hfi1_file_mmap,
- .llseek = noop_llseek,
-};
-
-static struct vm_operations_struct vm_ops = {
- .fault = vma_fault,
-};
-
-/*
- * Types of memory mapped into user processes' space
- */
-enum mmap_types {
- PIO_BUFS = 1,
- PIO_BUFS_SOP,
- PIO_CRED,
- RCV_HDRQ,
- RCV_EGRBUF,
- UREGS,
- EVENTS,
- STATUS,
- RTAIL,
- SUBCTXT_UREGS,
- SUBCTXT_RCV_HDRQ,
- SUBCTXT_EGRBUF,
- SDMA_COMP
-};
-
-/*
- * Masks and offsets defining the mmap tokens
- */
-#define HFI1_MMAP_OFFSET_MASK 0xfffULL
-#define HFI1_MMAP_OFFSET_SHIFT 0
-#define HFI1_MMAP_SUBCTXT_MASK 0xfULL
-#define HFI1_MMAP_SUBCTXT_SHIFT 12
-#define HFI1_MMAP_CTXT_MASK 0xffULL
-#define HFI1_MMAP_CTXT_SHIFT 16
-#define HFI1_MMAP_TYPE_MASK 0xfULL
-#define HFI1_MMAP_TYPE_SHIFT 24
-#define HFI1_MMAP_MAGIC_MASK 0xffffffffULL
-#define HFI1_MMAP_MAGIC_SHIFT 32
-
-#define HFI1_MMAP_MAGIC 0xdabbad00
-
-#define HFI1_MMAP_TOKEN_SET(field, val) \
- (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
-#define HFI1_MMAP_TOKEN_GET(field, token) \
- (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
-#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \
- (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
- HFI1_MMAP_TOKEN_SET(TYPE, type) | \
- HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
- HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
- HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
-
-#define dbg(fmt, ...) \
- pr_info(fmt, ##__VA_ARGS__)
-
-static inline int is_valid_mmap(u64 token)
-{
- return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
-}
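-
-/*
- * Worked example (editorial note, not part of the original source):
- * HFI1_MMAP_TOKEN(UREGS, 3, 1, 0) ORs the fields together as
- *
- * MAGIC 0xdabbad00 << 32
- * TYPE UREGS (6) << 24 -> 0x06000000
- * CTXT 3 << 16 -> 0x00030000
- * SUBCTXT 1 << 12 -> 0x00001000
- * OFFSET 0 << 0 -> 0x00000000
- *
- * yielding the token 0xdabbad0006031000. HFI1_MMAP_TOKEN_GET()
- * reverses each field, and is_valid_mmap() above checks only the
- * MAGIC bits.
- */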
-
-static int hfi1_file_open(struct inode *inode, struct file *fp)
-{
- /* The real work is performed later in assign_ctxt() */
- fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
- if (fp->private_data) /* no cpu affinity by default */
- ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
- return fp->private_data ? 0 : -ENOMEM;
-}
-
-static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
- size_t count, loff_t *offset)
-{
- const struct hfi1_cmd __user *ucmd;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_cmd cmd;
- struct hfi1_user_info uinfo;
- struct hfi1_tid_info tinfo;
- unsigned long addr;
- ssize_t consumed = 0, copy = 0, ret = 0;
- void *dest = NULL;
- __u64 user_val = 0;
- int uctxt_required = 1;
- int must_be_root = 0;
-
- /* FIXME: This interface cannot continue out of staging */
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
- return -EACCES;
-
- if (count < sizeof(cmd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct hfi1_cmd __user *)data;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd);
-
- switch (cmd.type) {
- case HFI1_CMD_ASSIGN_CTXT:
- uctxt_required = 0; /* assigned user context not required */
- copy = sizeof(uinfo);
- dest = &uinfo;
- break;
- case HFI1_CMD_SDMA_STATUS_UPD:
- case HFI1_CMD_CREDIT_UPD:
- copy = 0;
- break;
- case HFI1_CMD_TID_UPDATE:
- case HFI1_CMD_TID_FREE:
- case HFI1_CMD_TID_INVAL_READ:
- copy = sizeof(tinfo);
- dest = &tinfo;
- break;
- case HFI1_CMD_USER_INFO:
- case HFI1_CMD_RECV_CTRL:
- case HFI1_CMD_POLL_TYPE:
- case HFI1_CMD_ACK_EVENT:
- case HFI1_CMD_CTXT_INFO:
- case HFI1_CMD_SET_PKEY:
- case HFI1_CMD_CTXT_RESET:
- copy = 0;
- user_val = cmd.addr;
- break;
- case HFI1_CMD_EP_INFO:
- case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_RANGE:
- case HFI1_CMD_EP_READ_RANGE:
- case HFI1_CMD_EP_WRITE_RANGE:
- uctxt_required = 0; /* assigned user context not required */
- must_be_root = 1; /* validate user */
- copy = 0;
- break;
- default:
- ret = -EINVAL;
- goto bail;
- }
-
- /* If the command comes with user data, copy it. */
- if (copy) {
- if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
- ret = -EFAULT;
- goto bail;
- }
- consumed += copy;
- }
-
- /*
- * Make sure there is a uctxt when needed.
- */
- if (uctxt_required && !uctxt) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* only root can do these operations */
- if (must_be_root && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto bail;
- }
-
- switch (cmd.type) {
- case HFI1_CMD_ASSIGN_CTXT:
- ret = assign_ctxt(fp, &uinfo);
- if (ret < 0)
- goto bail;
- ret = setup_ctxt(fp);
- if (ret)
- goto bail;
- ret = user_init(fp);
- break;
- case HFI1_CMD_CTXT_INFO:
- ret = get_ctxt_info(fp, (void __user *)(unsigned long)
- user_val, cmd.len);
- break;
- case HFI1_CMD_USER_INFO:
- ret = get_base_info(fp, (void __user *)(unsigned long)
- user_val, cmd.len);
- break;
- case HFI1_CMD_SDMA_STATUS_UPD:
- break;
- case HFI1_CMD_CREDIT_UPD:
- if (uctxt && uctxt->sc)
- sc_return_credits(uctxt->sc);
- break;
- case HFI1_CMD_TID_UPDATE:
- ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
- if (!ret) {
- /*
- * Copy the number of tidlist entries we used
- * and the length of the buffer we registered.
- * These fields are adjacent in the structure so
- * we can copy them at the same time.
- */
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt) +
- sizeof(tinfo.length)))
- ret = -EFAULT;
- }
- break;
- case HFI1_CMD_TID_INVAL_READ:
- ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
- if (ret)
- break;
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- ret = -EFAULT;
- break;
- case HFI1_CMD_TID_FREE:
- ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
- if (ret)
- break;
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- ret = -EFAULT;
- break;
- case HFI1_CMD_RECV_CTRL:
- ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
- break;
- case HFI1_CMD_POLL_TYPE:
- uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
- break;
- case HFI1_CMD_ACK_EVENT:
- ret = user_event_ack(uctxt, fd->subctxt, user_val);
- break;
- case HFI1_CMD_SET_PKEY:
- if (HFI1_CAP_IS_USET(PKEY_CHECK))
- ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
- else
- ret = -EPERM;
- break;
- case HFI1_CMD_CTXT_RESET: {
- struct send_context *sc;
- struct hfi1_devdata *dd;
-
- if (!uctxt || !uctxt->dd || !uctxt->sc) {
- ret = -EINVAL;
- break;
- }
- /*
- * There is no protection here. User level has to
- * guarantee that no one will be writing to the send
- * context while it is being re-initialized.
- * If user level breaks that guarantee, it will break
- * its own context and no one else's.
- */
- dd = uctxt->dd;
- sc = uctxt->sc;
- /*
- * Wait until the interrupt handler has marked the
- * context as halted or frozen. Report error if we time
- * out.
- */
- wait_event_interruptible_timeout(
- sc->halt_wait, (sc->flags & SCF_HALTED),
- msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (!(sc->flags & SCF_HALTED)) {
- ret = -ENOLCK;
- break;
- }
- /*
- * If the send context was halted due to a Freeze,
- * wait until the device has been "unfrozen" before
- * resetting the context.
- */
- if (sc->flags & SCF_FROZEN) {
- wait_event_interruptible_timeout(
- dd->event_queue,
- !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
- msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (dd->flags & HFI1_FROZEN) {
- ret = -ENOLCK;
- break;
- }
- if (dd->flags & HFI1_FORCED_FREEZE) {
- /*
- * Don't allow a context reset while we are in a
- * forced freeze
- */
- ret = -ENODEV;
- break;
- }
- sc_disable(sc);
- ret = sc_enable(sc);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
- uctxt->ctxt);
- } else {
- ret = sc_restart(sc);
- }
- if (!ret)
- sc_return_credits(sc);
- break;
- }
- case HFI1_CMD_EP_INFO:
- case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_RANGE:
- case HFI1_CMD_EP_READ_RANGE:
- case HFI1_CMD_EP_WRITE_RANGE:
- ret = handle_eprom_command(fp, &cmd);
- break;
- }
-
- if (ret >= 0)
- ret = consumed;
-bail:
- return ret;
-}
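-
-/*
- * Editorial sketch, not in the original driver: the TID_UPDATE case
- * above copies tinfo.tidcnt and tinfo.length back to user space with
- * one copy_to_user() because it assumes the two fields are adjacent
- * in struct hfi1_tid_info. A compile-time check of that assumption
- * could look like the following (hfi1_check_tid_info_layout is a
- * hypothetical helper name):
- */
-static inline void hfi1_check_tid_info_layout(void)
-{
- BUILD_BUG_ON(offsetof(struct hfi1_tid_info, length) !=
- offsetof(struct hfi1_tid_info, tidcnt) +
- sizeof(((struct hfi1_tid_info *)0)->tidcnt));
-}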
-
-static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
-{
- struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
- struct hfi1_user_sdma_comp_q *cq = fd->cq;
- int ret = 0, done = 0, reqs = 0;
- unsigned long dim = from->nr_segs;
-
- if (!cq || !pq) {
- ret = -EIO;
- goto done;
- }
-
- if (!iter_is_iovec(from) || !dim) {
- ret = -EINVAL;
- goto done;
- }
-
- hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
- fd->uctxt->ctxt, fd->subctxt, dim);
-
- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
- ret = -ENOSPC;
- goto done;
- }
-
- while (dim) {
- unsigned long count = 0;
-
- ret = hfi1_user_sdma_process_request(
- kiocb->ki_filp, (struct iovec *)(from->iov + done),
- dim, &count);
- if (ret)
- goto done;
- dim -= count;
- done += count;
- reqs++;
- }
-done:
- return ret ? ret : reqs;
-}
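-
-/*
- * Editorial note: user space reaches the loop above with a vectored
- * write on the context file descriptor. A hypothetical sketch (the
- * payload layout of each request is defined by user_sdma.h, not shown
- * here; req and req_len stand in for a prepared request):
- *
- * #include <sys/uio.h>
- *
- * struct iovec iov = { .iov_base = req, .iov_len = req_len };
- * ssize_t n = writev(ctxt_fd, &iov, 1); // one SDMA request
- */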
-
-static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd;
- unsigned long flags, pfn;
- u64 token = vma->vm_pgoff << PAGE_SHIFT,
- memaddr = 0;
- u8 subctxt, mapio = 0, vmf = 0, type;
- ssize_t memlen = 0;
- int ret = 0;
- u16 ctxt;
-
- if (!is_valid_mmap(token) || !uctxt ||
- !(vma->vm_flags & VM_SHARED)) {
- ret = -EINVAL;
- goto done;
- }
- dd = uctxt->dd;
- ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
- subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
- type = HFI1_MMAP_TOKEN_GET(TYPE, token);
- if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
- ret = -EINVAL;
- goto done;
- }
-
- flags = vma->vm_flags;
-
- switch (type) {
- case PIO_BUFS:
- case PIO_BUFS_SOP:
- memaddr = ((dd->physaddr + TXE_PIO_SEND) +
- /* chip pio base */
- (uctxt->sc->hw_context * BIT(16))) +
- /* 64K PIO space / ctxt */
- (type == PIO_BUFS_SOP ?
- (TXE_PIO_SIZE / 2) : 0); /* sop? */
- /*
- * Map only the amount allocated to the context, not the
- * context's entire available PIO space.
- */
- memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
- flags &= ~VM_MAYREAD;
- flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- mapio = 1;
- break;
- case PIO_CRED:
- if (flags & VM_WRITE) {
- ret = -EPERM;
- goto done;
- }
- /*
- * The credit return location for this context could be on the
- * second or third page allocated for credit returns (if the
- * number of enabled contexts is > 64 or > 128, respectively).
- */
- memaddr = dd->cr_base[uctxt->numa_id].pa +
- (((u64)uctxt->sc->hw_free -
- (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
- memlen = PAGE_SIZE;
- flags &= ~VM_MAYWRITE;
- flags |= VM_DONTCOPY | VM_DONTEXPAND;
- /*
- * The driver has already allocated memory for credit
- * returns and programmed it into the chip. Has that
- * memory been flagged as non-cached?
- */
- /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
- mapio = 1;
- break;
- case RCV_HDRQ:
- memaddr = uctxt->rcvhdrq_phys;
- memlen = uctxt->rcvhdrq_size;
- break;
- case RCV_EGRBUF: {
- unsigned long addr;
- int i;
- /*
- * The RcvEgr buffer needs to be handled differently
- * as multiple non-contiguous pages need to be mapped
- * into the user process.
- */
- memlen = uctxt->egrbufs.size;
- if ((vma->vm_end - vma->vm_start) != memlen) {
- dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
- (vma->vm_end - vma->vm_start), memlen);
- ret = -EINVAL;
- goto done;
- }
- if (vma->vm_flags & VM_WRITE) {
- ret = -EPERM;
- goto done;
- }
- vma->vm_flags &= ~VM_MAYWRITE;
- addr = vma->vm_start;
- for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
- ret = remap_pfn_range(
- vma, addr,
- uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
- uctxt->egrbufs.buffers[i].len,
- vma->vm_page_prot);
- if (ret < 0)
- goto done;
- addr += uctxt->egrbufs.buffers[i].len;
- }
- ret = 0;
- goto done;
- }
- case UREGS:
- /*
- * Map only the page that contains this context's user
- * registers.
- */
- memaddr = (unsigned long)
- (dd->physaddr + RXE_PER_CONTEXT_USER)
- + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
- /*
- * TidFlow table is on the same page as the rest of the
- * user registers.
- */
- memlen = PAGE_SIZE;
- flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- mapio = 1;
- break;
- case EVENTS:
- /*
- * Use the page where this context's flags are. User level
- * knows where its own bitmap is within the page.
- */
- memaddr = (unsigned long)(dd->events +
- ((uctxt->ctxt - dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
- memlen = PAGE_SIZE;
- /*
- * v3.7 removes VM_RESERVED but the effect is kept by
- * using VM_IO.
- */
- flags |= VM_IO | VM_DONTEXPAND;
- vmf = 1;
- break;
- case STATUS:
- memaddr = kvirt_to_phys((void *)dd->status);
- memlen = PAGE_SIZE;
- flags |= VM_IO | VM_DONTEXPAND;
- break;
- case RTAIL:
- if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
- /*
- * If the memory allocation failed, the context alloc
- * also would have failed, so we would never get here
- */
- ret = -EINVAL;
- goto done;
- }
- if (flags & VM_WRITE) {
- ret = -EPERM;
- goto done;
- }
- memaddr = uctxt->rcvhdrqtailaddr_phys;
- memlen = PAGE_SIZE;
- flags &= ~VM_MAYWRITE;
- break;
- case SUBCTXT_UREGS:
- memaddr = (u64)uctxt->subctxt_uregbase;
- memlen = PAGE_SIZE;
- flags |= VM_IO | VM_DONTEXPAND;
- vmf = 1;
- break;
- case SUBCTXT_RCV_HDRQ:
- memaddr = (u64)uctxt->subctxt_rcvhdr_base;
- memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
- flags |= VM_IO | VM_DONTEXPAND;
- vmf = 1;
- break;
- case SUBCTXT_EGRBUF:
- memaddr = (u64)uctxt->subctxt_rcvegrbuf;
- memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
- flags |= VM_IO | VM_DONTEXPAND;
- flags &= ~VM_MAYWRITE;
- vmf = 1;
- break;
- case SDMA_COMP: {
- struct hfi1_user_sdma_comp_q *cq = fd->cq;
-
- if (!cq) {
- ret = -EFAULT;
- goto done;
- }
- memaddr = (u64)cq->comps;
- memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
- flags |= VM_IO | VM_DONTEXPAND;
- vmf = 1;
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
-
- if ((vma->vm_end - vma->vm_start) != memlen) {
- hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
- uctxt->ctxt, fd->subctxt,
- (vma->vm_end - vma->vm_start), memlen);
- ret = -EINVAL;
- goto done;
- }
-
- vma->vm_flags = flags;
- hfi1_cdbg(PROC,
- "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
- ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
- vma->vm_end - vma->vm_start, vma->vm_flags);
- pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
- if (vmf) {
- vma->vm_pgoff = pfn;
- vma->vm_ops = &vm_ops;
- ret = 0;
- } else if (mapio) {
- ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
- vma->vm_page_prot);
- } else {
- ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
- vma->vm_page_prot);
- }
-done:
- return ret;
-}
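-
-/*
- * Editorial note: the tokens come back to user space via
- * get_base_info() below and are passed to mmap() as the offset. A
- * hypothetical sketch for the per-context user registers (the UREGS
- * token has a zero OFFSET field, so it is already page aligned;
- * binfo is the struct hfi1_base_info returned by HFI1_CMD_USER_INFO,
- * and 4096 assumes 4 KiB pages):
- *
- * #include <sys/mman.h>
- *
- * void *uregs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
- *                    MAP_SHARED, ctxt_fd, binfo.user_regbase);
- */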
-
-/*
- * Local (non-chip) user memory is not mapped right away; it is
- * mapped lazily as it is accessed by the user-level code.
- */
-static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *page;
-
- page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
- if (!page)
- return VM_FAULT_SIGBUS;
-
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
-{
- struct hfi1_ctxtdata *uctxt;
- unsigned pollflag;
-
- uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
- if (!uctxt)
- pollflag = POLLERR;
- else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
- pollflag = poll_urgent(fp, pt);
- else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
- pollflag = poll_next(fp, pt);
- else /* invalid */
- pollflag = POLLERR;
-
- return pollflag;
-}
-
-static int hfi1_file_close(struct inode *inode, struct file *fp)
-{
- struct hfi1_filedata *fdata = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fdata->uctxt;
- struct hfi1_devdata *dd;
- unsigned long flags, *ev;
-
- fp->private_data = NULL;
-
- if (!uctxt)
- goto done;
-
- hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
- dd = uctxt->dd;
- mutex_lock(&hfi1_mutex);
-
- flush_wc();
- /* drain user sdma queue */
- hfi1_user_sdma_free_queues(fdata);
-
- /* release the cpu */
- hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);
-
- /*
- * Clear any left over, unhandled events so the next process that
- * gets this context doesn't get confused.
- */
- ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
- *ev = 0;
-
- if (--uctxt->cnt) {
- uctxt->active_slaves &= ~(1 << fdata->subctxt);
- uctxt->subpid[fdata->subctxt] = 0;
- mutex_unlock(&hfi1_mutex);
- goto done;
- }
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- /*
- * Disable receive context and interrupt available, reset all
- * RcvCtxtCtrl bits to default values.
- */
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_TIDFLOW_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_TAILUPD_DIS |
- HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
- HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
- /* Clear the context's J_KEY */
- hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
- /*
- * Reset context integrity checks to default.
- * (writes to CSRs probably belong in chip.c)
- */
- write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
- hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
- sc_disable(uctxt->sc);
- uctxt->pid = 0;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- dd->rcd[uctxt->ctxt] = NULL;
-
- hfi1_user_exp_rcv_free(fdata);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
- uctxt->rcvwait_to = 0;
- uctxt->piowait_to = 0;
- uctxt->rcvnowait = 0;
- uctxt->pionowait = 0;
- uctxt->event_flags = 0;
-
- hfi1_stats.sps_ctxts--;
- if (++dd->freectxts == dd->num_user_contexts)
- aspm_enable_all(dd);
- mutex_unlock(&hfi1_mutex);
- hfi1_free_ctxtdata(dd, uctxt);
-done:
- kfree(fdata);
- return 0;
-}
-
-/*
- * Convert kernel *virtual* addresses to physical addresses.
- * This is used for vmalloc'ed addresses.
- */
-static u64 kvirt_to_phys(void *addr)
-{
- struct page *page;
- u64 paddr = 0;
-
- page = vmalloc_to_page(addr);
- if (page)
- paddr = page_to_pfn(page) << PAGE_SHIFT;
-
- return paddr;
-}
-
-static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
-{
- int i_minor, ret = 0;
- unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
-
- swmajor = uinfo->userversion >> 16;
- if (swmajor != HFI1_USER_SWMAJOR) {
- ret = -ENODEV;
- goto done;
- }
-
- swminor = uinfo->userversion & 0xffff;
-
- if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
- alg = uinfo->hfi1_alg;
-
- mutex_lock(&hfi1_mutex);
- /* First, check whether we need to set up a shared context. */
- if (uinfo->subctxt_cnt) {
- struct hfi1_filedata *fd = fp->private_data;
-
- ret = find_shared_ctxt(fp, uinfo);
- if (ret < 0)
- goto done_unlock;
- if (ret)
- fd->rec_cpu_num = hfi1_get_proc_affinity(
- fd->uctxt->dd, fd->uctxt->numa_id);
- }
-
- /*
- * We execute the following block if we couldn't find a
- * shared context or if context sharing is not required.
- */
- if (!ret) {
- i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- ret = get_user_context(fp, uinfo, i_minor - 1, alg);
- }
-done_unlock:
- mutex_unlock(&hfi1_mutex);
-done:
- return ret;
-}
-
-/* return true if the device is available for general use */
-static int usable_device(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd = dd->pport;
-
- return driver_lstate(ppd) == IB_PORT_ACTIVE;
-}
-
-static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
- int devno, unsigned alg)
-{
- struct hfi1_devdata *dd = NULL;
- int ret = 0, devmax, npresent, nup, dev;
-
- devmax = hfi1_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (!nup) {
- ret = -ENETDOWN;
- goto done;
- }
- if (devno >= 0) {
- dd = hfi1_lookup(devno);
- if (!dd)
- ret = -ENODEV;
- else if (!dd->freectxts)
- ret = -EBUSY;
- } else {
- struct hfi1_devdata *pdd;
-
- if (alg == HFI1_ALG_ACROSS) {
- unsigned free = 0U;
-
- for (dev = 0; dev < devmax; dev++) {
- pdd = hfi1_lookup(dev);
- if (!pdd)
- continue;
- if (!usable_device(pdd))
- continue;
- if (pdd->freectxts &&
- pdd->freectxts > free) {
- dd = pdd;
- free = pdd->freectxts;
- }
- }
- } else {
- for (dev = 0; dev < devmax; dev++) {
- pdd = hfi1_lookup(dev);
- if (!pdd)
- continue;
- if (!usable_device(pdd))
- continue;
- if (pdd->freectxts) {
- dd = pdd;
- break;
- }
- }
- }
- if (!dd)
- ret = -EBUSY;
- }
-done:
- return ret ? ret : allocate_ctxt(fp, dd, uinfo);
-}
-
-static int find_shared_ctxt(struct file *fp,
- const struct hfi1_user_info *uinfo)
-{
- int devmax, ndev, i;
- int ret = 0;
- struct hfi1_filedata *fd = fp->private_data;
-
- devmax = hfi1_count_units(NULL, NULL);
-
- for (ndev = 0; ndev < devmax; ndev++) {
- struct hfi1_devdata *dd = hfi1_lookup(ndev);
-
- if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
- continue;
- for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
- struct hfi1_ctxtdata *uctxt = dd->rcd[i];
-
- /* Skip ctxts which are not yet open */
- if (!uctxt || !uctxt->cnt)
- continue;
- /* Skip ctxt if it doesn't match the requested one */
- if (memcmp(uctxt->uuid, uinfo->uuid,
- sizeof(uctxt->uuid)) ||
- uctxt->jkey != generate_jkey(current_uid()) ||
- uctxt->subctxt_id != uinfo->subctxt_id ||
- uctxt->subctxt_cnt != uinfo->subctxt_cnt)
- continue;
-
- /* Verify the sharing process matches the master */
- if (uctxt->userversion != uinfo->userversion ||
- uctxt->cnt >= uctxt->subctxt_cnt) {
- ret = -EINVAL;
- goto done;
- }
- fd->uctxt = uctxt;
- fd->subctxt = uctxt->cnt++;
- uctxt->subpid[fd->subctxt] = current->pid;
- uctxt->active_slaves |= 1 << fd->subctxt;
- ret = 1;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
- struct hfi1_user_info *uinfo)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt;
- unsigned ctxt;
- int ret, numa;
-
- if (dd->flags & HFI1_FROZEN) {
- /*
- * Pick an error that is distinct from all other errors
- * that are returned so the user process knows that
- * it tried to allocate while the SPC was frozen. It
- * should be able to retry with success in a short
- * while.
- */
- return -EIO;
- }
-
- for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
- if (!dd->rcd[ctxt])
- break;
-
- if (ctxt == dd->num_rcv_contexts)
- return -EBUSY;
-
- fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
- if (fd->rec_cpu_num != -1)
- numa = cpu_to_node(fd->rec_cpu_num);
- else
- numa = numa_node_id();
- uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
- if (!uctxt) {
- dd_dev_err(dd,
- "Unable to allocate ctxtdata memory, failing open\n");
- return -ENOMEM;
- }
- hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
- uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
- uctxt->numa_id);
-
- /*
- * Allocate and enable a PIO send context.
- */
- uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
- uctxt->dd->node);
- if (!uctxt->sc)
- return -ENOMEM;
-
- hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
- uctxt->sc->hw_context);
- ret = sc_enable(uctxt->sc);
- if (ret)
- return ret;
- /*
- * Set up shared context resources if user level has requested
- * shared contexts and this is the 'master' process.
- * This has to be done here so the rest of the sub-contexts find the
- * proper master.
- */
- if (uinfo->subctxt_cnt && !fd->subctxt) {
- ret = init_subctxts(uctxt, uinfo);
- /*
- * On error, we don't need to disable and de-allocate the
- * send context because it will be done during file close
- */
- if (ret)
- return ret;
- }
- uctxt->userversion = uinfo->userversion;
- uctxt->pid = current->pid;
- uctxt->flags = HFI1_CAP_UGET(MASK);
- init_waitqueue_head(&uctxt->wait);
- strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
- memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
- uctxt->jkey = generate_jkey(current_uid());
- INIT_LIST_HEAD(&uctxt->sdma_queues);
- spin_lock_init(&uctxt->sdma_qlock);
- hfi1_stats.sps_ctxts++;
- /*
- * Disable ASPM when there are open user/PSM contexts to avoid
- * issues with ASPM L1 exit latency
- */
- if (dd->freectxts-- == dd->num_user_contexts)
- aspm_disable_all(dd);
- fd->uctxt = uctxt;
-
- return 0;
-}
-
-static int init_subctxts(struct hfi1_ctxtdata *uctxt,
- const struct hfi1_user_info *uinfo)
-{
- unsigned num_subctxts;
-
- num_subctxts = uinfo->subctxt_cnt;
- if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
- return -EINVAL;
-
- uctxt->subctxt_cnt = uinfo->subctxt_cnt;
- uctxt->subctxt_id = uinfo->subctxt_id;
- uctxt->active_slaves = 1;
- uctxt->redirect_seq_cnt = 1;
- set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
-
- return 0;
-}
-
-static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
-{
- int ret = 0;
- unsigned num_subctxts = uctxt->subctxt_cnt;
-
- uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
- if (!uctxt->subctxt_uregbase) {
- ret = -ENOMEM;
- goto bail;
- }
- /* We can take the size of the RcvHdr Queue from the master */
- uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
- num_subctxts);
- if (!uctxt->subctxt_rcvhdr_base) {
- ret = -ENOMEM;
- goto bail_ureg;
- }
-
- uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
- num_subctxts);
- if (!uctxt->subctxt_rcvegrbuf) {
- ret = -ENOMEM;
- goto bail_rhdr;
- }
- goto bail;
-bail_rhdr:
- vfree(uctxt->subctxt_rcvhdr_base);
-bail_ureg:
- vfree(uctxt->subctxt_uregbase);
- uctxt->subctxt_uregbase = NULL;
-bail:
- return ret;
-}
-
-static int user_init(struct file *fp)
-{
- unsigned int rcvctrl_ops = 0;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
-
- /* make sure that the context has already been setup */
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
- return -EFAULT;
-
- /* initialize poll variables... */
- uctxt->urgent = 0;
- uctxt->urgent_poll = 0;
-
- /*
- * Now enable the ctxt for receive.
- * For chips that are set to DMA the tail register to memory
- * when it changes (and when the update bit transitions from
- * 0 to 1), we turn the context off and then back on.
- * This will (very briefly) affect any other open ctxts, but the
- * duration is very short, and therefore isn't an issue. We
- * explicitly set the in-memory tail copy to 0 beforehand, so we
- * don't have to wait to be sure the DMA update has happened
- * (chip resets head/tail to 0 on transition to enable).
- */
- if (uctxt->rcvhdrtail_kvaddr)
- clear_rcvhdrtail(uctxt);
-
- /* Setup J_KEY before enabling the context */
- hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
-
- rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
- rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
- /*
- * Ignore the bit in the flags for now until proper
- * support for multiple packets per rcv array entry is
- * added.
- */
- if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
- rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
- /*
- * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
- * We can't rely on the correct value to be set from prior
- * uses of the chip or ctxt. Therefore, add the rcvctrl op
- * for both cases.
- */
- if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
- rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
- else
- rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
- hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
-
- /* Notify any waiting slaves */
- if (uctxt->subctxt_cnt) {
- clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
- wake_up(&uctxt->wait);
- }
-
- return 0;
-}
-
-static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
-{
- struct hfi1_ctxt_info cinfo;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- int ret = 0;
-
- memset(&cinfo, 0, sizeof(cinfo));
- ret = hfi1_get_base_kinfo(uctxt, &cinfo);
- if (ret < 0)
- goto done;
- cinfo.num_active = hfi1_count_active_units();
- cinfo.unit = uctxt->dd->unit;
- cinfo.ctxt = uctxt->ctxt;
- cinfo.subctxt = fd->subctxt;
- cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
- uctxt->dd->rcv_entries.group_size) +
- uctxt->expected_count;
- cinfo.credits = uctxt->sc->credits;
- cinfo.numa_node = uctxt->numa_id;
- cinfo.rec_cpu = fd->rec_cpu_num;
- cinfo.send_ctxt = uctxt->sc->hw_context;
-
- cinfo.egrtids = uctxt->egrbufs.alloced;
- cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
- cinfo.sdma_ring_size = fd->cq->nentries;
- cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
-
- trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
- if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
- ret = -EFAULT;
-done:
- return ret;
-}
-
-static int setup_ctxt(struct file *fp)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- int ret = 0;
-
- /*
- * Context should be set up only once, including allocation and
- * programming of eager buffers. This is done when context sharing
- * is not requested, or only by the master process when it is.
- */
- if (!uctxt->subctxt_cnt || !fd->subctxt) {
- ret = hfi1_init_ctxt(uctxt->sc);
- if (ret)
- goto done;
-
- /* Now allocate the RcvHdr queue and eager buffers. */
- ret = hfi1_create_rcvhdrq(dd, uctxt);
- if (ret)
- goto done;
- ret = hfi1_setup_eagerbufs(uctxt);
- if (ret)
- goto done;
- if (uctxt->subctxt_cnt && !fd->subctxt) {
- ret = setup_subctxt(uctxt);
- if (ret)
- goto done;
- }
- } else {
- ret = wait_event_interruptible(uctxt->wait, !test_bit(
- HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- if (ret)
- goto done;
- }
-
- ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
- if (ret)
- goto done;
- /*
- * Expected receive has to be setup for all processes (including
- * shared contexts). However, it has to be done after the master
- * context has been fully configured as it depends on the
- * eager/expected split of the RcvArray entries.
- * Setting it up here ensures that the subcontexts will be waiting
- * (due to the above wait_event_interruptible()) until the master
- * is set up.
- */
- ret = hfi1_user_exp_rcv_init(fp);
- if (ret)
- goto done;
-
- set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
-done:
- return ret;
-}
-
-static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
-{
- struct hfi1_base_info binfo;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- ssize_t sz;
- unsigned offset;
- int ret = 0;
-
- trace_hfi1_uctxtdata(uctxt->dd, uctxt);
-
- memset(&binfo, 0, sizeof(binfo));
- binfo.hw_version = dd->revision;
- binfo.sw_version = HFI1_KERN_SWVERSION;
- binfo.bthqp = kdeth_qp;
- binfo.jkey = uctxt->jkey;
- /*
- * If more than 64 contexts are enabled the allocated credit
- * return will span two or three contiguous pages. Since we only
- * map the page containing the context's credit return address,
- * we need to calculate the offset in the proper page.
- */
- offset = ((u64)uctxt->sc->hw_free -
- (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
- binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
- fd->subctxt, offset);
- binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
- fd->subctxt,
- uctxt->sc->base_addr);
- binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
- uctxt->ctxt,
- fd->subctxt,
- uctxt->sc->base_addr);
- binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
- fd->subctxt,
- uctxt->rcvhdrq);
- binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
- fd->subctxt,
- uctxt->egrbufs.rcvtids[0].phys);
- binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
- fd->subctxt, 0);
- /*
- * user regs are at
- * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
- */
- binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
- fd->subctxt, 0);
- offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
- sizeof(*dd->events));
- binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
- fd->subctxt,
- offset);
- binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
- fd->subctxt,
- dd->status);
- if (HFI1_CAP_IS_USET(DMA_RTAIL))
- binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
- fd->subctxt, 0);
- if (uctxt->subctxt_cnt) {
- binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
- uctxt->ctxt,
- fd->subctxt, 0);
- binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
- uctxt->ctxt,
- fd->subctxt, 0);
- binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
- uctxt->ctxt,
- fd->subctxt, 0);
- }
- sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
- if (copy_to_user(ubase, &binfo, sz))
- ret = -EFAULT;
- return ret;
-}
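-
-/*
- * Worked example (editorial note): if this context's hw_free pointer
- * sits 0x1040 bytes past cr_base[].va and PAGE_SIZE is 4096, the
- * offset computed above is 0x1040 % 0x1000 = 0x40. That value travels
- * in the PIO_CRED token's OFFSET field, while the PIO_CRED mmap case
- * masks the same difference with PAGE_MASK to map the containing
- * (here: second) credit-return page.
- */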
-
-static unsigned int poll_urgent(struct file *fp,
- struct poll_table_struct *pt)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
-
- poll_wait(fp, &uctxt->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (uctxt->urgent != uctxt->urgent_poll) {
- pollflag = POLLIN | POLLRDNORM;
- uctxt->urgent_poll = uctxt->urgent;
- } else {
- pollflag = 0;
- set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
- }
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static unsigned int poll_next(struct file *fp,
- struct poll_table_struct *pt)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
-
- poll_wait(fp, &uctxt->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (hdrqempty(uctxt)) {
- set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
- pollflag = 0;
- } else {
- pollflag = POLLIN | POLLRDNORM;
- }
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-/*
- * Find all user contexts in use, and set the specified bit in their
- * event mask.
- * See also find_ctxt() for a similar use, that is specific to send buffers.
- */
-int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
-{
- struct hfi1_ctxtdata *uctxt;
- struct hfi1_devdata *dd = ppd->dd;
- unsigned ctxt;
- int ret = 0;
- unsigned long flags;
-
- if (!dd->events) {
- ret = -EINVAL;
- goto done;
- }
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
- ctxt++) {
- uctxt = dd->rcd[ctxt];
- if (uctxt) {
- unsigned long *evs = dd->events +
- (uctxt->ctxt - dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS;
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so set the base bit
- * first, then the remaining subctxt bits, if any
- */
- set_bit(evtbit, evs);
- for (i = 1; i < uctxt->subctxt_cnt; i++)
- set_bit(evtbit, evs + i);
- }
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-done:
- return ret;
-}
-
-/**
- * manage_rcvq - manage a context's receive queue
- * @uctxt: the context
- * @subctxt: the sub-context
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the context, for use in queue
- * overflow conditions. start_stop == 1 re-enables, to be used to
- * re-init the software copy of the head register
- */
-static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
- int start_stop)
-{
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned int rcvctrl_op;
-
- if (subctxt)
- goto bail;
- /* atomically enable or disable receive on the ctxt */
- if (start_stop) {
- /*
- * On enable, force in-memory copy of the tail register to
- * 0, so that protocol code doesn't have to worry about
- * whether or not the chip has yet updated the in-memory
- * copy or not on return from the system call. The chip
- * always resets its tail register back to 0 on a
- * transition from disabled to enabled.
- */
- if (uctxt->rcvhdrtail_kvaddr)
- clear_rcvhdrtail(uctxt);
- rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
- } else {
- rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
- }
- hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
- /* always; new head should be equal to new tail; see above */
-bail:
- return 0;
-}
-
-/*
- * Clear the event notifier events for this context.
- * The user process then performs actions appropriate to the bit
- * having been set, if desired, and checks again in the future.
- */
-static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
- unsigned long events)
-{
- int i;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned long *evs;
-
- if (!dd->events)
- return 0;
-
- evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + subctxt;
-
- for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
- if (!test_bit(i, &events))
- continue;
- clear_bit(i, evs);
- }
- return 0;
-}
-
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
- u16 pkey)
-{
- int ret = -ENOENT, i, intable = 0;
- struct hfi1_pportdata *ppd = uctxt->ppd;
- struct hfi1_devdata *dd = uctxt->dd;
-
- if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
- ret = -EINVAL;
- goto done;
- }
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
- if (pkey == ppd->pkeys[i]) {
- intable = 1;
- break;
- }
-
- if (intable)
- ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
-done:
- return ret;
-}
-
-static int ui_open(struct inode *inode, struct file *filp)
-{
- struct hfi1_devdata *dd;
-
- dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
- filp->private_data = dd; /* for other methods */
- return 0;
-}
-
-static int ui_release(struct inode *inode, struct file *filp)
-{
- /* nothing to do */
- return 0;
-}
-
-static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
-{
- struct hfi1_devdata *dd = filp->private_data;
-
- return fixed_size_llseek(filp, offset, whence,
- (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
- loff_t *f_pos)
-{
- struct hfi1_devdata *dd = filp->private_data;
- void __iomem *base = dd->kregbase;
- unsigned long total, csr_off,
- barlen = (dd->kregend - dd->kregbase);
- u64 data;
-
- /* only read 8 byte quantities */
- if ((count % 8) != 0)
- return -EINVAL;
- /* offset must be 8-byte aligned */
- if ((*f_pos % 8) != 0)
- return -EINVAL;
- /* destination buffer must be 8-byte aligned */
- if ((unsigned long)buf % 8 != 0)
- return -EINVAL;
- /* must be in range */
- if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
- return -EINVAL;
- /* only set the base if we are not starting past the BAR */
- if (*f_pos < barlen)
- base += *f_pos;
- csr_off = *f_pos;
- for (total = 0; total < count; total += 8, csr_off += 8) {
- /* accessing LCB CSRs requires more checks */
- if (is_lcb_offset(csr_off)) {
- if (read_lcb_csr(dd, csr_off, (u64 *)&data))
- break; /* failed */
- }
- /*
- * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
- * false parity error. Avoid the whole issue by not reading
- * them. These registers are defined as having a read value
- * of 0.
- */
- else if (csr_off == ASIC_GPIO_CLEAR ||
- csr_off == ASIC_GPIO_FORCE ||
- csr_off == ASIC_QSFP1_CLEAR ||
- csr_off == ASIC_QSFP1_FORCE ||
- csr_off == ASIC_QSFP2_CLEAR ||
- csr_off == ASIC_QSFP2_FORCE)
- data = 0;
- else if (csr_off >= barlen) {
- /*
- * read_8051_data can read more than just 8 bytes at
- * a time. However, folding this into the loop and
- * handling the reads in 8 byte increments allows us
- * to smoothly transition from chip memory to 8051
- * memory.
- */
- if (read_8051_data(dd,
- (u32)(csr_off - barlen),
- sizeof(data), &data))
- break; /* failed */
- } else
- data = readq(base + total);
- if (put_user(data, (unsigned long __user *)(buf + total)))
- break;
- }
- *f_pos += total;
- return total;
-}
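-
-/*
- * Editorial note: a hypothetical user-space read of one 64-bit CSR
- * through this interface (the device node name depends on
- * class_name(), which is defined elsewhere):
- *
- * #include <fcntl.h>
- * #include <stdint.h>
- * #include <unistd.h>
- *
- * int fd = open("/dev/hfi1_ui0", O_RDONLY); // hypothetical node name
- * uint64_t val; // naturally 8-byte aligned
- * // count and file offset must also be multiples of 8
- * ssize_t n = pread(fd, &val, sizeof(val), 0x100);
- */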
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- struct hfi1_devdata *dd = filp->private_data;
- void __iomem *base;
- unsigned long total, data, csr_off;
- int in_lcb;
-
- /* only write 8 byte quantities */
- if ((count % 8) != 0)
- return -EINVAL;
- /* offset must be 8-byte aligned */
- if ((*f_pos % 8) != 0)
- return -EINVAL;
- /* source buffer must be 8-byte aligned */
- if ((unsigned long)buf % 8 != 0)
- return -EINVAL;
- /* must be in range */
- if (*f_pos + count > dd->kregend - dd->kregbase)
- return -EINVAL;
-
- base = (void __iomem *)dd->kregbase + *f_pos;
- csr_off = *f_pos;
- in_lcb = 0;
- for (total = 0; total < count; total += 8, csr_off += 8) {
- if (get_user(data, (unsigned long __user *)(buf + total)))
- break;
- /* accessing LCB CSRs requires a special procedure */
- if (is_lcb_offset(csr_off)) {
- if (!in_lcb) {
- int ret = acquire_lcb_access(dd, 1);
-
- if (ret)
- break;
- in_lcb = 1;
- }
- } else {
- if (in_lcb) {
- release_lcb_access(dd, 1);
- in_lcb = 0;
- }
- }
- writeq(data, base + total);
- }
- if (in_lcb)
- release_lcb_access(dd, 1);
- *f_pos += total;
- return total;
-}
-
-static const struct file_operations ui_file_ops = {
- .owner = THIS_MODULE,
- .llseek = ui_lseek,
- .read = ui_read,
- .write = ui_write,
- .open = ui_open,
- .release = ui_release,
-};
-
-#define UI_OFFSET 192 /* device minor offset for UI devices */
-static int create_ui = 1;
-
-static struct cdev wildcard_cdev;
-static struct device *wildcard_device;
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
-static void user_remove(struct hfi1_devdata *dd)
-{
- if (atomic_dec_return(&user_count) == 0)
- hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
-
- hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
- hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
-}
-
-static int user_add(struct hfi1_devdata *dd)
-{
- char name[10];
- int ret;
-
- if (atomic_inc_return(&user_count) == 1) {
- ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
- &wildcard_cdev, &wildcard_device,
- true);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
- ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
- &dd->user_cdev, &dd->user_device,
- true);
- if (ret)
- goto done;
-
- if (create_ui) {
- snprintf(name, sizeof(name),
- "%s_ui%d", class_name(), dd->unit);
- ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
- &dd->ui_cdev, &dd->ui_device,
- false);
- if (ret)
- goto done;
- }
-
- return 0;
-done:
- user_remove(dd);
- return ret;
-}
-
-/*
- * Create per-unit files in /dev
- */
-int hfi1_device_create(struct hfi1_devdata *dd)
-{
- int r, ret;
-
- r = user_add(dd);
- ret = hfi1_diag_add(dd);
- if (r && !ret)
- ret = r;
- return ret;
-}
-
-/*
- * Remove per-unit files in /dev
- * void, because the core kernel returns no errors for this
- */
-void hfi1_device_remove(struct hfi1_devdata *dd)
-{
- user_remove(dd);
- hfi1_diag_remove(dd);
-}
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c
deleted file mode 100644
index f51570e8f..000000000
--- a/drivers/staging/rdma/hfi1/firmware.c
+++ /dev/null
@@ -1,2049 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/firmware.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/crc32.h>
-
-#include "hfi.h"
-#include "trace.h"
-
-/*
- * Make it easy to toggle the firmware file names, and whether they
- * get loaded, by editing the following. This may be something we do
- * while in development but not necessarily something a user would
- * ever need to use.
- */
-#define DEFAULT_FW_8051_NAME_FPGA "/*(DEBLOBBED)*/"
-#define DEFAULT_FW_8051_NAME_ASIC "/*(DEBLOBBED)*/"
-#define DEFAULT_FW_FABRIC_NAME "/*(DEBLOBBED)*/"
-#define DEFAULT_FW_SBUS_NAME "/*(DEBLOBBED)*/"
-#define DEFAULT_FW_PCIE_NAME "/*(DEBLOBBED)*/"
-#define DEFAULT_PLATFORM_CONFIG_NAME "/*(DEBLOBBED)*/"
-#define ALT_FW_8051_NAME_ASIC "/*(DEBLOBBED)*/"
-#define ALT_FW_FABRIC_NAME "/*(DEBLOBBED)*/"
-#define ALT_FW_SBUS_NAME "/*(DEBLOBBED)*/"
-#define ALT_FW_PCIE_NAME "/*(DEBLOBBED)*/"
-
-static uint fw_8051_load = 1;
-static uint fw_fabric_serdes_load = 1;
-static uint fw_pcie_serdes_load = 1;
-static uint fw_sbus_load = 1;
-
-/*
- * Access required in platform.c
- * Maintains state of whether the platform config was fetched via the
- * fallback option
- */
-uint platform_config_load;
-
-/* Firmware file names get set in hfi1_firmware_init() based on the above */
-static char *fw_8051_name;
-static char *fw_fabric_serdes_name;
-static char *fw_sbus_name;
-static char *fw_pcie_serdes_name;
-static char *platform_config_name;
-
-#define SBUS_MAX_POLL_COUNT 100
-#define SBUS_COUNTER(reg, name) \
- (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
- ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
-
-/*
- * Firmware security header.
- */
-struct css_header {
- u32 module_type;
- u32 header_len;
- u32 header_version;
- u32 module_id;
- u32 module_vendor;
- u32 date; /* BCD yyyymmdd */
- u32 size; /* in DWORDs */
- u32 key_size; /* in DWORDs */
- u32 modulus_size; /* in DWORDs */
- u32 exponent_size; /* in DWORDs */
- u32 reserved[22];
-};
-
-/* expected field values */
-#define CSS_MODULE_TYPE 0x00000006
-#define CSS_HEADER_LEN 0x000000a1
-#define CSS_HEADER_VERSION 0x00010000
-#define CSS_MODULE_VENDOR 0x00008086
-
-#define KEY_SIZE 256
-#define MU_SIZE 8
-#define EXPONENT_SIZE 4
-
-/* the file itself */
-struct firmware_file {
- struct css_header css_header;
- u8 modulus[KEY_SIZE];
- u8 exponent[EXPONENT_SIZE];
- u8 signature[KEY_SIZE];
- u8 firmware[];
-};
-
-struct augmented_firmware_file {
- struct css_header css_header;
- u8 modulus[KEY_SIZE];
- u8 exponent[EXPONENT_SIZE];
- u8 signature[KEY_SIZE];
- u8 r2[KEY_SIZE];
- u8 mu[MU_SIZE];
- u8 firmware[];
-};
-
-/* augmented file size difference */
-#define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
- sizeof(struct firmware_file))
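-
-/*
- * Editorial note: with the structures above (u32s and u8 arrays only,
- * so no implicit padding), AUGMENT_SIZE works out to
- * KEY_SIZE + MU_SIZE = 256 + 8 = 264 bytes. obtain_one_firmware()
- * below relies on exactly this difference to tell the two layouts
- * apart: css->size * 4 == fw->size for a plain file versus
- * css->size * 4 + AUGMENT_SIZE == fw->size for an augmented one.
- */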
-
-struct firmware_details {
- /* Linux core piece */
- const struct firmware *fw;
-
- struct css_header *css_header;
- u8 *firmware_ptr; /* pointer to binary data */
- u32 firmware_len; /* length in bytes */
- u8 *modulus; /* pointer to the modulus */
- u8 *exponent; /* pointer to the exponent */
- u8 *signature; /* pointer to the signature */
- u8 *r2; /* pointer to r2 */
- u8 *mu; /* pointer to mu */
- struct augmented_firmware_file dummy_header;
-};
-
-/*
- * The mutex protects fw_state, fw_err, and all of the firmware_details
- * variables.
- */
-static DEFINE_MUTEX(fw_mutex);
-enum fw_state {
- FW_EMPTY,
- FW_TRY,
- FW_FINAL,
- FW_ERR
-};
-
-static enum fw_state fw_state = FW_EMPTY;
-static int fw_err;
-static struct firmware_details fw_8051;
-static struct firmware_details fw_fabric;
-static struct firmware_details fw_pcie;
-static struct firmware_details fw_sbus;
-static const struct firmware *platform_config;
-
-/* flags for turn_off_spicos() */
-#define SPICO_SBUS 0x1
-#define SPICO_FABRIC 0x2
-#define ENABLE_SPICO_SMASK 0x1
-
-/* security block commands */
-#define RSA_CMD_INIT 0x1
-#define RSA_CMD_START 0x2
-
-/* security block status */
-#define RSA_STATUS_IDLE 0x0
-#define RSA_STATUS_ACTIVE 0x1
-#define RSA_STATUS_DONE 0x2
-#define RSA_STATUS_FAILED 0x3
-
-/* RSA engine timeout, in ms */
-#define RSA_ENGINE_TIMEOUT 100 /* ms */
-
-/* hardware mutex timeout, in ms */
-#define HM_TIMEOUT 10 /* ms */
-
-/* 8051 memory access timeout, in us */
-#define DC8051_ACCESS_TIMEOUT 100 /* us */
-
-/* the number of fabric SerDes on the SBus */
-#define NUM_FABRIC_SERDES 4
-
-/* SBus fabric SerDes addresses, one set per HFI */
-static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
- { 0x01, 0x02, 0x03, 0x04 },
- { 0x28, 0x29, 0x2a, 0x2b }
-};
-
-/* SBus PCIe SerDes addresses, one set per HFI */
-static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
- { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
- 0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
- { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
- 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
-};
-
-/* SBus PCIe PCS addresses, one set per HFI */
-const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
- { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
- 0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
- { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
- 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
-};
-
-/* SBus fabric SerDes broadcast addresses, one per HFI */
-static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
-static const u8 all_fabric_serdes_broadcast = 0xe1;
-
-/* SBus PCIe SerDes broadcast addresses, one per HFI */
-const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
-static const u8 all_pcie_serdes_broadcast = 0xe0;
-
-/* forwards */
-static void dispose_one_firmware(struct firmware_details *fdet);
-static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
- struct firmware_details *fdet);
-
-/*
- * Read a single 64-bit value from 8051 data memory.
- *
- * Expects:
- * o caller to have already set up data read, no auto increment
- * o caller to turn off read enable when finished
- *
- * The address argument is a byte offset. Bits 0:2 in the address are
- * ignored - i.e. the hardware will always do aligned 8-byte reads as if
- * the lower bits are zero.
- *
- * Return 0 on success, -ENXIO on a read error (timeout).
- */
-static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
-{
- u64 reg;
- int count;
-
- /* start the read at the given address */
- reg = ((addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
- << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
- | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK;
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
-
- /* wait until ACCESS_COMPLETED is set */
- count = 0;
- while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
- & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
- == 0) {
- count++;
- if (count > DC8051_ACCESS_TIMEOUT) {
- dd_dev_err(dd, "timeout reading 8051 data\n");
- return -ENXIO;
- }
- ndelay(10);
- }
-
- /* gather the data */
- *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
-
- return 0;
-}
-
-/*
- * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks.
- * Return 0 on success, -errno on error.
- */
-int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
-{
- unsigned long flags;
- u32 done;
- int ret = 0;
-
- spin_lock_irqsave(&dd->dc8051_memlock, flags);
-
- /* data read set-up, no auto-increment */
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
-
- for (done = 0; done < len; addr += 8, done += 8, result++) {
- ret = __read_8051_data(dd, addr, result);
- if (ret)
- break;
- }
-
- /* turn off read enable */
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
-
- spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
-
- return ret;
-}
-
-/*
- * Write data or code to the 8051 code or data RAM.
- */
-static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
- const u8 *data, u32 len)
-{
- u64 reg;
- u32 offset;
- int aligned, count;
-
- /* check alignment */
- aligned = ((unsigned long)data & 0x7) == 0;
-
- /* write set-up */
- reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
- | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);
-
- reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
- << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
- | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
-
- /* write */
- for (offset = 0; offset < len; offset += 8) {
- int bytes = len - offset;
-
- if (bytes < 8) {
- reg = 0;
- memcpy(&reg, &data[offset], bytes);
- } else if (aligned) {
- reg = *(u64 *)&data[offset];
- } else {
- memcpy(&reg, &data[offset], 8);
- }
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);
-
- /* wait until ACCESS_COMPLETED is set */
- count = 0;
- while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
- & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
- == 0) {
- count++;
- if (count > DC8051_ACCESS_TIMEOUT) {
- dd_dev_err(dd, "timeout writing 8051 data\n");
- return -ENXIO;
- }
- udelay(1);
- }
- }
-
- /* turn off write access, auto increment (also sets to data access) */
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
- write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
-
- return 0;
-}
-
-/* return 0 if the values match; otherwise complain and return non-zero */
-static int invalid_header(struct hfi1_devdata *dd, const char *what,
- u32 actual, u32 expected)
-{
- if (actual == expected)
- return 0;
-
- dd_dev_err(dd,
- "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
- what, expected, actual);
- return 1;
-}
-
-/*
- * Verify that the static fields in the CSS header match.
- */
-static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
-{
- /* verify CSS header fields (most sizes are in DW, so add /4) */
- if (invalid_header(dd, "module_type", css->module_type,
- CSS_MODULE_TYPE) ||
- invalid_header(dd, "header_len", css->header_len,
- (sizeof(struct firmware_file) / 4)) ||
- invalid_header(dd, "header_version", css->header_version,
- CSS_HEADER_VERSION) ||
- invalid_header(dd, "module_vendor", css->module_vendor,
- CSS_MODULE_VENDOR) ||
- invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
- invalid_header(dd, "modulus_size", css->modulus_size,
- KEY_SIZE / 4) ||
- invalid_header(dd, "exponent_size", css->exponent_size,
- EXPONENT_SIZE / 4)) {
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * Make sure there are at least some bytes after the prefix.
- */
-static int payload_check(struct hfi1_devdata *dd, const char *name,
- long file_size, long prefix_size)
-{
- /* make sure we have some payload */
- if (prefix_size >= file_size) {
- dd_dev_err(dd,
- "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
- name, file_size, prefix_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Request the firmware from the system. Extract the pieces and fill in
- * fdet. If successful, the caller will need to call dispose_one_firmware().
- * Returns 0 on success, -ERRNO on error.
- */
-static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
- struct firmware_details *fdet)
-{
- struct css_header *css;
- int ret;
-
- memset(fdet, 0, sizeof(*fdet));
-
- ret = reject_firmware(&fdet->fw, name, &dd->pcidev->dev);
- if (ret) {
- dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
- name, ret);
- return ret;
- }
-
- /* verify the firmware */
- if (fdet->fw->size < sizeof(struct css_header)) {
- dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
- ret = -EINVAL;
- goto done;
- }
- css = (struct css_header *)fdet->fw->data;
-
- hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
- hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
- hfi1_cdbg(FIRMWARE, "CSS structure:");
- hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
- hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
- css->header_len, 4 * css->header_len);
- hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
- hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
- hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
- hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
- hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
- css->size, 4 * css->size);
- hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
- css->key_size, 4 * css->key_size);
- hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
- css->modulus_size, 4 * css->modulus_size);
- hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
- css->exponent_size, 4 * css->exponent_size);
- hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
- fdet->fw->size - sizeof(struct firmware_file));
-
- /*
- * If the file does not have a valid CSS header, fail.
- * Otherwise, check the CSS size field for an expected size.
- * The augmented file has r2 and mu inserted after the header
- * was generated, so there will be a known difference between
- * the CSS header size and the actual file size. Use this
- * difference to identify an augmented file.
- *
- * Note: css->size is in DWORDs, multiply by 4 to get bytes.
- */
- ret = verify_css_header(dd, css);
- if (ret) {
- dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
- } else if ((css->size * 4) == fdet->fw->size) {
- /* non-augmented firmware file */
- struct firmware_file *ff = (struct firmware_file *)
- fdet->fw->data;
-
- /* make sure there are bytes in the payload */
- ret = payload_check(dd, name, fdet->fw->size,
- sizeof(struct firmware_file));
- if (ret == 0) {
- fdet->css_header = css;
- fdet->modulus = ff->modulus;
- fdet->exponent = ff->exponent;
- fdet->signature = ff->signature;
- fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
- fdet->mu = fdet->dummy_header.mu; /* use dummy space */
- fdet->firmware_ptr = ff->firmware;
- fdet->firmware_len = fdet->fw->size -
- sizeof(struct firmware_file);
- /*
- * Header does not include r2 and mu - generate here.
- * For now, fail.
- */
- dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
- ret = -EINVAL;
- }
- } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
- /* augmented firmware file */
- struct augmented_firmware_file *aff =
- (struct augmented_firmware_file *)fdet->fw->data;
-
- /* make sure there are bytes in the payload */
- ret = payload_check(dd, name, fdet->fw->size,
- sizeof(struct augmented_firmware_file));
- if (ret == 0) {
- fdet->css_header = css;
- fdet->modulus = aff->modulus;
- fdet->exponent = aff->exponent;
- fdet->signature = aff->signature;
- fdet->r2 = aff->r2;
- fdet->mu = aff->mu;
- fdet->firmware_ptr = aff->firmware;
- fdet->firmware_len = fdet->fw->size -
- sizeof(struct augmented_firmware_file);
- }
- } else {
- /* css->size check failed */
- dd_dev_err(dd,
- "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
- fdet->fw->size / 4,
- (fdet->fw->size - AUGMENT_SIZE) / 4,
- css->size);
-
- ret = -EINVAL;
- }
-
-done:
- /* if returning an error, clean up after ourselves */
- if (ret)
- dispose_one_firmware(fdet);
- return ret;
-}
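-
-/*
- * A minimal sketch (not part of the driver) of the layout test used in
- * obtain_one_firmware() above: css->size counts DWORDs, so an augmented
- * file is exactly AUGMENT_SIZE bytes (the appended r2 and mu) larger
- * than the size reported in the CSS header.
- */
-static inline bool css_is_augmented(const struct css_header *css,
-				    size_t file_size)
-{
-	return ((size_t)css->size * 4) + AUGMENT_SIZE == file_size;
-}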
-
-static void dispose_one_firmware(struct firmware_details *fdet)
-{
- release_firmware(fdet->fw);
- /* erase all previous information */
- memset(fdet, 0, sizeof(*fdet));
-}
-
-/*
- * Obtain the four firmware images from the OS. All must be obtained at once
- * or not at all. If called with the firmware state in FW_TRY, use alternate
- * names. On exit, this routine will have set the firmware state to one of
- * FW_TRY, FW_FINAL, or FW_ERR.
- *
- * Must be holding fw_mutex.
- */
-static void __obtain_firmware(struct hfi1_devdata *dd)
-{
- int err = 0;
-
- if (fw_state == FW_FINAL) /* nothing more to obtain */
- return;
- if (fw_state == FW_ERR) /* already in error */
- return;
-
- /* fw_state is FW_EMPTY or FW_TRY */
-retry:
- if (fw_state == FW_TRY) {
- /*
- * We tried the original and it failed. Move to the
- * alternate.
- */
- dd_dev_warn(dd, "using alternate firmware names\n");
- /*
-		 * Let others run. Some systems, when missing firmware, do
-		 * something that holds for 30 seconds. If we do that twice
-		 * in a row it triggers a task-blocked warning.
- */
- cond_resched();
- if (fw_8051_load)
- dispose_one_firmware(&fw_8051);
- if (fw_fabric_serdes_load)
- dispose_one_firmware(&fw_fabric);
- if (fw_sbus_load)
- dispose_one_firmware(&fw_sbus);
- if (fw_pcie_serdes_load)
- dispose_one_firmware(&fw_pcie);
- fw_8051_name = ALT_FW_8051_NAME_ASIC;
- fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
- fw_sbus_name = ALT_FW_SBUS_NAME;
- fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
- }
-
- if (fw_sbus_load) {
- err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
- if (err)
- goto done;
- }
-
- if (fw_pcie_serdes_load) {
- err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
- if (err)
- goto done;
- }
-
- if (fw_fabric_serdes_load) {
- err = obtain_one_firmware(dd, fw_fabric_serdes_name,
- &fw_fabric);
- if (err)
- goto done;
- }
-
- if (fw_8051_load) {
- err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
- if (err)
- goto done;
- }
-
-done:
- if (err) {
- /* oops, had problems obtaining a firmware */
- if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
- /* retry with alternate (RTL only) */
- fw_state = FW_TRY;
- goto retry;
- }
- dd_dev_err(dd, "unable to obtain working firmware\n");
- fw_state = FW_ERR;
- fw_err = -ENOENT;
- } else {
- /* success */
- if (fw_state == FW_EMPTY &&
- dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
- fw_state = FW_TRY; /* may retry later */
- else
- fw_state = FW_FINAL; /* cannot try again */
- }
-}
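-
-/*
- * Informal summary of the firmware state machine driven here and in
- * retry_firmware() below (derived from the code, not a separate spec):
- *
- *   FW_EMPTY -> FW_TRY    obtained, non-simulator; a load failure may
- *                         still force a retry with alternate names
- *   FW_EMPTY -> FW_FINAL  obtained on the simulator; no retry needed
- *   FW_TRY   -> FW_FINAL  alternate firmware obtained successfully
- *   any      -> FW_ERR    nothing usable could be obtained
- */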
-
-/*
- * Called by all HFIs when loading their firmware - i.e. device probe time.
- * The first one will do the actual firmware load. Use a mutex to resolve
- * any possible race condition.
- *
- * The call to this routine cannot be moved to driver load because the kernel
- * call reject_firmware() requires a device which is only available after
- * the first device probe.
- */
-static int obtain_firmware(struct hfi1_devdata *dd)
-{
- unsigned long timeout;
- int err = 0;
-
- mutex_lock(&fw_mutex);
-
- /* 40s delay due to long delay on missing firmware on some systems */
- timeout = jiffies + msecs_to_jiffies(40000);
- while (fw_state == FW_TRY) {
- /*
- * Another device is trying the firmware. Wait until it
- * decides what works (or not).
- */
- if (time_after(jiffies, timeout)) {
- /* waited too long */
-			dd_dev_err(dd, "Timeout waiting for firmware try\n");
- fw_state = FW_ERR;
- fw_err = -ETIMEDOUT;
- break;
- }
- mutex_unlock(&fw_mutex);
- msleep(20); /* arbitrary delay */
- mutex_lock(&fw_mutex);
- }
- /* not in FW_TRY state */
-
- if (fw_state == FW_FINAL) {
- if (platform_config) {
- dd->platform_config.data = platform_config->data;
- dd->platform_config.size = platform_config->size;
- }
- goto done; /* already acquired */
- } else if (fw_state == FW_ERR) {
- goto done; /* already tried and failed */
- }
- /* fw_state is FW_EMPTY */
-
- /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
- __obtain_firmware(dd);
-
- if (platform_config_load) {
- platform_config = NULL;
- err = reject_firmware(&platform_config, platform_config_name,
- &dd->pcidev->dev);
- if (err) {
- platform_config = NULL;
- goto done;
- }
- dd->platform_config.data = platform_config->data;
- dd->platform_config.size = platform_config->size;
- }
-
-done:
- mutex_unlock(&fw_mutex);
-
- return fw_err;
-}
-
-/*
- * Called when the driver unloads. The timing is asymmetric with its
- * counterpart, obtain_firmware(). If called at device remove time,
- * then it is conceivable that another device could probe while the
- * firmware is being disposed. The mutexes can be moved to do that
- * safely, but then the firmware would be requested from the OS multiple
- * times.
- *
- * No mutex is needed as the driver is unloading and there cannot be any
- * other callers.
- */
-void dispose_firmware(void)
-{
- dispose_one_firmware(&fw_8051);
- dispose_one_firmware(&fw_fabric);
- dispose_one_firmware(&fw_pcie);
- dispose_one_firmware(&fw_sbus);
-
- release_firmware(platform_config);
- platform_config = NULL;
-
- /* retain the error state, otherwise revert to empty */
- if (fw_state != FW_ERR)
- fw_state = FW_EMPTY;
-}
-
-/*
- * Called with the result of a firmware download.
- *
- * Return 1 to retry loading the firmware, 0 to stop.
- */
-static int retry_firmware(struct hfi1_devdata *dd, int load_result)
-{
- int retry;
-
- mutex_lock(&fw_mutex);
-
- if (load_result == 0) {
- /*
- * The load succeeded, so expect all others to do the same.
- * Do not retry again.
- */
- if (fw_state == FW_TRY)
- fw_state = FW_FINAL;
- retry = 0; /* do NOT retry */
- } else if (fw_state == FW_TRY) {
- /* load failed, obtain alternate firmware */
- __obtain_firmware(dd);
- retry = (fw_state == FW_FINAL);
- } else {
- /* else in FW_FINAL or FW_ERR, no retry in either case */
- retry = 0;
- }
-
- mutex_unlock(&fw_mutex);
- return retry;
-}
-
-/*
- * Write a block of data to a given array CSR. All calls will be in
- * multiples of 8 bytes.
- */
-static void write_rsa_data(struct hfi1_devdata *dd, int what,
- const u8 *data, int nbytes)
-{
- int qw_size = nbytes / 8;
- int i;
-
- if (((unsigned long)data & 0x7) == 0) {
- /* aligned */
- u64 *ptr = (u64 *)data;
-
- for (i = 0; i < qw_size; i++, ptr++)
- write_csr(dd, what + (8 * i), *ptr);
- } else {
- /* not aligned */
- for (i = 0; i < qw_size; i++, data += 8) {
- u64 value;
-
- memcpy(&value, data, 8);
- write_csr(dd, what + (8 * i), value);
- }
- }
-}
-
-/*
- * Write a block of data to a given CSR as a stream of writes. All calls will
- * be in multiples of 8 bytes.
- */
-static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
- const u8 *data, int nbytes)
-{
- u64 *ptr = (u64 *)data;
- int qw_size = nbytes / 8;
-
- for (; qw_size > 0; qw_size--, ptr++)
- write_csr(dd, what, *ptr);
-}
-
-/*
- * Download the signature and start the RSA mechanism. Wait for
- * RSA_ENGINE_TIMEOUT before giving up.
- */
-static int run_rsa(struct hfi1_devdata *dd, const char *who,
- const u8 *signature)
-{
- unsigned long timeout;
- u64 reg;
- u32 status;
- int ret = 0;
-
- /* write the signature */
- write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);
-
- /* initialize RSA */
- write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);
-
- /*
- * Make sure the engine is idle and insert a delay between the two
- * writes to MISC_CFG_RSA_CMD.
- */
- status = (read_csr(dd, MISC_CFG_FW_CTRL)
- & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
- >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
- if (status != RSA_STATUS_IDLE) {
- dd_dev_err(dd, "%s security engine not idle - giving up\n",
- who);
- return -EBUSY;
- }
-
- /* start RSA */
- write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);
-
- /*
- * Look for the result.
- *
- * The RSA engine is hooked up to two MISC errors. The driver
- * masks these errors as they do not respond to the standard
- * error "clear down" mechanism. Look for these errors here and
- * clear them when possible. This routine will exit with the
- * errors of the current run still set.
- *
- * MISC_FW_AUTH_FAILED_ERR
- * Firmware authorization failed. This can be cleared by
- * re-initializing the RSA engine, then clearing the status bit.
-	 *	Do not re-init the RSA engine immediately after a successful
- * run - this will reset the current authorization.
- *
- * MISC_KEY_MISMATCH_ERR
- * Key does not match. The only way to clear this is to load
- * a matching key then clear the status bit. If this error
- * is raised, it will persist outside of this routine until a
- * matching key is loaded.
- */
- timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
- while (1) {
- status = (read_csr(dd, MISC_CFG_FW_CTRL)
- & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
- >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
-
- if (status == RSA_STATUS_IDLE) {
- /* should not happen */
- dd_dev_err(dd, "%s firmware security bad idle state\n",
- who);
- ret = -EINVAL;
- break;
- } else if (status == RSA_STATUS_DONE) {
- /* finished successfully */
- break;
- } else if (status == RSA_STATUS_FAILED) {
- /* finished unsuccessfully */
- ret = -EINVAL;
- break;
- }
- /* else still active */
-
- if (time_after(jiffies, timeout)) {
- /*
-			 * Timed out while active. We can't reset the engine
-			 * if it is stuck active, but we can fall through to
-			 * the error reporting below to see which bits are set.
- */
- dd_dev_err(dd, "%s firmware security time out\n", who);
- ret = -ETIMEDOUT;
- break;
- }
-
- msleep(20);
- }
-
- /*
- * Arrive here on success or failure. Clear all RSA engine
-	 * errors. All current errors will stick - the RSA logic is holding
-	 * the error high. All previous errors will clear - the RSA logic is
-	 * no longer holding the error high.
- */
- write_csr(dd, MISC_ERR_CLEAR,
- MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
- MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
- /*
- * All that is left are the current errors. Print warnings on
- * authorization failure details, if any. Firmware authorization
- * can be retried, so these are only warnings.
- */
- reg = read_csr(dd, MISC_ERR_STATUS);
- if (ret) {
- if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
- dd_dev_warn(dd, "%s firmware authorization failed\n",
- who);
- if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
- dd_dev_warn(dd, "%s firmware key mismatch\n", who);
- }
-
- return ret;
-}
-
-static void load_security_variables(struct hfi1_devdata *dd,
- struct firmware_details *fdet)
-{
- /* Security variables a. Write the modulus */
- write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
- /* Security variables b. Write the r2 */
- write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
- /* Security variables c. Write the mu */
- write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
- /* Security variables d. Write the header */
- write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
- (u8 *)fdet->css_header,
- sizeof(struct css_header));
-}
-
-/* return the 8051 firmware state */
-static inline u32 get_firmware_state(struct hfi1_devdata *dd)
-{
- u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
-
- return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
- & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
-}
-
-/*
- * Wait until the firmware is up and ready to take host requests.
- * Return 0 on success, -ETIMEDOUT on timeout.
- */
-int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
-{
- unsigned long timeout;
-
- /* in the simulator, the fake 8051 is always ready */
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
- return 0;
-
- timeout = msecs_to_jiffies(mstimeout) + jiffies;
- while (1) {
- if (get_firmware_state(dd) == 0xa0) /* ready */
- return 0;
- if (time_after(jiffies, timeout)) /* timed out */
- return -ETIMEDOUT;
- usleep_range(1950, 2050); /* sleep 2ms-ish */
- }
-}
-
-/*
- * Load the 8051 firmware.
- */
-static int load_8051_firmware(struct hfi1_devdata *dd,
- struct firmware_details *fdet)
-{
- u64 reg;
- int ret;
- u8 ver_a, ver_b;
-
- /*
- * DC Reset sequence
- * Load DC 8051 firmware
- */
- /*
- * DC reset step 1: Reset DC8051
- */
- reg = DC_DC8051_CFG_RST_M8051W_SMASK
- | DC_DC8051_CFG_RST_CRAM_SMASK
- | DC_DC8051_CFG_RST_DRAM_SMASK
- | DC_DC8051_CFG_RST_IRAM_SMASK
- | DC_DC8051_CFG_RST_SFR_SMASK;
- write_csr(dd, DC_DC8051_CFG_RST, reg);
-
- /*
- * DC reset step 2 (optional): Load 8051 data memory with link
- * configuration
- */
-
- /*
- * DC reset step 3: Load DC8051 firmware
- */
- /* release all but the core reset */
- reg = DC_DC8051_CFG_RST_M8051W_SMASK;
- write_csr(dd, DC_DC8051_CFG_RST, reg);
-
- /* Firmware load step 1 */
- load_security_variables(dd, fdet);
-
- /*
- * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
- */
- write_csr(dd, MISC_CFG_FW_CTRL, 0);
-
- /* Firmware load steps 3-5 */
- ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
- fdet->firmware_len);
- if (ret)
- return ret;
-
- /*
- * DC reset step 4. Host starts the DC8051 firmware
- */
- /*
- * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED
- */
- write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);
-
- /* Firmware load steps 7-10 */
- ret = run_rsa(dd, "8051", fdet->signature);
- if (ret)
- return ret;
-
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
- /*
- * DC reset step 5. Wait for firmware to be ready to accept host
- * requests.
- */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) { /* timed out */
- dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
- get_firmware_state(dd));
- return -ETIMEDOUT;
- }
-
- read_misc_status(dd, &ver_a, &ver_b);
- dd_dev_info(dd, "8051 firmware version %d.%d\n",
- (int)ver_b, (int)ver_a);
- dd->dc8051_ver = dc8051_ver(ver_b, ver_a);
-
- return 0;
-}
-
-/*
- * Write the SBus request register
- *
- * No need for masking - the arguments are sized exactly.
- */
-void sbus_request(struct hfi1_devdata *dd,
- u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
-{
- write_csr(dd, ASIC_CFG_SBUS_REQUEST,
- ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
- ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
- ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
- ((u64)receiver_addr <<
- ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
-}
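-
-/*
- * Example (values hypothetical): sbus_request(dd, 0x1f, 0x07,
- * WRITE_SBUS_RECEIVER, 0x11) packs each argument into its own field of
- * the request CSR; because the parameters are already sized u8/u32, no
- * masking is needed before the shifts.
- */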
-
-/*
- * Turn off the SBus and fabric serdes SPICOs.
- *
- * + Must be called with SBus fast mode turned on.
- * + Must be called after fabric serdes broadcast is set up.
- * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
- * when using MISC_CFG_FW_CTRL.
- */
-static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
-{
- /* only needed on A0 */
- if (!is_ax(dd))
- return;
-
- dd_dev_info(dd, "Turning off spicos:%s%s\n",
- flags & SPICO_SBUS ? " SBus" : "",
- flags & SPICO_FABRIC ? " fabric" : "");
-
- write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
- /* disable SBus spico */
- if (flags & SPICO_SBUS)
- sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
- WRITE_SBUS_RECEIVER, 0x00000040);
-
- /* disable the fabric serdes spicos */
- if (flags & SPICO_FABRIC)
- sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
- 0x07, WRITE_SBUS_RECEIVER, 0x00000000);
- write_csr(dd, MISC_CFG_FW_CTRL, 0);
-}
-
-/*
- * Reset all of the fabric serdes for this HFI in preparation to take the
- * link to Polling.
- *
- * To do a reset, we need to write to the serdes registers. Unfortunately,
- * the fabric serdes download to the other HFI on the ASIC will have turned
- * off the firmware validation on this HFI. This means we can't write to the
- * registers to reset the serdes. Work around this by performing a complete
- * re-download and validation of the fabric serdes firmware. This, as a
- * by-product, will reset the serdes. NOTE: the re-download requires that
- * the 8051 be in the Offline state. I.e. not actively trying to use the
- * serdes. This routine is called at the point where the link is Offline and
- * is getting ready to go to Polling.
- */
-void fabric_serdes_reset(struct hfi1_devdata *dd)
-{
- int ret;
-
- if (!fw_fabric_serdes_load)
- return;
-
- ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
- if (ret) {
- dd_dev_err(dd,
- "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
- return;
- }
- set_sbus_fast_mode(dd);
-
- if (is_ax(dd)) {
- /* A0 serdes do not work with a re-download */
- u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
-
- /* place SerDes in reset and disable SPICO */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
- /* wait 100 refclk cycles @ 156.25MHz => 640ns */
- udelay(1);
- /* remove SerDes reset */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
- /* turn SPICO enable on */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
- } else {
- turn_off_spicos(dd, SPICO_FABRIC);
- /*
- * No need for firmware retry - what to download has already
- * been decided.
- * No need to pay attention to the load return - the only
- * failure is a validation failure, which has already been
- * checked by the initial download.
- */
- (void)load_fabric_serdes_firmware(dd, &fw_fabric);
- }
-
- clear_sbus_fast_mode(dd);
- release_chip_resource(dd, CR_SBUS);
-}
-
-/* Access to the SBus in this routine should probably be serialized */
-int sbus_request_slow(struct hfi1_devdata *dd,
- u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
-{
- u64 reg, count = 0;
-
- /* make sure fast mode is clear */
- clear_sbus_fast_mode(dd);
-
- sbus_request(dd, receiver_addr, data_addr, command, data_in);
- write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
- ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
- /* Wait for both DONE and RCV_DATA_VALID to go high */
- reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
- while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
- (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
- if (count++ >= SBUS_MAX_POLL_COUNT) {
- u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
- /*
-			 * If the loop has timed out, we are OK if the DONE
-			 * bit is set and the RCV_DATA_VALID and EXECUTE
-			 * counters are the same. If not, we cannot proceed.
- */
- if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
- (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
- SBUS_COUNTER(counts, EXECUTE)))
- break;
- return -ETIMEDOUT;
- }
- udelay(1);
- reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
- }
- count = 0;
- write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
- /* Wait for DONE to clear after EXECUTE is cleared */
- reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
- while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
- if (count++ >= SBUS_MAX_POLL_COUNT)
- return -ETIME;
- udelay(1);
- reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
- }
- return 0;
-}
-
-static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
- struct firmware_details *fdet)
-{
- int i, err;
- const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */
-
- dd_dev_info(dd, "Downloading fabric firmware\n");
-
- /* step 1: load security variables */
- load_security_variables(dd, fdet);
- /* step 2: place SerDes in reset and disable SPICO */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
- /* wait 100 refclk cycles @ 156.25MHz => 640ns */
- udelay(1);
- /* step 3: remove SerDes reset */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
- /* step 4: assert IMEM override */
- sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
- /* step 5: download SerDes machine code */
- for (i = 0; i < fdet->firmware_len; i += 4) {
- sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
- *(u32 *)&fdet->firmware_ptr[i]);
- }
- /* step 6: IMEM override off */
- sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
- /* step 7: turn ECC on */
- sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);
-
- /* steps 8-11: run the RSA engine */
- err = run_rsa(dd, "fabric serdes", fdet->signature);
- if (err)
- return err;
-
- /* step 12: turn SPICO enable on */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
- /* step 13: enable core hardware interrupts */
- sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);
-
- return 0;
-}
-
-static int load_sbus_firmware(struct hfi1_devdata *dd,
- struct firmware_details *fdet)
-{
- int i, err;
- const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
-
- dd_dev_info(dd, "Downloading SBus firmware\n");
-
- /* step 1: load security variables */
- load_security_variables(dd, fdet);
- /* step 2: place SPICO into reset and enable off */
- sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
- /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
- sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
- /* step 4: set starting IMEM address for burst download */
- sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
- /* step 5: download the SBus Master machine code */
- for (i = 0; i < fdet->firmware_len; i += 4) {
- sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
- *(u32 *)&fdet->firmware_ptr[i]);
- }
- /* step 6: set IMEM_CNTL_EN off */
- sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
- /* step 7: turn ECC on */
- sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);
-
- /* steps 8-11: run the RSA engine */
- err = run_rsa(dd, "SBus", fdet->signature);
- if (err)
- return err;
-
- /* step 12: set SPICO_ENABLE on */
- sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
-
- return 0;
-}
-
-static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
- struct firmware_details *fdet)
-{
- int i;
- const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */
-
- dd_dev_info(dd, "Downloading PCIe firmware\n");
-
- /* step 1: load security variables */
- load_security_variables(dd, fdet);
- /* step 2: assert single step (halts the SBus Master spico) */
- sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
- /* step 3: enable XDMEM access */
- sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
- /* step 4: load firmware into SBus Master XDMEM */
- /*
- * NOTE: the dmem address, write_en, and wdata are all pre-packed,
- * we only need to pick up the bytes and write them
- */
- for (i = 0; i < fdet->firmware_len; i += 4) {
- sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
- *(u32 *)&fdet->firmware_ptr[i]);
- }
- /* step 5: disable XDMEM access */
- sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
- /* step 6: allow SBus Spico to run */
- sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
-
- /*
- * steps 7-11: run RSA, if it succeeds, firmware is available to
- * be swapped
- */
- return run_rsa(dd, "PCIe serdes", fdet->signature);
-}
-
-/*
- * Set the given broadcast values on the given list of devices.
- */
-static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
- const u8 *addrs, int count)
-{
- while (--count >= 0) {
- /*
- * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
- * defaults for everything else. Do not read-modify-write,
- * per instruction from the manufacturer.
- *
- * Register 0xfd:
- * bits what
- * ----- ---------------------------------
- * 0 IGNORE_BROADCAST (default 0)
- * 11:4 BROADCAST_GROUP_1 (default 0xff)
- * 23:16 BROADCAST_GROUP_2 (default 0xff)
- */
- sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
- (u32)bg1 << 4 | (u32)bg2 << 16);
- }
-}
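-
-/*
- * Worked example of the register 0xfd encoding above (values
- * hypothetical): bg1 = 0x0f, bg2 = 0x01 yields
- * (0x0f << 4) | (0x01 << 16) = 0x000100f0, i.e. BROADCAST_GROUP_1 =
- * 0x0f, BROADCAST_GROUP_2 = 0x01 and IGNORE_BROADCAST left at 0.
- */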
-
-int acquire_hw_mutex(struct hfi1_devdata *dd)
-{
- unsigned long timeout;
- int try = 0;
- u8 mask = 1 << dd->hfi1_id;
- u8 user;
-
-retry:
- timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
- while (1) {
- write_csr(dd, ASIC_CFG_MUTEX, mask);
- user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
- if (user == mask)
- return 0; /* success */
- if (time_after(jiffies, timeout))
- break; /* timed out */
- msleep(20);
- }
-
- /* timed out */
- dd_dev_err(dd,
- "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
- (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
-
- if (try == 0) {
- /* break mutex and retry */
- write_csr(dd, ASIC_CFG_MUTEX, 0);
- try++;
- goto retry;
- }
-
- return -EBUSY;
-}
-
-void release_hw_mutex(struct hfi1_devdata *dd)
-{
- write_csr(dd, ASIC_CFG_MUTEX, 0);
-}
-
-/* return the given resource bit(s) as a mask for the given HFI */
-static inline u64 resource_mask(u32 hfi1_id, u32 resource)
-{
- return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
-}
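-
-/*
- * Informal example: for a dynamic resource such as CR_SBUS,
- * resource_mask(0, CR_SBUS) returns the bit unshifted while
- * resource_mask(1, CR_SBUS) returns it shifted up by CR_DYN_SHIFT,
- * giving each HFI its own ownership bit in ASIC_CFG_SCRATCH.
- */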
-
-static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
- const char *func)
-{
- dd_dev_err(dd,
- "%s: hardware mutex stuck - suggest rebooting the machine\n",
- func);
-}
-
-/*
- * Acquire access to a chip resource.
- *
- * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
- */
-static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
-{
- u64 scratch0, all_bits, my_bit;
- int ret;
-
- if (resource & CR_DYN_MASK) {
- /* a dynamic resource is in use if either HFI has set the bit */
- all_bits = resource_mask(0, resource) |
- resource_mask(1, resource);
- my_bit = resource_mask(dd->hfi1_id, resource);
- } else {
- /* non-dynamic resources are not split between HFIs */
- all_bits = resource;
- my_bit = resource;
- }
-
- /* lock against other callers within the driver wanting a resource */
- mutex_lock(&dd->asic_data->asic_resource_mutex);
-
- ret = acquire_hw_mutex(dd);
- if (ret) {
- fail_mutex_acquire_message(dd, __func__);
- ret = -EIO;
- goto done;
- }
-
- scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
- if (scratch0 & all_bits) {
- ret = -EBUSY;
- } else {
- write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
- /* force write to be visible to other HFI on another OS */
- (void)read_csr(dd, ASIC_CFG_SCRATCH);
- }
-
- release_hw_mutex(dd);
-
-done:
- mutex_unlock(&dd->asic_data->asic_resource_mutex);
- return ret;
-}
-
-/*
- * Acquire access to a chip resource, wait up to mswait milliseconds for
- * the resource to become available.
- *
- * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
- * acquire failed.
- */
-int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
-{
- unsigned long timeout;
- int ret;
-
- timeout = jiffies + msecs_to_jiffies(mswait);
- while (1) {
- ret = __acquire_chip_resource(dd, resource);
- if (ret != -EBUSY)
- return ret;
- /* resource is busy, check our timeout */
- if (time_after_eq(jiffies, timeout))
- return -EBUSY;
- usleep_range(80, 120); /* arbitrary delay */
- }
-}
-
-/*
- * Release access to a chip resource
- */
-void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
-{
- u64 scratch0, bit;
-
- /* only dynamic resources should ever be cleared */
- if (!(resource & CR_DYN_MASK)) {
- dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
- resource);
- return;
- }
- bit = resource_mask(dd->hfi1_id, resource);
-
- /* lock against other callers within the driver wanting a resource */
- mutex_lock(&dd->asic_data->asic_resource_mutex);
-
- if (acquire_hw_mutex(dd)) {
- fail_mutex_acquire_message(dd, __func__);
- goto done;
- }
-
- scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
- if ((scratch0 & bit) != 0) {
- scratch0 &= ~bit;
- write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
- /* force write to be visible to other HFI on another OS */
- (void)read_csr(dd, ASIC_CFG_SCRATCH);
- } else {
- dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
- __func__, dd->hfi1_id, resource);
- }
-
- release_hw_mutex(dd);
-
-done:
- mutex_unlock(&dd->asic_data->asic_resource_mutex);
-}
-
-/*
- * Return true if resource is set, false otherwise. Print a warning
- * if not set and a function is supplied.
- */
-bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
- const char *func)
-{
- u64 scratch0, bit;
-
- if (resource & CR_DYN_MASK)
- bit = resource_mask(dd->hfi1_id, resource);
- else
- bit = resource;
-
- scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
- if ((scratch0 & bit) == 0) {
- if (func)
- dd_dev_warn(dd,
- "%s: id %d, resource 0x%x, not acquired!\n",
- func, dd->hfi1_id, resource);
- return false;
- }
- return true;
-}
-
-static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
-{
- u64 scratch0;
-
- /* lock against other callers within the driver wanting a resource */
- mutex_lock(&dd->asic_data->asic_resource_mutex);
-
- if (acquire_hw_mutex(dd)) {
- fail_mutex_acquire_message(dd, func);
- goto done;
- }
-
- /* clear all dynamic access bits for this HFI */
- scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
- scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
- write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
- /* force write to be visible to other HFI on another OS */
- (void)read_csr(dd, ASIC_CFG_SCRATCH);
-
- release_hw_mutex(dd);
-
-done:
- mutex_unlock(&dd->asic_data->asic_resource_mutex);
-}
-
-void init_chip_resources(struct hfi1_devdata *dd)
-{
- /* clear any holds left by us */
- clear_chip_resources(dd, __func__);
-}
-
-void finish_chip_resources(struct hfi1_devdata *dd)
-{
- /* clear any holds left by us */
- clear_chip_resources(dd, __func__);
-}
-
-void set_sbus_fast_mode(struct hfi1_devdata *dd)
-{
- write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
- ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
-}
-
-void clear_sbus_fast_mode(struct hfi1_devdata *dd)
-{
- u64 reg, count = 0;
-
- reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
- while (SBUS_COUNTER(reg, EXECUTE) !=
- SBUS_COUNTER(reg, RCV_DATA_VALID)) {
- if (count++ >= SBUS_MAX_POLL_COUNT)
- break;
- udelay(1);
- reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
- }
- write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
-}
-
-int load_firmware(struct hfi1_devdata *dd)
-{
- int ret;
-
- if (fw_fabric_serdes_load) {
- ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
- if (ret)
- return ret;
-
- set_sbus_fast_mode(dd);
-
- set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
- fabric_serdes_broadcast[dd->hfi1_id],
- fabric_serdes_addrs[dd->hfi1_id],
- NUM_FABRIC_SERDES);
- turn_off_spicos(dd, SPICO_FABRIC);
- do {
- ret = load_fabric_serdes_firmware(dd, &fw_fabric);
- } while (retry_firmware(dd, ret));
-
- clear_sbus_fast_mode(dd);
- release_chip_resource(dd, CR_SBUS);
- if (ret)
- return ret;
- }
-
- if (fw_8051_load) {
- do {
- ret = load_8051_firmware(dd, &fw_8051);
- } while (retry_firmware(dd, ret));
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-int hfi1_firmware_init(struct hfi1_devdata *dd)
-{
- /* only RTL can use these */
- if (dd->icode != ICODE_RTL_SILICON) {
- fw_fabric_serdes_load = 0;
- fw_pcie_serdes_load = 0;
- fw_sbus_load = 0;
- }
-
- /* no 8051 or QSFP on simulator */
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- fw_8051_load = 0;
- platform_config_load = 0;
- }
-
- if (!fw_8051_name) {
- if (dd->icode == ICODE_RTL_SILICON)
- fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
- else
- fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
- }
- if (!fw_fabric_serdes_name)
- fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
- if (!fw_sbus_name)
- fw_sbus_name = DEFAULT_FW_SBUS_NAME;
- if (!fw_pcie_serdes_name)
- fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
- if (!platform_config_name)
- platform_config_name = DEFAULT_PLATFORM_CONFIG_NAME;
-
- return obtain_firmware(dd);
-}
-
-/*
- * This is a helper for parse_platform_config() and does not check the
- * validity of the platform configuration cache (because we know it is
- * invalid while we are building up the cache). As such, it should not
- * be called from anywhere other than parse_platform_config().
- */
-static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
-{
- u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
- struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
-
- if (!system_table)
- return -EINVAL;
-
- meta_ver_meta =
- *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
- + SYSTEM_TABLE_META_VERSION);
-
- mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
- ver_start = meta_ver_meta & mask;
-
- meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;
-
- mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
- ver_len = meta_ver_meta & mask;
-
- ver_start /= 8;
- meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
-
- if (meta_ver < 5) {
- dd_dev_info(
-			dd, "%s: Please update platform config\n", __func__);
- return -EINVAL;
- }
- return 0;
-}
-
-int parse_platform_config(struct hfi1_devdata *dd)
-{
- struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
- u32 *ptr = NULL;
- u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
- u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
- int ret = -EINVAL; /* assume failure */
-
- if (!dd->platform_config.data) {
- dd_dev_info(dd, "%s: Missing config file\n", __func__);
- goto bail;
- }
- ptr = (u32 *)dd->platform_config.data;
-
- magic_num = *ptr;
- ptr++;
- if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
- dd_dev_info(dd, "%s: Bad config file\n", __func__);
- goto bail;
- }
-
- /* Field is file size in DWORDs */
- file_length = (*ptr) * 4;
- ptr++;
-
- if (file_length > dd->platform_config.size) {
-		dd_dev_info(dd, "%s: File claims to be larger than read size\n",
- __func__);
- goto bail;
- } else if (file_length < dd->platform_config.size) {
- dd_dev_info(dd,
-			    "%s: File claims to be smaller than read size, continuing\n",
- __func__);
- }
- /* exactly equal, perfection */
-
- /*
- * In both cases where we proceed, using the self-reported file length
- * is the safer option
- */
- while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
- header1 = *ptr;
- header2 = *(ptr + 1);
- if (header1 != ~header2) {
- dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
- __func__, (ptr - (u32 *)
- dd->platform_config.data));
- goto bail;
- }
-
- record_idx = *ptr &
- ((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);
-
- table_length_dwords = (*ptr >>
- PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
- ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);
-
- table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
- ((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);
-
- /* Done with this set of headers */
- ptr += 2;
-
- if (record_idx) {
- /* data table */
- switch (table_type) {
- case PLATFORM_CONFIG_SYSTEM_TABLE:
- pcfgcache->config_tables[table_type].num_table =
- 1;
- ret = check_meta_version(dd, ptr);
- if (ret)
- goto bail;
- break;
- case PLATFORM_CONFIG_PORT_TABLE:
- pcfgcache->config_tables[table_type].num_table =
- 2;
- break;
- case PLATFORM_CONFIG_RX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_TX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
- pcfgcache->config_tables[table_type].num_table =
- table_length_dwords;
- break;
- default:
- dd_dev_info(dd,
- "%s: Unknown data table %d, offset %ld\n",
- __func__, table_type,
- (ptr - (u32 *)
- dd->platform_config.data));
- goto bail; /* We don't trust this file now */
- }
- pcfgcache->config_tables[table_type].table = ptr;
- } else {
- /* metadata table */
- switch (table_type) {
- case PLATFORM_CONFIG_SYSTEM_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_PORT_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_RX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_TX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
- break;
- default:
- dd_dev_info(dd,
- "%s: Unknown meta table %d, offset %ld\n",
- __func__, table_type,
- (ptr -
- (u32 *)dd->platform_config.data));
- goto bail; /* We don't trust this file now */
- }
- pcfgcache->config_tables[table_type].table_metadata =
- ptr;
- }
-
- /* Calculate and check table crc */
- crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
- (table_length_dwords * 4));
- crc ^= ~(u32)0;
-
- /* Jump the table */
- ptr += table_length_dwords;
- if (crc != *ptr) {
- dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n",
- __func__, (ptr -
- (u32 *)
- dd->platform_config.data));
- goto bail;
- }
- /* Jump the CRC DWORD */
- ptr++;
- }
-
- pcfgcache->cache_valid = 1;
- return 0;
-bail:
- memset(pcfgcache, 0, sizeof(struct platform_config_cache));
- return ret;
-}
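-
-/*
- * On-disk layout consumed by parse_platform_config() (informal sketch,
- * reconstructed from the parsing code above):
- *
- *	u32 magic;		PLATFORM_CONFIG_MAGIC_NUM
- *	u32 file_length;	in DWORDs
- *	repeated per table:
- *	  u32 header, ~header;	validation pair (idx/type/length fields)
- *	  u32 payload[table_length_dwords];
- *	  u32 crc;		CRC-32 (crc32_le) of the payload
- */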
-
-static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
- int field, u32 *field_len_bits,
- u32 *field_start_bits)
-{
- struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
- u32 *src_ptr = NULL;
-
- if (!pcfgcache->cache_valid)
- return -EINVAL;
-
- switch (table) {
- case PLATFORM_CONFIG_SYSTEM_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_PORT_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_RX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_TX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
- if (field && field < platform_config_table_limits[table])
- src_ptr =
- pcfgcache->config_tables[table].table_metadata + field;
- break;
- default:
- dd_dev_info(dd, "%s: Unknown table\n", __func__);
- break;
- }
-
- if (!src_ptr)
- return -EINVAL;
-
- if (field_start_bits)
- *field_start_bits = *src_ptr &
- ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
-
- if (field_len_bits)
- *field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
- & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
-
- return 0;
-}
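-
-/*
- * Informal sketch of the metadata word decoded above: the low
- * METADATA_TABLE_FIELD_START_LEN_BITS bits hold the field's starting
- * bit offset, and the METADATA_TABLE_FIELD_LEN_LEN_BITS bits starting
- * at METADATA_TABLE_FIELD_LEN_SHIFT hold the field's length in bits.
- * check_meta_version() above decodes the same layout by hand.
- */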
-
-/* This is the central interface to getting data out of the platform config
- * file. It depends on parse_platform_config() having populated the
- * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
- * validate the sanity of the cache.
- *
- * The non-obvious parameters:
- * @table_index: Acts as a lookup key selecting which instance of the
- * table the relevant field is fetched from.
- *
- * This applies to the data tables that have multiple instances. The port table
- * is an exception to this rule as each HFI only has one port and thus the
- * relevant table can be distinguished by hfi_id.
- *
- * @data: pointer to memory that will be populated with the field requested.
- * @len: length of the memory pointed to by @data, in bytes.
- */
-int get_platform_config_field(struct hfi1_devdata *dd,
- enum platform_config_table_type_encoding
- table_type, int table_index, int field_index,
- u32 *data, u32 len)
-{
- int ret = 0, wlen = 0, seek = 0;
- u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
- struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
-
- if (data)
- memset(data, 0, len);
- else
- return -EINVAL;
-
- ret = get_platform_fw_field_metadata(dd, table_type, field_index,
- &field_len_bits,
- &field_start_bits);
- if (ret)
- return -EINVAL;
-
- /* Convert length to bits */
- len *= 8;
-
- /* Our metadata function checked cache_valid and field_index for us */
- switch (table_type) {
- case PLATFORM_CONFIG_SYSTEM_TABLE:
- src_ptr = pcfgcache->config_tables[table_type].table;
-
- if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
- if (len < field_len_bits)
- return -EINVAL;
-
- seek = field_start_bits / 8;
- wlen = field_len_bits / 8;
-
- src_ptr = (u32 *)((u8 *)src_ptr + seek);
-
- /*
- * We expect the field to be byte aligned and whole byte
- * lengths if we are here
- */
- memcpy(data, src_ptr, wlen);
- return 0;
- }
- break;
- case PLATFORM_CONFIG_PORT_TABLE:
- /* Port table is 4 DWORDS */
- src_ptr = dd->hfi1_id ?
- pcfgcache->config_tables[table_type].table + 4 :
- pcfgcache->config_tables[table_type].table;
- break;
- case PLATFORM_CONFIG_RX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_TX_PRESET_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
- /* fall through */
- case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
- src_ptr = pcfgcache->config_tables[table_type].table;
-
- if (table_index <
- pcfgcache->config_tables[table_type].num_table)
- src_ptr += table_index;
- else
- src_ptr = NULL;
- break;
- default:
- dd_dev_info(dd, "%s: Unknown table\n", __func__);
- break;
- }
-
- if (!src_ptr || len < field_len_bits)
- return -EINVAL;
-
- src_ptr += (field_start_bits / 32);
- *data = (*src_ptr >> (field_start_bits % 32)) &
- ((1 << field_len_bits) - 1);
-
- return 0;
-}
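-
-/*
- * Example use (informal; the field index is hypothetical): read a
- * single u32 from this HFI's instance of the port table.
- *
- *	u32 port_type;
- *	int ret;
- *
- *	ret = get_platform_config_field(dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- *					PORT_TABLE_PORT_TYPE, &port_type,
- *					sizeof(port_type));
- */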
-
-/*
- * Download the firmware needed for the Gen3 PCIe SerDes. An update
- * to the SBus firmware is needed before updating the PCIe firmware.
- *
- * Note: caller must be holding the SBus resource.
- */
-int load_pcie_firmware(struct hfi1_devdata *dd)
-{
- int ret = 0;
-
- /* both firmware loads below use the SBus */
- set_sbus_fast_mode(dd);
-
- if (fw_sbus_load) {
- turn_off_spicos(dd, SPICO_SBUS);
- do {
- ret = load_sbus_firmware(dd, &fw_sbus);
- } while (retry_firmware(dd, ret));
- if (ret)
- goto done;
- }
-
- if (fw_pcie_serdes_load) {
- dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
- set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
- pcie_serdes_broadcast[dd->hfi1_id],
- pcie_serdes_addrs[dd->hfi1_id],
- NUM_PCIE_SERDES);
- do {
- ret = load_pcie_serdes_firmware(dd, &fw_pcie);
- } while (retry_firmware(dd, ret));
- if (ret)
- goto done;
- }
-
-done:
- clear_sbus_fast_mode(dd);
-
- return ret;
-}
-
-/*
- * Read the GUID from the hardware, store it in dd.
- */
-void read_guid(struct hfi1_devdata *dd)
-{
- /* Take the DC out of reset to get a valid GUID value */
- write_csr(dd, CCE_DC_CTRL, 0);
- (void)read_csr(dd, CCE_DC_CTRL);
-
- dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
-	dd_dev_info(dd, "GUID %llx\n",
- (unsigned long long)dd->base_guid);
-}
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
deleted file mode 100644
index 16cbdc407..000000000
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ /dev/null
@@ -1,1946 +0,0 @@
-#ifndef _HFI1_KERNEL_H
-#define _HFI1_KERNEL_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/fs.h>
-#include <linux/completion.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/cdev.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <rdma/rdma_vt.h>
-
-#include "chip_registers.h"
-#include "common.h"
-#include "verbs.h"
-#include "pio.h"
-#include "chip.h"
-#include "mad.h"
-#include "qsfp.h"
-#include "platform.h"
-#include "affinity.h"
-
-/* bumped 1 from s/w major version of TrueScale */
-#define HFI1_CHIP_VERS_MAJ 3U
-
-/* don't care about this except printing */
-#define HFI1_CHIP_VERS_MIN 0U
-
-/* The Organization Unique Identifier (Mfg code), and its position in GUID */
-#define HFI1_OUI 0x001175
-#define HFI1_OUI_LSB 40
-
-#define DROP_PACKET_OFF 0
-#define DROP_PACKET_ON 1
-
-extern unsigned long hfi1_cap_mask;
-#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
-#define HFI1_CAP_UGET_MASK(mask, cap) \
- (((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
-#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
-#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
-#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
-#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
-#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
- HFI1_CAP_MISC_MASK)
-/* Offline Disabled Reason is 4 bits */
-#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)
-
-/*
- * Control context is always 0 and handles the error packets.
- * It also handles the VL15 and multicast packets.
- */
-#define HFI1_CTRL_CTXT 0
-
-/*
- * Driver context will store software counters for each of the events
- * associated with these status registers
- */
-#define NUM_CCE_ERR_STATUS_COUNTERS 41
-#define NUM_RCV_ERR_STATUS_COUNTERS 64
-#define NUM_MISC_ERR_STATUS_COUNTERS 13
-#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
-#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
-#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
-#define NUM_SEND_ERR_STATUS_COUNTERS 3
-#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
-#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24
-
-/*
- * Per-driver stats: either not specific to any device or port, or
- * summed over all of the devices and ports.
- * They are described by name via the ipathfs filesystem, so the layout
- * and number of elements can change without breaking compatibility.
- * If members are added or deleted, hfi1_statnames[] in debugfs.c must
- * be changed to match.
- */
-struct hfi1_ib_stats {
- __u64 sps_ints; /* number of interrupts handled */
- __u64 sps_errints; /* number of error interrupts */
- __u64 sps_txerrs; /* tx-related packet errors */
- __u64 sps_rcverrs; /* non-crc rcv packet errors */
- __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
- __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
- __u64 sps_ctxts; /* number of contexts currently open */
- __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
- __u64 sps_buffull;
- __u64 sps_hdrfull;
-};
-
-extern struct hfi1_ib_stats hfi1_stats;
-extern const struct pci_error_handlers hfi1_pci_err_handler;
-
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)
-
-/*
- * Below contains all data related to a single context (formerly called port).
- */
-
-#ifdef CONFIG_DEBUG_FS
-struct hfi1_opcode_stats_perctx;
-#endif
-
-struct ctxt_eager_bufs {
- ssize_t size; /* total size of eager buffers */
- u32 count; /* size of buffers array */
- u32 numbufs; /* number of buffers allocated */
- u32 alloced; /* number of rcvarray entries used */
- u32 rcvtid_size; /* size of each eager rcv tid */
- u32 threshold; /* head update threshold */
- struct eager_buffer {
- void *addr;
- dma_addr_t phys;
- ssize_t len;
- } *buffers;
- struct {
- void *addr;
- dma_addr_t phys;
- } *rcvtids;
-};
-
-struct exp_tid_set {
- struct list_head list;
- u32 count;
-};
-
-struct hfi1_ctxtdata {
- /* shadow the ctxt's RcvCtrl register */
- u64 rcvctrl;
- /* rcvhdrq base, needs mmap before useful */
- void *rcvhdrq;
- /* kernel virtual address where hdrqtail is updated */
- volatile __le64 *rcvhdrtail_kvaddr;
- /*
- * Shared page for kernel to signal user processes that send buffers
- * need disarming. The process should call HFI1_CMD_DISARM_BUFS
- * or HFI1_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
- */
- unsigned long *user_event_mask;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t wait;
- /* rcvhdrq size (for freeing) */
- size_t rcvhdrq_size;
- /* number of rcvhdrq entries */
- u16 rcvhdrq_cnt;
- /* size of each of the rcvhdrq entries */
- u16 rcvhdrqentsize;
- /* mmap of hdrq, must fit in 44 bits */
- dma_addr_t rcvhdrq_phys;
- dma_addr_t rcvhdrqtailaddr_phys;
- struct ctxt_eager_bufs egrbufs;
- /* this receive context's assigned PIO ACK send context */
- struct send_context *sc;
-
- /* dynamic receive available interrupt timeout */
- u32 rcvavail_timeout;
- /*
- * number of opens (including slave sub-contexts) on this instance
- * (ignoring forks, dup, etc. for now)
- */
- int cnt;
- /*
- * how much space to leave at start of eager TID entries for
- * protocol use, on each TID
- */
- /* instead of calculating it */
- unsigned ctxt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_cnt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_id;
- u8 uuid[16];
- /* job key */
- u16 jkey;
- /* number of RcvArray groups for this context. */
- u32 rcv_array_groups;
- /* index of first eager TID entry. */
- u32 eager_base;
- /* number of expected TID entries */
- u32 expected_count;
- /* index of first expected TID entry. */
- u32 expected_base;
-
- struct exp_tid_set tid_group_list;
- struct exp_tid_set tid_used_list;
- struct exp_tid_set tid_full_list;
-
- /* lock protecting all Expected TID data */
- struct mutex exp_lock;
- /* number of pio bufs for this ctxt (all procs, if shared) */
- u32 piocnt;
- /* first pio buffer for this ctxt */
- u32 pio_base;
- /* chip offset of PIO buffers for this ctxt */
- u32 piobufs;
- /* per-context configuration flags */
- u32 flags;
- /* per-context event flags for fileops/intr communication */
- unsigned long event_flags;
- /* WAIT_RCV that timed out, no interrupt */
- u32 rcvwait_to;
- /* WAIT_PIO that timed out, no interrupt */
- u32 piowait_to;
- /* WAIT_RCV already happened, no wait */
- u32 rcvnowait;
- /* WAIT_PIO already happened, no wait */
- u32 pionowait;
- /* total number of polled urgent packets */
- u32 urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 urgent_poll;
- /* pid of process using this ctxt */
- pid_t pid;
- pid_t subpid[HFI1_MAX_SHARED_CTXTS];
- /* same size as task_struct .comm[], command that opened context */
- char comm[TASK_COMM_LEN];
- /* so file ops can get at unit */
- struct hfi1_devdata *dd;
- /* so functions that need physical port can get it easily */
- struct hfi1_pportdata *ppd;
- /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
- void *subctxt_uregbase;
- /* An array of pages for the eager receive buffers * N */
- void *subctxt_rcvegrbuf;
- /* An array of pages for the eager header queue entries * N */
- void *subctxt_rcvhdr_base;
- /* The version of the library which opened this ctxt */
- u32 userversion;
- /* Bitmask of active slaves */
- u32 active_slaves;
- /* Type of packets or conditions we want to poll for */
- u16 poll_type;
- /* receive packet sequence counter */
- u8 seq_cnt;
- u8 redirect_seq_cnt;
- /* ctxt rcvhdrq head offset */
- u32 head;
- u32 pkt_count;
- /* QPs waiting for context processing */
- struct list_head qp_wait_list;
- /* interrupt handling */
- u64 imask; /* clear interrupt mask */
- int ireg; /* clear interrupt register */
- unsigned numa_id; /* numa node of this context */
- /* verbs stats per CTX */
- struct hfi1_opcode_stats_perctx *opstats;
- /*
- * This is the kernel thread that will keep making
- * progress on the user sdma requests behind the scenes.
- * There is one per context (shared contexts use the master's).
- */
- struct task_struct *progress;
- struct list_head sdma_queues;
- /* protect sdma queues */
- spinlock_t sdma_qlock;
-
- /* Is ASPM interrupt supported for this context */
- bool aspm_intr_supported;
- /* ASPM state (enabled/disabled) for this context */
- bool aspm_enabled;
- /* Timer for re-enabling ASPM if interrupt activity quietens down */
- struct timer_list aspm_timer;
- /* Lock to serialize between intr, timer intr and user threads */
- spinlock_t aspm_lock;
- /* Is ASPM processing enabled for this context (in intr context) */
- bool aspm_intr_enable;
- /* Last interrupt timestamp */
- ktime_t aspm_ts_last_intr;
- /* Last timestamp at which we scheduled a timer for this context */
- ktime_t aspm_ts_timer_sched;
-
- /*
- * The interrupt handler for a particular receive context can vary
-	 * throughout its lifetime. This is not a lock-protected data member so
-	 * it must be updated atomically and the previous and new values must
-	 * always be valid. Worst case is we process an extra interrupt and up
-	 * to 64 packets with the wrong interrupt handler.
- */
- int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);
-};
-
-/*
- * Represents a single packet at a high level. Put commonly computed things in
- * here so we do not have to keep doing them over and over. The rule of thumb is
- * if something is used one time to derive some value, store that something in
- * here. If it is used multiple times, then store the result of that derivation
- * in here.
- */
-struct hfi1_packet {
- void *ebuf;
- void *hdr;
- struct hfi1_ctxtdata *rcd;
- __le32 *rhf_addr;
- struct rvt_qp *qp;
- struct hfi1_other_headers *ohdr;
- u64 rhf;
- u32 maxcnt;
- u32 rhqoff;
- u32 hdrqtail;
- int numpkt;
- u16 tlen;
- u16 hlen;
- s16 etail;
- u16 rsize;
- u8 updegr;
- u8 rcv_flags;
- u8 etype;
-};
-
-static inline bool has_sc4_bit(struct hfi1_packet *p)
-{
- return !!rhf_dc_info(p->rhf);
-}
-
-/*
- * Private data for snoop/capture support.
- */
-struct hfi1_snoop_data {
- int mode_flag;
- struct cdev cdev;
- struct device *class_dev;
- /* protect snoop data */
- spinlock_t snoop_lock;
- struct list_head queue;
- wait_queue_head_t waitq;
- void *filter_value;
- int (*filter_callback)(void *hdr, void *data, void *value);
- u64 dcc_cfg; /* saved value of DCC Cfg register */
-};
-
-/* snoop mode_flag values */
-#define HFI1_PORT_SNOOP_MODE 1U
-#define HFI1_PORT_CAPTURE_MODE 2U
-
-struct rvt_sge_state;
-
-/*
- * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
- * Mostly for MADs that set or query link parameters, also ipath
- * config interfaces
- */
-#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
-#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
-#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
-#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
-#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
-#define HFI1_IB_CFG_SPD 5 /* current Link spd */
-#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
-#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
-#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
-#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
-#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
-#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
-#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
-#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
-#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
-#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
-#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
-#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
-#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
-#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
-#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */
-
-/*
- * HFI or Host Link States
- *
- * These describe the states the driver thinks the logical and physical
- * states are in. Used as an argument to set_link_state(). Implemented
- * as bits for easy multi-state checking. The actual state can only be
- * one.
- */
-#define __HLS_UP_INIT_BP 0
-#define __HLS_UP_ARMED_BP 1
-#define __HLS_UP_ACTIVE_BP 2
-#define __HLS_DN_DOWNDEF_BP 3 /* link down default */
-#define __HLS_DN_POLL_BP 4
-#define __HLS_DN_DISABLE_BP 5
-#define __HLS_DN_OFFLINE_BP 6
-#define __HLS_VERIFY_CAP_BP 7
-#define __HLS_GOING_UP_BP 8
-#define __HLS_GOING_OFFLINE_BP 9
-#define __HLS_LINK_COOLDOWN_BP 10
-
-#define HLS_UP_INIT BIT(__HLS_UP_INIT_BP)
-#define HLS_UP_ARMED BIT(__HLS_UP_ARMED_BP)
-#define HLS_UP_ACTIVE BIT(__HLS_UP_ACTIVE_BP)
-#define HLS_DN_DOWNDEF BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
-#define HLS_DN_POLL BIT(__HLS_DN_POLL_BP)
-#define HLS_DN_DISABLE BIT(__HLS_DN_DISABLE_BP)
-#define HLS_DN_OFFLINE BIT(__HLS_DN_OFFLINE_BP)
-#define HLS_VERIFY_CAP BIT(__HLS_VERIFY_CAP_BP)
-#define HLS_GOING_UP BIT(__HLS_GOING_UP_BP)
-#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
-#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
-
-#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
-
-/* use this MTU size if none other is given */
-#define HFI1_DEFAULT_ACTIVE_MTU 8192
-/* use this MTU size as the default maximum */
-#define HFI1_DEFAULT_MAX_MTU 8192
-/* default partition key */
-#define DEFAULT_PKEY 0xffff
-
-/*
- * Possible fabric manager config parameters for fm_{get,set}_table()
- */
-#define FM_TBL_VL_HIGH_ARB 1 /* Get/set VL high prio weights */
-#define FM_TBL_VL_LOW_ARB 2 /* Get/set VL low prio weights */
-#define FM_TBL_BUFFER_CONTROL 3 /* Get/set Buffer Control */
-#define FM_TBL_SC2VLNT 4 /* Get/set SC->VLnt */
-#define FM_TBL_VL_PREEMPT_ELEMS 5 /* Get (no set) VL preempt elems */
-#define FM_TBL_VL_PREEMPT_MATRIX 6 /* Get (no set) VL preempt matrix */
-
-/*
- * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
- * these are bits so they can be combined, e.g.
- * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
- */
-#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
-#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
-#define HFI1_RCVCTRL_CTXT_ENB 0x04
-#define HFI1_RCVCTRL_CTXT_DIS 0x08
-#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
-#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
-#define HFI1_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
-#define HFI1_RCVCTRL_PKEY_DIS 0x80
-#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
-#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
-#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
-#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
-#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
-#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
-#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
-#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
-
-/* partition enforcement flags */
-#define HFI1_PART_ENFORCE_IN 0x1
-#define HFI1_PART_ENFORCE_OUT 0x2
-
-/* how often we check for synthetic counter wrap around */
-#define SYNTH_CNT_TIME 2
-
-/* Counter flags */
-#define CNTR_NORMAL 0x0 /* Normal counters, just read register */
-#define CNTR_SYNTH 0x1 /* Synthetic counters, saturate at all 1s */
-#define CNTR_DISABLED 0x2 /* Disable this counter */
-#define CNTR_32BIT 0x4 /* Simulate 64 bits for this counter */
-#define CNTR_VL 0x8 /* Per VL counter */
-#define CNTR_SDMA 0x10
-#define CNTR_INVALID_VL -1 /* Specifies invalid VL */
-#define CNTR_MODE_W 0x0
-#define CNTR_MODE_R 0x1
-
-/* VLs Supported/Operational */
-#define HFI1_MIN_VLS_SUPPORTED 1
-#define HFI1_MAX_VLS_SUPPORTED 8
-
-static inline void incr_cntr64(u64 *cntr)
-{
- if (*cntr < (u64)-1LL)
- (*cntr)++;
-}
-
-static inline void incr_cntr32(u32 *cntr)
-{
- if (*cntr < (u32)-1LL)
- (*cntr)++;
-}
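incr_cntr64() and incr_cntr32() are saturating increments: once a counter reaches the all-ones value it stops, so a wrapped value is never mistaken for a small count. A minimal stand-alone sketch of the same idea (plain C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Saturating increment: stick at all ones instead of wrapping to 0. */
    static void incr_sat32(uint32_t *cntr)
    {
            if (*cntr < UINT32_MAX)
                    (*cntr)++;
    }

    int main(void)
    {
            uint32_t c = UINT32_MAX - 1;

            incr_sat32(&c);    /* reaches UINT32_MAX */
            incr_sat32(&c);    /* saturates; no wrap to 0 */
            printf("%u\n", c); /* prints 4294967295 */
            return 0;
    }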
-
-#define MAX_NAME_SIZE 64
-struct hfi1_msix_entry {
- enum irq_type type;
- struct msix_entry msix;
- void *arg;
- char name[MAX_NAME_SIZE];
- cpumask_t mask;
-};
-
-/* per-SL CCA information */
-struct cca_timer {
- struct hrtimer hrtimer;
- struct hfi1_pportdata *ppd; /* read-only */
- int sl; /* read-only */
- u16 ccti; /* read/write - current value of CCTI */
-};
-
-struct link_down_reason {
- /*
- * SMA-facing value. Should be set from .latest when
- * HLS_UP_* -> HLS_DN_* transition actually occurs.
- */
- u8 sma;
- u8 latest;
-};
-
-enum {
- LO_PRIO_TABLE,
- HI_PRIO_TABLE,
- MAX_PRIO_TABLE
-};
-
-struct vl_arb_cache {
- /* protect vl arb cache */
- spinlock_t lock;
- struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
-};
-
-/*
- * The structure below encapsulates data relevant to a physical IB Port.
- * Current chips support only one such port, but the separation
- * clarifies things a bit. Note that to conform to IB conventions,
- * port-numbers are one-based. The first or only port is port1.
- */
-struct hfi1_pportdata {
- struct hfi1_ibport ibport_data;
-
- struct hfi1_devdata *dd;
- struct kobject pport_cc_kobj;
- struct kobject sc2vl_kobj;
- struct kobject sl2sc_kobj;
- struct kobject vl2mtu_kobj;
-
- /* PHY support */
- u32 port_type;
- struct qsfp_data qsfp_info;
-
- /* GUID for this interface, in host order */
- u64 guid;
- /* GUID for peer interface, in host order */
- u64 neighbor_guid;
-
- /* up or down physical link state */
- u32 linkup;
-
- /*
- * this address is mapped read-only into user processes so they can
- * get status cheaply, whenever they want. One qword of status per port
- */
- u64 *statusp;
-
- /* SendDMA related entries */
-
- struct workqueue_struct *hfi1_wq;
-
- /* move out of interrupt context */
- struct work_struct link_vc_work;
- struct work_struct link_up_work;
- struct work_struct link_down_work;
- struct work_struct dc_host_req_work;
- struct work_struct sma_message_work;
- struct work_struct freeze_work;
- struct work_struct link_downgrade_work;
- struct work_struct link_bounce_work;
- /* host link state variables */
- struct mutex hls_lock;
- u32 host_link_state;
-
- spinlock_t sdma_alllock ____cacheline_aligned_in_smp;
-
- u32 lstate; /* logical link state */
-
- /* these are the "32 bit" regs */
-
- u32 ibmtu; /* The MTU programmed for this unit */
- /*
- * Current max size IB packet (in bytes) including IB headers, that
- * we can send. Changes when ibmtu changes.
- */
- u32 ibmaxlen;
- u32 current_egress_rate; /* units [10^6 bits/sec] */
- /* LID programmed for this instance */
- u16 lid;
- /* list of pkeys programmed; 0 if not set */
- u16 pkeys[MAX_PKEY_VALUES];
- u16 link_width_supported;
- u16 link_width_downgrade_supported;
- u16 link_speed_supported;
- u16 link_width_enabled;
- u16 link_width_downgrade_enabled;
- u16 link_speed_enabled;
- u16 link_width_active;
- u16 link_width_downgrade_tx_active;
- u16 link_width_downgrade_rx_active;
- u16 link_speed_active;
- u8 vls_supported;
- u8 vls_operational;
- u8 actual_vls_operational;
- /* LID mask control */
- u8 lmc;
- /* Rx Polarity inversion (compensate for ~tx on partner) */
- u8 rx_pol_inv;
-
- u8 hw_pidx; /* physical port index */
- u8 port; /* IB port number and index into dd->pports - 1 */
- /* type of neighbor node */
- u8 neighbor_type;
- u8 neighbor_normal;
- u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
- u8 neighbor_port_number;
- u8 is_sm_config_started;
- u8 offline_disabled_reason;
- u8 is_active_optimize_enabled;
- u8 driver_link_ready; /* driver ready for active link */
- u8 link_enabled; /* link enabled? */
- u8 linkinit_reason;
- u8 local_tx_rate; /* rate given to 8051 firmware */
- u8 last_pstate; /* info only */
-
- /* placeholders for IB MAD packet settings */
- u8 overrun_threshold;
- u8 phy_error_threshold;
-
- /* Used to override LED behavior for things like maintenance beaconing */
- /*
- * Alternates per phase of blink
- * [0] holds LED off duration, [1] holds LED on duration
- */
- unsigned long led_override_vals[2];
- u8 led_override_phase; /* LSB picks from vals[] */
- atomic_t led_override_timer_active;
- /* Used to flash LEDs in override mode */
- struct timer_list led_override_timer;
-
- u32 sm_trap_qp;
- u32 sa_qp;
-
- /*
- * cca_timer_lock protects access to the per-SL cca_timer
- * structures (specifically the ccti member).
- */
- spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
- struct cca_timer cca_timer[OPA_MAX_SLS];
-
- /* List of congestion control table entries */
- struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];
-
- /* congestion entries, each entry corresponding to a SL */
- struct opa_congestion_setting_entry_shadow
- congestion_entries[OPA_MAX_SLS];
-
- /*
- * cc_state_lock protects (write) access to the per-port
- * struct cc_state.
- */
- spinlock_t cc_state_lock ____cacheline_aligned_in_smp;
-
- struct cc_state __rcu *cc_state;
-
- /* Total number of congestion control table entries */
- u16 total_cct_entry;
-
- /* Bit map identifying service level */
- u32 cc_sl_control_map;
-
- /* CA's max number of 64 entry units in the congestion control table */
- u8 cc_max_table_entries;
-
- /*
- * begin congestion log related entries
- * cc_log_lock protects all congestion log related data
- */
- spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
- u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
- u16 threshold_event_counter;
- struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
- int cc_log_idx; /* index for logging events */
- int cc_mad_idx; /* index for reporting events */
- /* end congestion log related entries */
-
- struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];
-
- /* port relative counter buffer */
- u64 *cntrs;
- /* port relative synthetic counter buffer */
- u64 *scntrs;
- /* port_xmit_discards are synthesized from different egress errors */
- u64 port_xmit_discards;
- u64 port_xmit_discards_vl[C_VL_COUNT];
- u64 port_xmit_constraint_errors;
- u64 port_rcv_constraint_errors;
- /* count of 'link_err' interrupts from DC */
- u64 link_downed;
- /* number of times link retrained successfully */
- u64 link_up;
- /* number of times a link unknown frame was reported */
- u64 unknown_frame_count;
- /* port_ltp_crc_mode is returned in 'portinfo' MADs */
- u16 port_ltp_crc_mode;
- /* port_crc_mode_enabled is the crc we support */
- u8 port_crc_mode_enabled;
- /* mgmt_allowed is also returned in 'portinfo' MADs */
- u8 mgmt_allowed;
- u8 part_enforce; /* partition enforcement flags */
- struct link_down_reason local_link_down_reason;
- struct link_down_reason neigh_link_down_reason;
- /* Value to be sent to link peer on LinkDown. */
- u8 remote_link_down_reason;
- /* Error events that will cause a port bounce. */
- u32 port_error_action;
- struct work_struct linkstate_active_work;
- /* Does this port need to prescan for FECNs */
- bool cc_prescan;
-};
-
-typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
-
-typedef void (*opcode_handler)(struct hfi1_packet *packet);
-
-/* return values for the RHF receive functions */
-#define RHF_RCV_CONTINUE 0 /* keep going */
-#define RHF_RCV_DONE 1 /* stop, this packet processed */
-#define RHF_RCV_REPROCESS 2 /* stop. retain this packet */
-
-struct rcv_array_data {
- u8 group_size;
- u16 ngroups;
- u16 nctxt_extra;
-};
-
-struct per_vl_data {
- u16 mtu;
- struct send_context *sc;
-};
-
-/* 16 to directly index */
-#define PER_VL_SEND_CONTEXTS 16
-
-struct err_info_rcvport {
- u8 status_and_code;
- u64 packet_flit1;
- u64 packet_flit2;
-};
-
-struct err_info_constraint {
- u8 status;
- u16 pkey;
- u32 slid;
-};
-
-struct hfi1_temp {
- unsigned int curr; /* current temperature */
- unsigned int lo_lim; /* low temperature limit */
- unsigned int hi_lim; /* high temperature limit */
- unsigned int crit_lim; /* critical temperature limit */
- u8 triggers; /* temperature triggers */
-};
-
-/* common data between shared ASIC HFIs */
-struct hfi1_asic_data {
- struct hfi1_devdata *dds[2]; /* back pointers */
- struct mutex asic_resource_mutex;
-};
-
-/* The device data struct now contains only "general per-device" info.
- * Fields related to a physical IB port are in an hfi1_pportdata struct.
- */
-struct sdma_engine;
-struct sdma_vl_map;
-
-#define BOARD_VERS_MAX 96 /* how long the version string can be */
-#define SERIAL_MAX 16 /* length of the serial number */
-
-typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
-struct hfi1_devdata {
- struct hfi1_ibdev verbs_dev; /* must be first */
- struct list_head list;
- /* pointers to related structs for this device */
- /* pci access data structure */
- struct pci_dev *pcidev;
- struct cdev user_cdev;
- struct cdev diag_cdev;
- struct cdev ui_cdev;
- struct device *user_device;
- struct device *diag_device;
- struct device *ui_device;
-
- /* mem-mapped pointer to base of chip regs */
- u8 __iomem *kregbase;
- /* end of mem-mapped chip space excluding sendbuf and user regs */
- u8 __iomem *kregend;
- /* physical address of chip for io_remap, etc. */
- resource_size_t physaddr;
- /* receive context data */
- struct hfi1_ctxtdata **rcd;
- /* send context data */
- struct send_context_info *send_contexts;
- /* map hardware send contexts to software index */
- u8 *hw_to_sw;
- /* spinlock for allocating and releasing send context resources */
- spinlock_t sc_lock;
- /* Per VL data. Enough for all VLs but not all elements are set/used. */
- struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
- /* lock for pio_map */
- spinlock_t pio_map_lock;
- /* array of kernel send contexts */
- struct send_context **kernel_send_context;
- /* array of vl maps */
- struct pio_vl_map __rcu *pio_map;
- /* seqlock for sc2vl */
- seqlock_t sc2vl_lock;
- u64 sc2vl[4];
- /* Send Context initialization lock. */
- spinlock_t sc_init_lock;
-
- /* fields common to all SDMA engines */
-
- /* default flags to last descriptor */
- u64 default_desc1;
- volatile __le64 *sdma_heads_dma; /* DMA'ed by chip */
- dma_addr_t sdma_heads_phys;
- void *sdma_pad_dma; /* DMA'ed by chip */
- dma_addr_t sdma_pad_phys;
- /* for deallocation */
- size_t sdma_heads_size;
- /* number from the chip */
- u32 chip_sdma_engines;
- /* num used */
- u32 num_sdma;
- /* lock for sdma_map */
- spinlock_t sde_map_lock;
- /* array of engines sized by num_sdma */
- struct sdma_engine *per_sdma;
- /* array of vl maps */
- struct sdma_vl_map __rcu *sdma_map;
- /* SPC freeze waitqueue and variable */
- wait_queue_head_t sdma_unfreeze_wq;
- atomic_t sdma_unfreeze_count;
-
- /* common data between shared ASIC HFIs in this OS */
- struct hfi1_asic_data *asic_data;
-
- /* hfi1_pportdata, points to array of (physical) port-specific
- * data structs, indexed by pidx (0..n-1)
- */
- struct hfi1_pportdata *pport;
-
- /* mem-mapped pointer to base of PIO buffers */
- void __iomem *piobase;
- /*
- * write-combining mem-mapped pointer to base of RcvArray
- * memory.
- */
- void __iomem *rcvarray_wc;
- /*
- * credit return base - a per-NUMA range of DMA address that
- * the chip will use to update the per-context free counter
- */
- struct credit_return_base *cr_base;
-
- /* send context numbers and sizes for each type */
- struct sc_config_sizes sc_sizes[SC_MAX];
-
- u32 lcb_access_count; /* count of LCB users */
-
- char *boardname; /* human readable board info */
-
- /* device (not port) flags, basically device capabilities */
- u32 flags;
-
- /* reset value */
- u64 z_int_counter;
- u64 z_rcv_limit;
- u64 z_send_schedule;
- /* percpu int_counter */
- u64 __percpu *int_counter;
- u64 __percpu *rcv_limit;
- u64 __percpu *send_schedule;
- /* number of receive contexts in use by the driver */
- u32 num_rcv_contexts;
- /* number of pio send contexts in use by the driver */
- u32 num_send_contexts;
- /*
- * number of ctxts available for PSM open
- */
- u32 freectxts;
- /* total number of available user/PSM contexts */
- u32 num_user_contexts;
- /* base receive interrupt timeout, in CSR units */
- u32 rcv_intr_timeout_csr;
-
- u64 __iomem *egrtidbase;
- spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
- spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
- /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
- spinlock_t uctxt_lock; /* rcd and user context changes */
- /* exclusive access to 8051 */
- spinlock_t dc8051_lock;
- /* exclusive access to 8051 memory */
- spinlock_t dc8051_memlock;
- int dc8051_timed_out; /* remember if the 8051 timed out */
- /*
- * A page that will hold event notification bitmaps for all
- * contexts. This page will be mapped into all processes.
- */
- unsigned long *events;
- /*
- * per unit status, see also portdata statusp
- * mapped read-only into user processes so they can get unit and
- * IB link status cheaply
- */
- struct hfi1_status *status;
- u32 freezelen; /* max length of freezemsg */
-
- /* revision register shadow */
- u64 revision;
- /* Base GUID for device (network order) */
- u64 base_guid;
-
- /* these are the "32 bit" regs */
-
- /* value we put in kr_rcvhdrsize */
- u32 rcvhdrsize;
- /* number of receive contexts the chip supports */
- u32 chip_rcv_contexts;
- /* number of receive array entries */
- u32 chip_rcv_array_count;
- /* number of PIO send contexts the chip supports */
- u32 chip_send_contexts;
- /* number of bytes in the PIO memory buffer */
- u32 chip_pio_mem_size;
- /* number of bytes in the SDMA memory buffer */
- u32 chip_sdma_mem_size;
-
- /* size of each rcvegrbuffer */
- u32 rcvegrbufsize;
- /* log2 of above */
- u16 rcvegrbufsize_shift;
- /* both sides of the PCIe link are gen3 capable */
- u8 link_gen3_capable;
- /* localbus width (1, 2,4,8,16,32) from config space */
- u32 lbus_width;
- /* localbus speed in MHz */
- u32 lbus_speed;
- int unit; /* unit # of this chip */
- int node; /* home node of this chip */
-
- /* save these PCI fields to restore after a reset */
- u32 pcibar0;
- u32 pcibar1;
- u32 pci_rom;
- u16 pci_command;
- u16 pcie_devctl;
- u16 pcie_lnkctl;
- u16 pcie_devctl2;
- u32 pci_msix0;
- u32 pci_lnkctl3;
- u32 pci_tph2;
-
- /*
- * ASCII serial number, from flash, large enough for the original
- * all-digit strings and the longer serial number format
- */
- u8 serial[SERIAL_MAX];
- /* human readable board version */
- u8 boardversion[BOARD_VERS_MAX];
- u8 lbus_info[32]; /* human readable localbus info */
- /* chip major rev, from CceRevision */
- u8 majrev;
- /* chip minor rev, from CceRevision */
- u8 minrev;
- /* hardware ID */
- u8 hfi1_id;
- /* implementation code */
- u8 icode;
- /* default link down value (poll/sleep) */
- u8 link_default;
- /* vAU of this device */
- u8 vau;
- /* vCU of this device */
- u8 vcu;
- /* link credits of this device */
- u16 link_credits;
- /* initial vl15 credits to use */
- u16 vl15_init;
-
- /* Misc small ints */
- /* Number of physical ports available */
- u8 num_pports;
- /* Lowest context number which can be used by user processes */
- u8 first_user_ctxt;
- u8 n_krcv_queues;
- u8 qos_shift;
- u8 qpn_mask;
-
- u16 rhf_offset; /* offset of RHF within receive header entry */
- u16 irev; /* implementation revision */
- u16 dc8051_ver; /* 8051 firmware version */
-
- struct platform_config platform_config;
- struct platform_config_cache pcfg_cache;
-
- struct diag_client *diag_client;
- spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
-
- u8 psxmitwait_supported;
- /* cycle length of PS* counters in HW (in picoseconds) */
- u16 psxmitwait_check_rate;
- /* high volume overflow errors deferred to tasklet */
- struct tasklet_struct error_tasklet;
-
- /* MSI-X information */
- struct hfi1_msix_entry *msix_entries;
- u32 num_msix_entries;
-
- /* INTx information */
- u32 requested_intx_irq; /* did we request one? */
- char intx_name[MAX_NAME_SIZE]; /* INTx name */
-
- /* general interrupt: mask of handled interrupts */
- u64 gi_mask[CCE_NUM_INT_CSRS];
-
- struct rcv_array_data rcv_entries;
-
- /*
- * 64 bit synthetic counters
- */
- struct timer_list synth_stats_timer;
-
- /*
- * device counters
- */
- char *cntrnames;
- size_t cntrnameslen;
- size_t ndevcntrs;
- u64 *cntrs;
- u64 *scntrs;
-
- /*
- * remembered values for synthetic counters
- */
- u64 last_tx;
- u64 last_rx;
-
- /*
- * per-port counters
- */
- size_t nportcntrs;
- char *portcntrnames;
- size_t portcntrnameslen;
-
- struct hfi1_snoop_data hfi1_snoop;
-
- struct err_info_rcvport err_info_rcvport;
- struct err_info_constraint err_info_rcv_constraint;
- struct err_info_constraint err_info_xmit_constraint;
- u8 err_info_uncorrectable;
- u8 err_info_fmconfig;
-
- atomic_t drop_packet;
- u8 do_drop;
-
- /*
- * Software counters for the status bits defined by the
- * associated error status registers
- */
- u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
- u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
- u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
- u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
- u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
- u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
- u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];
-
- /* Software counter that spans all contexts */
- u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
- /* Software counter that spans all DMA engines */
- u64 sw_send_dma_eng_err_status_cnt[
- NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
- /* Software counter that aggregates all cce_err_status errors */
- u64 sw_cce_err_status_aggregate;
-
- /* receive interrupt functions */
- rhf_rcv_function_ptr *rhf_rcv_function_map;
- rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
-
- /*
- * Handlers for outgoing data so that snoop/capture does not
- * have to have its hooks in the send path
- */
- send_routine process_pio_send;
- send_routine process_dma_send;
- void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count);
-
- /* OUI comes from the HW. Used everywhere as 3 separate bytes. */
- u8 oui1;
- u8 oui2;
- u8 oui3;
- /* Timer and counter used to detect RcvBufOvflCnt changes */
- struct timer_list rcverr_timer;
- u32 rcv_ovfl_cnt;
-
- wait_queue_head_t event_queue;
-
- /* Save the enabled LCB error bits */
- u64 lcb_err_en;
- u8 dc_shutdown;
-
- /* receive context tail dummy address */
- __le64 *rcvhdrtail_dummy_kvaddr;
- dma_addr_t rcvhdrtail_dummy_physaddr;
-
- bool eprom_available; /* true if EPROM is available for this device */
- bool aspm_supported; /* Does HW support ASPM */
- bool aspm_enabled; /* ASPM state: enabled/disabled */
- /* Serialize ASPM enable/disable between multiple verbs contexts */
- spinlock_t aspm_lock;
- /* Number of verbs contexts which have disabled ASPM */
- atomic_t aspm_disabled_cnt;
-
- struct hfi1_affinity *affinity;
-};
-
-/* 8051 firmware version helper */
-#define dc8051_ver(a, b) ((a) << 8 | (b))
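For example, 8051 firmware version 0.20 packs as dc8051_ver(0, 20) == (0 << 8 | 20) == 0x0014, so whole-value comparisons against dd->dc8051_ver order correctly as long as each field fits in 8 bits.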
-
-/* f_put_tid types */
-#define PT_EXPECTED 0
-#define PT_EAGER 1
-#define PT_INVALID 2
-
-struct tid_rb_node;
-struct mmu_rb_node;
-
-/* Private data for file operations */
-struct hfi1_filedata {
- struct hfi1_ctxtdata *uctxt;
- unsigned subctxt;
- struct hfi1_user_sdma_comp_q *cq;
- struct hfi1_user_sdma_pkt_q *pq;
- /* for cpu affinity; -1 if none */
- int rec_cpu_num;
- u32 tid_n_pinned;
- struct rb_root tid_rb_root;
- struct tid_rb_node **entry_to_rb;
- spinlock_t tid_lock; /* protect tid_[limit,used] counters */
- u32 tid_limit;
- u32 tid_used;
- u32 *invalid_tids;
- u32 invalid_tid_idx;
- /* protect invalid_tids array and invalid_tid_idx */
- spinlock_t invalid_lock;
-};
-
-extern struct list_head hfi1_dev_list;
-extern spinlock_t hfi1_devs_lock;
-struct hfi1_devdata *hfi1_lookup(int unit);
-extern u32 hfi1_cpulist_count;
-extern unsigned long *hfi1_cpulist;
-
-extern unsigned int snoop_drop_send;
-extern unsigned int snoop_force_capture;
-int hfi1_init(struct hfi1_devdata *, int);
-int hfi1_count_units(int *npresentp, int *nupp);
-int hfi1_count_active_units(void);
-
-int hfi1_diag_add(struct hfi1_devdata *);
-void hfi1_diag_remove(struct hfi1_devdata *);
-void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
-
-void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
-
-int hfi1_create_rcvhdrq(struct hfi1_devdata *, struct hfi1_ctxtdata *);
-int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *);
-int hfi1_create_ctxts(struct hfi1_devdata *dd);
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32, int);
-void hfi1_init_pportdata(struct pci_dev *, struct hfi1_pportdata *,
- struct hfi1_devdata *, u8, u8);
-void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *);
-
-int handle_receive_interrupt(struct hfi1_ctxtdata *, int);
-int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
-int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
-void set_all_slowpath(struct hfi1_devdata *dd);
-
-/* receive packet handler dispositions */
-#define RCV_PKT_OK 0x0 /* keep going */
-#define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
-#define RCV_PKT_DONE 0x2 /* stop, no more packets detected */
-
-/* calculate the current RHF address */
-static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
-{
- return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
-}
-
-int hfi1_reset_device(int);
-
-/* return the driver's idea of the logical OPA port state */
-static inline u32 driver_lstate(struct hfi1_pportdata *ppd)
-{
- return ppd->lstate; /* use the cached value */
-}
-
-void receive_interrupt_work(struct work_struct *work);
-
-/* extract service channel from header and rhf */
-static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
-{
- return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
- ((!!(rhf & RHF_DC_INFO_MASK)) << 4);
-}
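hdr2sc() assembles the 5-bit service channel: SC[3:0] comes from bits 15:12 of lrh[0] (after byte-swapping to host order) and SC[4] from the RHF DC-info flag. A stand-alone sketch of the same bit assembly (plain C; the flag position is illustrative, not the real RHF layout):

    #include <stdint.h>

    #define DC_INFO_FLAG (1ULL << 63) /* hypothetical bit position */

    static int sc5_from_fields(uint16_t lrh0_host, uint64_t rhf)
    {
            /* Low nibble of SC from the LRH, bit 4 from the RHF flag. */
            return ((lrh0_host >> 12) & 0xf) | ((rhf & DC_INFO_FLAG) ? 0x10 : 0);
    }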
-
-static inline u16 generate_jkey(kuid_t uid)
-{
- return from_kuid(current_user_ns(), uid) & 0xffff;
-}
-
-/*
- * active_egress_rate
- *
- * returns the active egress rate in units of [10^6 bits/sec]
- */
-static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
-{
- u16 link_speed = ppd->link_speed_active;
- u16 link_width = ppd->link_width_active;
- u32 egress_rate;
-
- if (link_speed == OPA_LINK_SPEED_25G)
- egress_rate = 25000;
- else /* assume OPA_LINK_SPEED_12_5G */
- egress_rate = 12500;
-
- switch (link_width) {
- case OPA_LINK_WIDTH_4X:
- egress_rate *= 4;
- break;
- case OPA_LINK_WIDTH_3X:
- egress_rate *= 3;
- break;
- case OPA_LINK_WIDTH_2X:
- egress_rate *= 2;
- break;
- default:
- /* assume IB_WIDTH_1X */
- break;
- }
-
- return egress_rate;
-}
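Worked example: at OPA_LINK_SPEED_25G and OPA_LINK_WIDTH_4X this returns 25000 * 4 = 100000, i.e. 100 Gbit/s; at 12.5G and 2X it returns 12500 * 2 = 25000 Mbit/s.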
-
-/*
- * egress_cycles
- *
- * Returns the number of 'fabric clock cycles' to egress a packet
- * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
- * rate is (approximately) 805 MHz, the units of the returned value
- * are (1/805 MHz).
- */
-static inline u32 egress_cycles(u32 len, u32 rate)
-{
- u32 cycles;
-
- /*
- * cycles is:
- *
- * (length) [bits] / (rate) [bits/sec]
- * ---------------------------------------------------
- * fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
- */
-
- cycles = len * 8; /* bits */
- cycles *= 805;
- cycles /= rate;
-
- return cycles;
-}
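Worked example: a 4096-byte packet on a 100000 Mbit/s link gives cycles = 4096 * 8 * 805 / 100000 = 263 (integer division), which at 805 MHz is roughly 327 ns of egress time. Note the intermediate product len * 8 * 805 stays within u32 only for len below about 650 KiB.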
-
-void set_link_ipg(struct hfi1_pportdata *ppd);
-void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
- u32 rqpn, u8 svc_type);
-void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
- u32 pkey, u32 slid, u32 dlid, u8 sc5,
- const struct ib_grh *old_grh);
-
-#define PACKET_EGRESS_TIMEOUT 350
-static inline void pause_for_credit_return(struct hfi1_devdata *dd)
-{
- /* Pause at least 1us, to ensure chip returns all credits */
- u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;
-
- udelay(usec ? usec : 1);
-}
-
-/**
- * sc_to_vlt() - reverse lookup sc to vl
- * @dd: devdata
- * @sc5: 5 bit sc
- */
-static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
-{
- unsigned seq;
- u8 rval;
-
- if (sc5 >= OPA_MAX_SCS)
- return (u8)(0xff);
-
- do {
- seq = read_seqbegin(&dd->sc2vl_lock);
- rval = *(((u8 *)dd->sc2vl) + sc5);
- } while (read_seqretry(&dd->sc2vl_lock, seq));
-
- return rval;
-}
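The read_seqbegin()/read_seqretry() loop retries if a writer updates sc2vl mid-read. The writer side would take the lock exclusively; a hedged sketch for symmetry (set_sc2vl_entry is hypothetical, not a driver function):

    /* Hypothetical writer for the sc2vl table. */
    static void set_sc2vl_entry(struct hfi1_devdata *dd, u8 sc5, u8 vl)
    {
            write_seqlock(&dd->sc2vl_lock);
            *(((u8 *)dd->sc2vl) + sc5) = vl;
            write_sequnlock(&dd->sc2vl_lock);
    }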
-
-#define PKEY_MEMBER_MASK 0x8000
-#define PKEY_LOW_15_MASK 0x7fff
-
-/*
- * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
- * being an entry from the ingress partition key table), return 0
- * otherwise. Use the matching criteria for ingress partition keys
- * specified in the OPAv1 spec, section 9.10.14.
- */
-static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
-{
- u16 mkey = pkey & PKEY_LOW_15_MASK;
- u16 ment = ent & PKEY_LOW_15_MASK;
-
- if (mkey == ment) {
- /*
- * If pkey[15] is clear (limited partition member),
- * is bit 15 in the corresponding table element
- * clear (limited member)?
- */
- if (!(pkey & PKEY_MEMBER_MASK))
- return !!(ent & PKEY_MEMBER_MASK);
- return 1;
- }
- return 0;
-}
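Worked example: a limited-member pkey 0x7fff matches the table entry 0xffff (the low 15 bits agree and the entry's member bit is set) but does not match the entry 0x7fff, since two limited members may not communicate. A full-member pkey such as 0xffff matches either entry.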
-
-/*
- * ingress_pkey_table_search - search the entire pkey table for
- * an entry which matches 'pkey'. return 0 if a match is found,
- * and 1 otherwise.
- */
-static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
-{
- int i;
-
- for (i = 0; i < MAX_PKEY_VALUES; i++) {
- if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
- return 0;
- }
- return 1;
-}
-
-/*
- * ingress_pkey_table_fail - record a failure of ingress pkey validation,
- * i.e., increment port_rcv_constraint_errors for the port, and record
- * the 'error info' for this failure.
- */
-static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
- u16 slid)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- incr_cntr64(&ppd->port_rcv_constraint_errors);
- if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
- dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
- dd->err_info_rcv_constraint.slid = slid;
- dd->err_info_rcv_constraint.pkey = pkey;
- }
-}
-
-/*
- * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
- * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
- * is a hint as to the best place in the partition key table to begin
- * searching. This function should not be called on the data path, for
- * performance reasons. On the data path the pkey check is expected to be
- * done by hardware; rcv_pkey_check() should be called instead.
- */
-static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
- u8 sc5, u8 idx, u16 slid)
-{
- if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
- return 0;
-
- /* If SC15, pkey[0:14] must be 0x7fff */
- if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
- goto bad;
-
- /* Is the pkey = 0x0, or 0x8000? */
- if ((pkey & PKEY_LOW_15_MASK) == 0)
- goto bad;
-
- /* The most likely matching pkey has index 'idx' */
- if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
- return 0;
-
- /* no match - try the whole table */
- if (!ingress_pkey_table_search(ppd, pkey))
- return 0;
-
-bad:
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
-}
-
-/*
- * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
- * otherwise. It only ensures the pkey is valid for QP0. This function
- * should be called on the data path instead of ingress_pkey_check,
- * as on the data path the pkey check is done by hardware (except for QP0).
- */
-static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
- u8 sc5, u16 slid)
-{
- if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
- return 0;
-
- /* If SC15, pkey[0:14] must be 0x7fff */
- if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
- goto bad;
-
- return 0;
-bad:
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
-}
-
-/* MTU handling */
-
-/* MTU enumeration, 256-4k match IB */
-#define OPA_MTU_0 0
-#define OPA_MTU_256 1
-#define OPA_MTU_512 2
-#define OPA_MTU_1024 3
-#define OPA_MTU_2048 4
-#define OPA_MTU_4096 5
-
-u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
-int mtu_to_enum(u32 mtu, int default_if_bad);
-u16 enum_to_mtu(int);
-static inline int valid_ib_mtu(unsigned int mtu)
-{
- return mtu == 256 || mtu == 512 ||
- mtu == 1024 || mtu == 2048 ||
- mtu == 4096;
-}
-
-static inline int valid_opa_max_mtu(unsigned int mtu)
-{
- return mtu >= 2048 &&
- (valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
-}
-
-int set_mtu(struct hfi1_pportdata *);
-
-int hfi1_set_lid(struct hfi1_pportdata *, u32, u8);
-void hfi1_disable_after_error(struct hfi1_devdata *);
-int hfi1_set_uevent_bits(struct hfi1_pportdata *, const int);
-int hfi1_rcvbuf_validate(u32, u8, u16 *);
-
-int fm_get_table(struct hfi1_pportdata *, int, void *);
-int fm_set_table(struct hfi1_pportdata *, int, void *);
-
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
-void reset_link_credits(struct hfi1_devdata *dd);
-void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
-
-int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count);
-int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
-
-static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
-{
- return ppd->dd;
-}
-
-static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
-{
- return container_of(dev, struct hfi1_devdata, verbs_dev);
-}
-
-static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
-{
- return dd_from_dev(to_idev(ibdev));
-}
-
-static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
-{
- return container_of(ibp, struct hfi1_pportdata, ibport_data);
-}
-
-static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
-{
- return container_of(rdi, struct hfi1_ibdev, rdi);
-}
-
-static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
-
- WARN_ON(pidx >= dd->num_pports);
- return &dd->pport[pidx].ibport_data;
-}
-
-/*
- * Return the indexed PKEY from the port PKEY table.
- */
-static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 ret;
-
- if (index >= ARRAY_SIZE(ppd->pkeys))
- ret = 0;
- else
- ret = ppd->pkeys[index];
-
- return ret;
-}
-
-/*
- * Readers of cc_state must call get_cc_state() under rcu_read_lock().
- * Writers of cc_state must call get_cc_state() under cc_state_lock.
- */
-static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
-{
- return rcu_dereference(ppd->cc_state);
-}
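A typical reader, following the rule above (sketch only; the fields of struct cc_state are not shown here):

    struct cc_state *cc_state;

    rcu_read_lock();
    cc_state = get_cc_state(ppd);
    if (cc_state) {
            /* ... read fields of *cc_state under the RCU read lock ... */
    }
    rcu_read_unlock();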
-
-/*
- * values for dd->flags (_device_ related flags)
- */
-#define HFI1_INITTED 0x1 /* chip and driver up and initted */
-#define HFI1_PRESENT 0x2 /* chip accesses can be done */
-#define HFI1_FROZEN 0x4 /* chip in SPC freeze */
-#define HFI1_HAS_SDMA_TIMEOUT 0x8
-#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
-#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
-
-/* IB dword length mask in PBC (lower 11 bits); same for all chips */
-#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
-
-/* ctxt_flag bit offsets */
- /* context has been setup */
-#define HFI1_CTXT_SETUP_DONE 1
- /* waiting for a packet to arrive */
-#define HFI1_CTXT_WAITING_RCV 2
- /* master has not finished initializing */
-#define HFI1_CTXT_MASTER_UNINIT 4
- /* waiting for an urgent packet to arrive */
-#define HFI1_CTXT_WAITING_URG 5
-
-/* free up any allocated data at close */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
- const struct pci_device_id *);
-void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
-
-/* LED beaconing functions */
-void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
- unsigned int timeoff);
-void shutdown_led_override(struct hfi1_pportdata *ppd);
-
-#define HFI1_CREDIT_RETURN_RATE (100)
-
-/*
- * The number of words for the KDETH protocol field. If this is
- * larger than the actual field used, then part of the payload
- * will be in the header.
- *
- * Optimally, we want this sized so that a typical case will
- * use full cache lines. The typical local KDETH header would
- * be:
- *
- * Bytes Field
- * 8 LRH
- * 12 BTH
- * ?? KDETH
- * 8 RHF
- * ---
- * 28 + KDETH
- *
- * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
- */
-#define DEFAULT_RCVHDRSIZE 9
-
-/*
- * Maximal header byte count:
- *
- * Bytes Field
- * 8 LRH
- * 40 GRH (optional)
- * 12 BTH
- * ?? KDETH
- * 8 RHF
- * ---
- * 68 + KDETH
- *
- * We also want to maintain a cache line alignment to assist DMA'ing
- * of the header bytes. Round up to a good size.
- */
-#define DEFAULT_RCVHDR_ENTSIZE 32
-
-bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
-int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
-
-static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
-{
- *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
-{
- /*
- * volatile because it is a DMA target from the chip; the routine is
- * inlined, and we do not want register caching or reordering.
- */
- return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
-}
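Combined with the cached rcd->head offset, a receive path can tell whether packets are pending by comparing head against the DMA'ed tail. A hedged sketch, assuming tail updates are enabled (DMA_RTAIL); rcvhdrq_pkts_pending is illustrative, not a driver function:

    /* Illustrative only: true when the chip has advanced the tail. */
    static inline int rcvhdrq_pkts_pending(const struct hfi1_ctxtdata *rcd)
    {
            return rcd->head != get_rcvhdrtail(rcd);
    }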
-
-/*
- * sysfs interface.
- */
-
-extern const char ib_hfi1_version[];
-
-int hfi1_device_create(struct hfi1_devdata *);
-void hfi1_device_remove(struct hfi1_devdata *);
-
-int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
- struct kobject *kobj);
-int hfi1_verbs_register_sysfs(struct hfi1_devdata *);
-void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *);
-/* Hook for sysfs read of QSFP */
-int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
-
-int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
-void hfi1_pcie_cleanup(struct pci_dev *);
-int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *,
- const struct pci_device_id *);
-void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
-void hfi1_pcie_flr(struct hfi1_devdata *);
-int pcie_speeds(struct hfi1_devdata *);
-void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *);
-void hfi1_enable_intx(struct pci_dev *);
-void restore_pci_variables(struct hfi1_devdata *dd);
-int do_pcie_gen3_transition(struct hfi1_devdata *dd);
-int parse_platform_config(struct hfi1_devdata *dd);
-int get_platform_config_field(struct hfi1_devdata *dd,
- enum platform_config_table_type_encoding
- table_type, int table_index, int field_index,
- u32 *data, u32 len);
-
-const char *get_unit_name(int unit);
-const char *get_card_name(struct rvt_dev_info *rdi);
-struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-static inline void flush_wc(void)
-{
- asm volatile("sfence" : : : "memory");
-}
-
-void handle_eflags(struct hfi1_packet *packet);
-int process_receive_ib(struct hfi1_packet *packet);
-int process_receive_bypass(struct hfi1_packet *packet);
-int process_receive_error(struct hfi1_packet *packet);
-int kdeth_process_expected(struct hfi1_packet *packet);
-int kdeth_process_eager(struct hfi1_packet *packet);
-int process_receive_invalid(struct hfi1_packet *packet);
-
-extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
-
-void update_sge(struct rvt_sge_state *ss, u32 length);
-
-/* global module parameter variables */
-extern unsigned int hfi1_max_mtu;
-extern unsigned int hfi1_cu;
-extern unsigned int user_credit_return_threshold;
-extern int num_user_contexts;
-extern unsigned n_krcvqs;
-extern uint krcvqs[];
-extern int krcvqsset;
-extern uint kdeth_qp;
-extern uint loopback;
-extern uint quick_linkup;
-extern uint rcv_intr_timeout;
-extern uint rcv_intr_count;
-extern uint rcv_intr_dynamic;
-extern ushort link_crc_mask;
-
-extern struct mutex hfi1_mutex;
-
-/* Number of seconds before our card status check... */
-#define STATUS_TIMEOUT 60
-
-#define DRIVER_NAME "hfi1"
-#define HFI1_USER_MINOR_BASE 0
-#define HFI1_TRACE_MINOR 127
-#define HFI1_DIAGPKT_MINOR 128
-#define HFI1_DIAG_MINOR_BASE 129
-#define HFI1_SNOOP_CAPTURE_BASE 200
-#define HFI1_NMINORS 255
-
-#define PCI_VENDOR_ID_INTEL 0x8086
-#define PCI_DEVICE_ID_INTEL0 0x24f0
-#define PCI_DEVICE_ID_INTEL1 0x24f1
-
-#define HFI1_PKT_USER_SC_INTEGRITY \
- (SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK \
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK \
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
-
-#define HFI1_PKT_KERNEL_SC_INTEGRITY \
- (SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
-
-static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
- u16 ctxt_type)
-{
- u64 base_sc_integrity =
- SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
- | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
- | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
-
- if (ctxt_type == SC_USER)
- base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
- else
- base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
-
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sc_integrity &
- ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
- return base_sc_integrity;
-}
-
-static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
-{
- u64 base_sdma_integrity =
- SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
- | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
- | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
-
- if (is_ax(dd))
- /* turn off send-side job key checks - A0 */
- return base_sdma_integrity &
- ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
- return base_sdma_integrity;
-}
-
-/*
- * hfi1_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
- * the same as dd_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening.
- */
-#define hfi1_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define hfi1_early_info(dev, fmt, ...) \
- dev_info(dev, fmt, ##__VA_ARGS__)
-
-#define dd_dev_emerg(dd, fmt, ...) \
- dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
-#define dd_dev_err(dd, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
-#define dd_dev_warn(dd, fmt, ...) \
- dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
-
-#define dd_dev_warn_ratelimited(dd, fmt, ...) \
- dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
-
-#define dd_dev_info(dd, fmt, ...) \
- dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
-
-#define dd_dev_dbg(dd, fmt, ...) \
- dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
-
-#define hfi1_dev_porterr(dd, port, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- get_unit_name((dd)->unit), (dd)->unit, (port), \
- ##__VA_ARGS__)
-
-/*
- * This is used for formatting hardware error messages.
- */
-struct hfi1_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-/* in intr.c... */
-void hfi1_format_hwerrors(u64 hwerrs,
- const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
-
-#define USER_OPCODE_CHECK_VAL 0xC0
-#define USER_OPCODE_CHECK_MASK 0xC0
-#define OPCODE_CHECK_VAL_DISABLED 0x0
-#define OPCODE_CHECK_MASK_DISABLED 0x0
-
-static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd;
- int i;
-
- dd->z_int_counter = get_all_cpu_total(dd->int_counter);
- dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
- dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
-
- ppd = (struct hfi1_pportdata *)(dd + 1);
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- ppd->ibport_data.rvp.z_rc_acks =
- get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
- ppd->ibport_data.rvp.z_rc_qacks =
- get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
- }
-}
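Note that nothing is written to zero here: a "reset" just snapshots the current per-CPU totals into the z_ baseline fields, and later reads report total minus baseline. A stand-alone sketch of the pattern (plain C):

    #include <stdint.h>

    struct sw_cntr {
            uint64_t total;  /* monotonically increasing */
            uint64_t z_base; /* snapshot taken at "reset" */
    };

    static void sw_cntr_reset(struct sw_cntr *c)
    {
            c->z_base = c->total; /* no destructive write to total */
    }

    static uint64_t sw_cntr_read(const struct sw_cntr *c)
    {
            return c->total - c->z_base;
    }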
-
-/* Control LED state */
-static inline void setextled(struct hfi1_devdata *dd, u32 on)
-{
- if (on)
- write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
- else
- write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
-}
-
-/* return the i2c resource given the target */
-static inline u32 i2c_target(u32 target)
-{
- return target ? CR_I2C2 : CR_I2C1;
-}
-
-/* return the i2c chain chip resource that this HFI uses for QSFP */
-static inline u32 qsfp_resource(struct hfi1_devdata *dd)
-{
- return i2c_target(dd->hfi1_id);
-}
-
-int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
-
-#endif /* _HFI1_KERNEL_H */
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
deleted file mode 100644
index cfcdc16b4..000000000
--- a/drivers/staging/rdma/hfi1/init.c
+++ /dev/null
@@ -1,1809 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/hrtimer.h>
-#include <rdma/rdma_vt.h>
-
-#include "hfi.h"
-#include "device.h"
-#include "common.h"
-#include "trace.h"
-#include "mad.h"
-#include "sdma.h"
-#include "debugfs.h"
-#include "verbs.h"
-#include "aspm.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-
-/*
- * minimum number of buffers we want per user context, beyond those the driver uses
- */
-#define HFI1_MIN_USER_CTXT_BUFCNT 7
-
-#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
-#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
-#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
-#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
-
-/*
- * Number of user receive contexts we are configured to use (to allow for more
- * pio buffers per ctxt, etc.). A negative value means use one user context
- * per CPU.
- */
-int num_user_contexts = -1;
-module_param_named(num_user_contexts, num_user_contexts, int, S_IRUGO);
-MODULE_PARM_DESC(
- num_user_contexts, "Set max number of user contexts to use");
-
-uint krcvqs[RXE_NUM_DATA_VL];
-int krcvqsset;
-module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
-MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
-
-/* computed based on above array */
-unsigned n_krcvqs;
-
-static unsigned hfi1_rcvarr_split = 25;
-module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");
-
-static uint eager_buffer_size = (2 << 20); /* 2MB */
-module_param(eager_buffer_size, uint, S_IRUGO);
-MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");
-
-static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
-module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");
-
-static uint hfi1_hdrq_entsize = 32;
-module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
-MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
-
-unsigned int user_credit_return_threshold = 33; /* default is 33% */
-module_param(user_credit_return_threshold, uint, S_IRUGO);
-MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
-
-static inline u64 encode_rcv_header_entry_size(u16);
-
-static struct idr hfi1_unit_table;
-u32 hfi1_cpulist_count;
-unsigned long *hfi1_cpulist;
-
-/*
- * Common code for creating the receive context array.
- */
-int hfi1_create_ctxts(struct hfi1_devdata *dd)
-{
- unsigned i;
- int ret;
-
- /* Control context has to be always 0 */
- BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
-
- dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
- GFP_KERNEL, dd->node);
- if (!dd->rcd)
- goto nomem;
-
- /* create one or more kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; ++i) {
- struct hfi1_pportdata *ppd;
- struct hfi1_ctxtdata *rcd;
-
- ppd = dd->pport + (i % dd->num_pports);
- rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
- if (!rcd) {
- dd_dev_err(dd,
- "Unable to allocate kernel receive context, failing\n");
- goto nomem;
- }
- /*
- * Set up the kernel context flags here and now because they
- * use default values for all receive side memories. User
- * contexts will be handled as they are created.
- */
- rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
- HFI1_CAP_KGET(NODROP_RHQ_FULL) |
- HFI1_CAP_KGET(NODROP_EGR_FULL) |
- HFI1_CAP_KGET(DMA_RTAIL);
-
- /* Control context must use DMA_RTAIL */
- if (rcd->ctxt == HFI1_CTRL_CTXT)
- rcd->flags |= HFI1_CAP_DMA_RTAIL;
- rcd->seq_cnt = 1;
-
- rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
- if (!rcd->sc) {
- dd_dev_err(dd,
- "Unable to allocate kernel send context, failing\n");
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
- goto nomem;
- }
-
- ret = hfi1_init_ctxt(rcd->sc);
- if (ret < 0) {
- dd_dev_err(dd,
- "Failed to setup kernel receive context, failing\n");
- sc_free(rcd->sc);
- dd->rcd[rcd->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, rcd);
- ret = -EFAULT;
- goto bail;
- }
- }
-
- /*
- * Initialize aspm, to be done after gen3 transition and setting up
- * contexts and before enabling interrupts
- */
- aspm_init(dd);
-
- return 0;
-nomem:
- ret = -ENOMEM;
-bail:
- kfree(dd->rcd);
- dd->rcd = NULL;
- return ret;
-}
-
-/*
- * Common code for user and kernel context setup.
- */
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
- int numa)
-{
- struct hfi1_devdata *dd = ppd->dd;
- struct hfi1_ctxtdata *rcd;
- unsigned kctxt_ngroups = 0;
- u32 base;
-
- if (dd->rcv_entries.nctxt_extra >
- dd->num_rcv_contexts - dd->first_user_ctxt)
- kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
- (dd->num_rcv_contexts - dd->first_user_ctxt));
- rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
- if (rcd) {
- u32 rcvtids, max_entries;
-
- hfi1_cdbg(PROC, "setting up context %u\n", ctxt);
-
- INIT_LIST_HEAD(&rcd->qp_wait_list);
- rcd->ppd = ppd;
- rcd->dd = dd;
- rcd->cnt = 1;
- rcd->ctxt = ctxt;
- dd->rcd[ctxt] = rcd;
- rcd->numa_id = numa;
- rcd->rcv_array_groups = dd->rcv_entries.ngroups;
-
- mutex_init(&rcd->exp_lock);
-
- /*
- * Calculate the context's RcvArray entry starting point.
- * We do this here because we have to take into account all
- * the RcvArray entries that previous contexts would have
- * taken and we have to account for any extra groups
- * assigned to the kernel or user contexts.
- */
- if (ctxt < dd->first_user_ctxt) {
- if (ctxt < kctxt_ngroups) {
- base = ctxt * (dd->rcv_entries.ngroups + 1);
- rcd->rcv_array_groups++;
- } else
- base = kctxt_ngroups +
- (ctxt * dd->rcv_entries.ngroups);
- } else {
- u16 ct = ctxt - dd->first_user_ctxt;
-
- base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
- kctxt_ngroups);
- if (ct < dd->rcv_entries.nctxt_extra) {
- base += ct * (dd->rcv_entries.ngroups + 1);
- rcd->rcv_array_groups++;
- } else
- base += dd->rcv_entries.nctxt_extra +
- (ct * dd->rcv_entries.ngroups);
- }
- rcd->eager_base = base * dd->rcv_entries.group_size;
-
- /* Validate and initialize Rcv Hdr Q variables */
- if (rcvhdrcnt % HDRQ_INCREMENT) {
- dd_dev_err(dd,
- "ctxt%u: header queue count %d must be divisible by %lu\n",
- rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
- goto bail;
- }
- rcd->rcvhdrq_cnt = rcvhdrcnt;
- rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
- /*
- * Simple Eager buffer allocation: we have already pre-allocated
- * the number of RcvArray entry groups. Each ctxtdata structure
- * holds the number of groups for that context.
- *
- * To follow CSR requirements and maintain cacheline alignment,
- * make sure all sizes and bases are multiples of group_size.
- *
- * The expected entry count is what is left after assigning
- * eager.
- */
- max_entries = rcd->rcv_array_groups *
- dd->rcv_entries.group_size;
- rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
- rcd->egrbufs.count = round_down(rcvtids,
- dd->rcv_entries.group_size);
- if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
- dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
- rcd->ctxt);
- rcd->egrbufs.count = MAX_EAGER_ENTRIES;
- }
- hfi1_cdbg(PROC,
- "ctxt%u: max Eager buffer RcvArray entries: %u\n",
- rcd->ctxt, rcd->egrbufs.count);
-
- /*
- * Allocate array that will hold the eager buffer accounting
- * data.
- * This will allocate the maximum possible buffer count based
- * on the value of the RcvArray split parameter.
- * The resulting value will be rounded down to the closest
- * multiple of dd->rcv_entries.group_size.
- */
- rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
- sizeof(*rcd->egrbufs.buffers),
- GFP_KERNEL);
- if (!rcd->egrbufs.buffers)
- goto bail;
- rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
- sizeof(*rcd->egrbufs.rcvtids),
- GFP_KERNEL);
- if (!rcd->egrbufs.rcvtids)
- goto bail;
- rcd->egrbufs.size = eager_buffer_size;
- /*
- * The size of the buffers programmed into the RcvArray
- * entries needs to be big enough to handle the highest
- * MTU supported.
- */
- if (rcd->egrbufs.size < hfi1_max_mtu) {
- rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
- hfi1_cdbg(PROC,
- "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
- rcd->ctxt, rcd->egrbufs.size);
- }
- rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
-
- if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
- rcd->opstats = kzalloc(sizeof(*rcd->opstats),
- GFP_KERNEL);
- if (!rcd->opstats)
- goto bail;
- }
- }
- return rcd;
-bail:
- kfree(rcd->egrbufs.rcvtids);
- kfree(rcd->egrbufs.buffers);
- kfree(rcd);
- return NULL;
-}
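-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): with hypothetical values ngroups = 4, group_size = 8 and
- * kctxt_ngroups = 2, the kernel contexts lay out their RcvArray
- * groups as follows:
- *
- *	ctxt 0: base = 0 * (4 + 1) = 0,  5 groups (one extra)
- *	ctxt 1: base = 1 * (4 + 1) = 5,  5 groups (one extra)
- *	ctxt 2: base = 2 + 2 * 4   = 10, 4 groups
- *
- * so eager_base = base * group_size = 0, 40, and 80 RcvArray entries,
- * respectively.
- */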
-
-/*
- * Convert a receive header entry size to the encoding used in the CSR.
- *
- * Return zero if the given size is invalid.
- */
-static inline u64 encode_rcv_header_entry_size(u16 size)
-{
- /* there are only 3 valid receive header entry sizes */
-	if (size == 2)
-		return 1;
-	if (size == 16)
-		return 2;
-	if (size == 32)
-		return 4;
- return 0; /* invalid */
-}
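-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): the three valid DW entry sizes map to CSR encodings as
- *
- *	encode_rcv_header_entry_size(2)  == 1
- *	encode_rcv_header_entry_size(16) == 2
- *	encode_rcv_header_entry_size(32) == 4
- *
- * and any other size (e.g. 8) returns 0, which callers treat as
- * invalid.
- */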
-
-/*
- * Select the largest ccti value over all SLs to determine the intra-
- * packet gap for the link.
- *
- * called with cca_timer_lock held (to protect access to cca_timer
- * array), and rcu_read_lock() (to protect access to cc_state).
- */
-void set_link_ipg(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- struct cc_state *cc_state;
- int i;
- u16 cce, ccti_limit, max_ccti = 0;
- u16 shift, mult;
- u64 src;
-	u32 current_egress_rate; /* Mbits/sec */
- u32 max_pkt_time;
- /*
- * max_pkt_time is the maximum packet egress time in units
- * of the fabric clock period 1/(805 MHz).
- */
-
- cc_state = get_cc_state(ppd);
-
- if (!cc_state)
- /*
- * This should _never_ happen - rcu_read_lock() is held,
- * and set_link_ipg() should not be called if cc_state
- * is NULL.
- */
- return;
-
- for (i = 0; i < OPA_MAX_SLS; i++) {
- u16 ccti = ppd->cca_timer[i].ccti;
-
- if (ccti > max_ccti)
- max_ccti = ccti;
- }
-
- ccti_limit = cc_state->cct.ccti_limit;
- if (max_ccti > ccti_limit)
- max_ccti = ccti_limit;
-
- cce = cc_state->cct.entries[max_ccti].entry;
- shift = (cce & 0xc000) >> 14;
- mult = (cce & 0x3fff);
-
- current_egress_rate = active_egress_rate(ppd);
-
- max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);
-
- src = (max_pkt_time >> shift) * mult;
-
- src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
- src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;
-
- write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
-}
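-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): a CCT entry packs a 2-bit shift and a 14-bit multiplier,
- * so cce == 0x8005 decodes to shift == 2 and mult == 5; a packet
- * taking 1000 fabric clocks then yields
- *
- *	src = (1000 >> 2) * 5 = 1250
- *
- * before the reload mask and shift are applied.
- */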
-
-static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
-{
- struct cca_timer *cca_timer;
- struct hfi1_pportdata *ppd;
- int sl;
- u16 ccti, ccti_timer, ccti_min;
- struct cc_state *cc_state;
- unsigned long flags;
-
- cca_timer = container_of(t, struct cca_timer, hrtimer);
- ppd = cca_timer->ppd;
- sl = cca_timer->sl;
-
- rcu_read_lock();
-
- cc_state = get_cc_state(ppd);
-
- if (!cc_state) {
- rcu_read_unlock();
- return HRTIMER_NORESTART;
- }
-
- /*
- * 1) decrement ccti for SL
- * 2) calculate IPG for link (set_link_ipg())
- * 3) restart timer, unless ccti is at min value
- */
-
- ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
- ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
-
- spin_lock_irqsave(&ppd->cca_timer_lock, flags);
-
- ccti = cca_timer->ccti;
-
- if (ccti > ccti_min) {
- cca_timer->ccti--;
- set_link_ipg(ppd);
- }
-
- spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
-
- rcu_read_unlock();
-
- if (ccti > ccti_min) {
- unsigned long nsec = 1024 * ccti_timer;
- /* ccti_timer is in units of 1.024 usec */
- hrtimer_forward_now(t, ns_to_ktime(nsec));
- return HRTIMER_RESTART;
- }
- return HRTIMER_NORESTART;
-}
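-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): since ccti_timer is in units of 1.024 usec, the restart
- * interval is nsec = 1024 * ccti_timer; e.g. a ccti_timer of 100
- * re-arms the hrtimer 102,400 ns (~102.4 usec) later.
- */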
-
-/*
- * Common code for initializing the physical port structure.
- */
-void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
- struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
-{
- int i, size;
- uint default_pkey_idx;
-
- ppd->dd = dd;
- ppd->hw_pidx = hw_pidx;
- ppd->port = port; /* IB port number, not index */
-
- default_pkey_idx = 1;
-
- ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
- if (loopback) {
- hfi1_early_err(&pdev->dev,
- "Faking data partition 0x8001 in idx %u\n",
- !default_pkey_idx);
- ppd->pkeys[!default_pkey_idx] = 0x8001;
- }
-
- INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
- INIT_WORK(&ppd->link_up_work, handle_link_up);
- INIT_WORK(&ppd->link_down_work, handle_link_down);
- INIT_WORK(&ppd->dc_host_req_work, handle_8051_request);
- INIT_WORK(&ppd->freeze_work, handle_freeze);
- INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
- INIT_WORK(&ppd->sma_message_work, handle_sma_message);
- INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
- INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
- INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
-
- mutex_init(&ppd->hls_lock);
- spin_lock_init(&ppd->sdma_alllock);
- spin_lock_init(&ppd->qsfp_info.qsfp_lock);
-
- ppd->qsfp_info.ppd = ppd;
- ppd->sm_trap_qp = 0x0;
- ppd->sa_qp = 0x1;
-
- ppd->hfi1_wq = NULL;
-
- spin_lock_init(&ppd->cca_timer_lock);
-
- for (i = 0; i < OPA_MAX_SLS; i++) {
- hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ppd->cca_timer[i].ppd = ppd;
- ppd->cca_timer[i].sl = i;
- ppd->cca_timer[i].ccti = 0;
- ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
- }
-
- ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
-
- spin_lock_init(&ppd->cc_state_lock);
- spin_lock_init(&ppd->cc_log_lock);
- size = sizeof(struct cc_state);
- RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
- if (!rcu_dereference(ppd->cc_state))
- goto bail;
- return;
-
-bail:
-
- hfi1_early_err(&pdev->dev,
- "Congestion Control Agent disabled for port %d\n", port);
-}
-
-/*
- * Do initialization for device that is only needed on
- * first detect, not on resets.
- */
-static int loadtime_init(struct hfi1_devdata *dd)
-{
- return 0;
-}
-
-/**
- * init_after_reset - re-initialize after a reset
- * @dd: the hfi1_ib device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receive or transmit is happening (explicitly, in case
- * the reset failed)
- */
-static int init_after_reset(struct hfi1_devdata *dd)
-{
- int i;
-
- /*
- * Ensure chip does no sends or receives, tail updates, or
- * pioavail updates while we re-initialize. This is mostly
- * for the driver data structures, not chip registers.
- */
- for (i = 0; i < dd->num_rcv_contexts; i++)
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_TAILUPD_DIS, i);
- pio_send_control(dd, PSC_GLOBAL_DISABLE);
- for (i = 0; i < dd->num_send_contexts; i++)
- sc_disable(dd->send_contexts[i].sc);
-
- return 0;
-}
-
-static void enable_chip(struct hfi1_devdata *dd)
-{
- u32 rcvmask;
- u32 i;
-
- /* enable PIO send */
- pio_send_control(dd, PSC_GLOBAL_ENABLE);
-
- /*
- * Enable kernel ctxts' receive and receive interrupt.
- * Other ctxts done as user opens and initializes them.
- */
- for (i = 0; i < dd->first_user_ctxt; ++i) {
- rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
- rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
- HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
- if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
- rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
- rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
- rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- hfi1_rcvctrl(dd, rcvmask, i);
- sc_enable(dd->rcd[i]->sc);
- }
-}
-
-/**
- * create_workqueues - create per port workqueues
- * @dd: the hfi1_ib device
- */
-static int create_workqueues(struct hfi1_devdata *dd)
-{
- int pidx;
- struct hfi1_pportdata *ppd;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (!ppd->hfi1_wq) {
- ppd->hfi1_wq =
- alloc_workqueue(
- "hfi%d_%d",
- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
- dd->num_sdma,
- dd->unit, pidx);
- if (!ppd->hfi1_wq)
- goto wq_error;
- }
- }
- return 0;
-wq_error:
- pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- }
- return -ENOMEM;
-}
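-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): the workqueue name format "hfi%d_%d" expands from
- * dd->unit and pidx, so unit 0, port index 0 gets a workqueue named
- * "hfi0_0", with max_active bounded by dd->num_sdma.
- */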
-
-/**
- * hfi1_init - do the actual initialization sequence on the chip
- * @dd: the hfi1_ib device
- * @reinit: re-initializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip. This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0). We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers
- * TIDs, etc. after the reset or enable has completed.
- */
-int hfi1_init(struct hfi1_devdata *dd, int reinit)
-{
- int ret = 0, pidx, lastfail = 0;
- unsigned i, len;
- struct hfi1_ctxtdata *rcd;
- struct hfi1_pportdata *ppd;
-
- /* Set up recv low level handlers */
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
- kdeth_process_expected;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
- kdeth_process_eager;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
- process_receive_error;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
- process_receive_bypass;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
- process_receive_invalid;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
- process_receive_invalid;
- dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
- process_receive_invalid;
- dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
-
- /* Set up send low level handlers */
- dd->process_pio_send = hfi1_verbs_send_pio;
- dd->process_dma_send = hfi1_verbs_send_dma;
- dd->pio_inline_send = pio_copy;
-
- if (is_ax(dd)) {
- atomic_set(&dd->drop_packet, DROP_PACKET_ON);
- dd->do_drop = 1;
- } else {
- atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
- dd->do_drop = 0;
- }
-
- /* make sure the link is not "up" */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- ppd->linkup = 0;
- }
-
- if (reinit)
- ret = init_after_reset(dd);
- else
- ret = loadtime_init(dd);
- if (ret)
- goto done;
-
- /* allocate dummy tail memory for all receive contexts */
- dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, sizeof(u64),
- &dd->rcvhdrtail_dummy_physaddr,
- GFP_KERNEL);
-
- if (!dd->rcvhdrtail_dummy_kvaddr) {
- dd_dev_err(dd, "cannot allocate dummy tail memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
- /* dd->rcd can be NULL if early initialization failed */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- /*
- * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
- * re-init, the simplest way to handle this is to free
- * existing, and re-allocate.
- * Need to re-create rest of ctxt 0 ctxtdata as well.
- */
- rcd = dd->rcd[i];
- if (!rcd)
- continue;
-
- rcd->do_interrupt = &handle_receive_interrupt;
-
- lastfail = hfi1_create_rcvhdrq(dd, rcd);
- if (!lastfail)
- lastfail = hfi1_setup_eagerbufs(rcd);
- if (lastfail)
- dd_dev_err(dd,
- "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
- }
- if (lastfail)
- ret = lastfail;
-
- /* Allocate enough memory for user event notification. */
- len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
- sizeof(*dd->events));
- dd->events = vmalloc_user(len);
- if (!dd->events)
- dd_dev_err(dd, "Failed to allocate user events page\n");
- /*
- * Allocate a page for device and port status.
- * Page will be shared amongst all user processes.
- */
- dd->status = vmalloc_user(PAGE_SIZE);
- if (!dd->status)
- dd_dev_err(dd, "Failed to allocate dev status page\n");
- else
- dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
- sizeof(dd->status->freezemsg));
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (dd->status)
- /* Currently, we only have one port */
- ppd->statusp = &dd->status->port;
-
- set_mtu(ppd);
- }
-
- /* enable chip even if we have an error, so we can debug cause */
- enable_chip(dd);
-
-done:
- /*
- * Set status even if port serdes is not initialized
- * so that diags will work.
- */
- if (dd->status)
- dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
- HFI1_STATUS_INITTED;
- if (!ret) {
- /* enable all interrupts from the chip */
- set_intr_state(dd, 1);
-
- /* chip is OK for user apps; mark it as initialized */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- /*
- * start the serdes - must be after interrupts are
- * enabled so we are notified when the link goes up
- */
- lastfail = bringup_serdes(ppd);
- if (lastfail)
- dd_dev_info(dd,
- "Failed to bring up port %u\n",
- ppd->port);
-
- /*
- * Set status even if port serdes is not initialized
- * so that diags will work.
- */
- if (ppd->statusp)
- *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
- HFI1_STATUS_INITTED;
- if (!ppd->link_speed_enabled)
- continue;
- }
- }
-
- /* if ret is non-zero, we probably should do some cleanup here... */
- return ret;
-}
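-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): the RHF receive type field indexes directly into the
- * handler table set up above, so an eager packet is dispatched
- * roughly as
- *
- *	dd->rhf_rcv_function_map[RHF_RCV_TYPE_EAGER](packet);
- *
- * which resolves to kdeth_process_eager(packet).
- */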
-
-static inline struct hfi1_devdata *__hfi1_lookup(int unit)
-{
- return idr_find(&hfi1_unit_table, unit);
-}
-
-struct hfi1_devdata *hfi1_lookup(int unit)
-{
- struct hfi1_devdata *dd;
- unsigned long flags;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- dd = __hfi1_lookup(unit);
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
-
- return dd;
-}
-
-/*
- * Stop the timers during unit shutdown, or after an error late
- * in initialization.
- */
-static void stop_timers(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd;
- int pidx;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->led_override_timer.data) {
- del_timer_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
- }
-}
-
-/**
- * shutdown_device - shut down a device
- * @dd: the hfi1_ib device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled. It does not free any data structures.
- * Everything it does has to be set up again by hfi1_init(dd, 1).
- */
-static void shutdown_device(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd;
- unsigned pidx;
- int i;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- ppd->linkup = 0;
- if (ppd->statusp)
- *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
- HFI1_STATUS_IB_READY);
- }
- dd->flags &= ~HFI1_INITTED;
-
- /* mask interrupts, but not errors */
- set_intr_state(dd, 0);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- for (i = 0; i < dd->num_rcv_contexts; i++)
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
- HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_PKEY_DIS |
- HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
- /*
- * Gracefully stop all sends allowing any in progress to
- * trickle out first.
- */
- for (i = 0; i < dd->num_send_contexts; i++)
- sc_flush(dd->send_contexts[i].sc);
- }
-
- /*
- * Enough for anything that's going to trickle out to have actually
- * done so.
- */
- udelay(20);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- /* disable all contexts */
- for (i = 0; i < dd->num_send_contexts; i++)
- sc_disable(dd->send_contexts[i].sc);
- /* disable the send device */
- pio_send_control(dd, PSC_GLOBAL_DISABLE);
-
- shutdown_led_override(ppd);
-
- /*
- * Clear SerdesEnable.
- * We can't count on interrupts since we are stopping.
- */
- hfi1_quiet_serdes(ppd);
-
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- }
- sdma_exit(dd);
-}
-
-/**
- * hfi1_free_ctxtdata - free a context's allocated data
- * @dd: the hfi1_ib device
- * @rcd: the ctxtdata structure
- *
- * free up any allocated data for a context.
- * This should not touch anything that would affect a simultaneous
- * re-allocation of context data, because it is called after hfi1_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- */
-void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
-{
- unsigned e;
-
- if (!rcd)
- return;
-
- if (rcd->rcvhdrq) {
- dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
- rcd->rcvhdrq, rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
- if (rcd->rcvhdrtail_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *)rcd->rcvhdrtail_kvaddr,
- rcd->rcvhdrqtailaddr_phys);
- rcd->rcvhdrtail_kvaddr = NULL;
- }
- }
-
- /* all the RcvArray entries should have been cleared by now */
- kfree(rcd->egrbufs.rcvtids);
-
- for (e = 0; e < rcd->egrbufs.alloced; e++) {
- if (rcd->egrbufs.buffers[e].phys)
- dma_free_coherent(&dd->pcidev->dev,
- rcd->egrbufs.buffers[e].len,
- rcd->egrbufs.buffers[e].addr,
- rcd->egrbufs.buffers[e].phys);
- }
- kfree(rcd->egrbufs.buffers);
-
- sc_free(rcd->sc);
- vfree(rcd->user_event_mask);
- vfree(rcd->subctxt_uregbase);
- vfree(rcd->subctxt_rcvegrbuf);
- vfree(rcd->subctxt_rcvhdr_base);
- kfree(rcd->opstats);
- kfree(rcd);
-}
-
-/*
- * Release our hold on the shared asic data. If we are the last one,
- * free the structure. Must be holding hfi1_devs_lock.
- */
-static void release_asic_data(struct hfi1_devdata *dd)
-{
- int other;
-
- if (!dd->asic_data)
- return;
- dd->asic_data->dds[dd->hfi1_id] = NULL;
- other = dd->hfi1_id ? 0 : 1;
- if (!dd->asic_data->dds[other]) {
- /* we are the last holder, free it */
- kfree(dd->asic_data);
- }
- dd->asic_data = NULL;
-}
-
-void hfi1_free_devdata(struct hfi1_devdata *dd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hfi1_devs_lock, flags);
- idr_remove(&hfi1_unit_table, dd->unit);
- list_del(&dd->list);
- release_asic_data(dd);
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- free_platform_config(dd);
- rcu_barrier(); /* wait for rcu callbacks to complete */
- free_percpu(dd->int_counter);
- free_percpu(dd->rcv_limit);
- hfi1_dev_affinity_free(dd);
- free_percpu(dd->send_schedule);
- ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
-}
-
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process does both the cleanup
- * and the free of the data structure.
- * "extra" is for chip-specific data.
- *
- * Use the idr mechanism to get a unit number for this unit.
- */
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
-{
- unsigned long flags;
- struct hfi1_devdata *dd;
- int ret, nports;
-
-	/* extra is sizeof(struct hfi1_pportdata) * number of ports */
- nports = extra / sizeof(struct hfi1_pportdata);
-
- dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
- nports);
- if (!dd)
- return ERR_PTR(-ENOMEM);
- dd->num_pports = nports;
- dd->pport = (struct hfi1_pportdata *)(dd + 1);
-
- INIT_LIST_HEAD(&dd->list);
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&hfi1_devs_lock, flags);
-
- ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
- if (ret >= 0) {
- dd->unit = ret;
- list_add(&dd->list, &hfi1_dev_list);
- }
-
- spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- idr_preload_end();
-
- if (ret < 0) {
- hfi1_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
- goto bail;
- }
- /*
- * Initialize all locks for the device. This needs to be as early as
- * possible so locks are usable.
- */
- spin_lock_init(&dd->sc_lock);
- spin_lock_init(&dd->sendctrl_lock);
- spin_lock_init(&dd->rcvctrl_lock);
- spin_lock_init(&dd->uctxt_lock);
- spin_lock_init(&dd->hfi1_diag_trans_lock);
- spin_lock_init(&dd->sc_init_lock);
- spin_lock_init(&dd->dc8051_lock);
- spin_lock_init(&dd->dc8051_memlock);
- seqlock_init(&dd->sc2vl_lock);
- spin_lock_init(&dd->sde_map_lock);
- spin_lock_init(&dd->pio_map_lock);
- init_waitqueue_head(&dd->event_queue);
-
- dd->int_counter = alloc_percpu(u64);
- if (!dd->int_counter) {
- ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
- goto bail;
- }
-
- dd->rcv_limit = alloc_percpu(u64);
- if (!dd->rcv_limit) {
- ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu rcv_limit\n");
- goto bail;
- }
-
- dd->send_schedule = alloc_percpu(u64);
- if (!dd->send_schedule) {
- ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
- goto bail;
- }
-
- if (!hfi1_cpulist_count) {
- u32 count = num_online_cpus();
-
- hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
- GFP_KERNEL);
- if (hfi1_cpulist)
- hfi1_cpulist_count = count;
- else
- hfi1_early_err(
- &pdev->dev,
- "Could not alloc cpulist info, cpu affinity might be wrong\n");
- }
- return dd;
-
-bail:
- if (!list_empty(&dd->list))
- list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
- return ERR_PTR(ret);
-}
-
-/*
- * Called from freeze mode handlers, and from PCI error
- * reporting code. Should be paranoid about state of
- * system and data structures.
- */
-void hfi1_disable_after_error(struct hfi1_devdata *dd)
-{
- if (dd->flags & HFI1_INITTED) {
- u32 pidx;
-
- dd->flags &= ~HFI1_INITTED;
- if (dd->pport)
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- struct hfi1_pportdata *ppd;
-
- ppd = dd->pport + pidx;
- if (dd->flags & HFI1_PRESENT)
- set_link_state(ppd, HLS_DN_DISABLE);
-
- if (ppd->statusp)
- *ppd->statusp &= ~HFI1_STATUS_IB_READY;
- }
- }
-
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- if (dd->status)
- dd->status->dev |= HFI1_STATUS_HWERROR;
-}
-
-static void remove_one(struct pci_dev *);
-static int init_one(struct pci_dev *, const struct pci_device_id *);
-
-#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
-#define PFX DRIVER_NAME ": "
-
-static const struct pci_device_id hfi1_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
-
-static struct pci_driver hfi1_pci_driver = {
- .name = DRIVER_NAME,
- .probe = init_one,
- .remove = remove_one,
- .id_table = hfi1_pci_tbl,
- .err_handler = &hfi1_pci_err_handler,
-};
-
-static void __init compute_krcvqs(void)
-{
- int i;
-
- for (i = 0; i < krcvqsset; i++)
- n_krcvqs += krcvqs[i];
-}
-
-/*
- * Do all the generic driver unit- and chip-independent memory
- * allocation and initialization.
- */
-static int __init hfi1_mod_init(void)
-{
- int ret;
-
- ret = dev_init();
- if (ret)
- goto bail;
-
- /* validate max MTU before any devices start */
- if (!valid_opa_max_mtu(hfi1_max_mtu)) {
- pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
- hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
- hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
- }
- /* valid CUs run from 1-128 in powers of 2 */
- if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
- hfi1_cu = 1;
- /* valid credit return threshold is 0-100, variable is unsigned */
- if (user_credit_return_threshold > 100)
- user_credit_return_threshold = 100;
-
- compute_krcvqs();
- /*
-	 * sanitize the receive interrupt count; the timeout sanitization
-	 * must wait until after the hardware type is known
- */
- if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
- rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
- /* reject invalid combinations */
- if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
- pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
- rcv_intr_count = 1;
- }
- if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
- /*
- * Avoid indefinite packet delivery by requiring a timeout
- * if count is > 1.
- */
- pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
- rcv_intr_timeout = 1;
- }
- if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
- /*
- * The dynamic algorithm expects a non-zero timeout
- * and a count > 1.
- */
- pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
- rcv_intr_dynamic = 0;
- }
-
- /* sanitize link CRC options */
- link_crc_mask &= SUPPORTED_CRCS;
-
- /*
- * These must be called before the driver is registered with
- * the PCI subsystem.
- */
- idr_init(&hfi1_unit_table);
-
- hfi1_dbg_init();
- ret = hfi1_wss_init();
- if (ret < 0)
- goto bail_wss;
- ret = pci_register_driver(&hfi1_pci_driver);
- if (ret < 0) {
- pr_err("Unable to register driver: error %d\n", -ret);
- goto bail_dev;
- }
- goto bail; /* all OK */
-
-bail_dev:
- hfi1_wss_exit();
-bail_wss:
- hfi1_dbg_exit();
- idr_destroy(&hfi1_unit_table);
- dev_cleanup();
-bail:
- return ret;
-}
-
-module_init(hfi1_mod_init);
-
-/*
- * Do the non-unit driver cleanup, memory free, etc. at unload.
- */
-static void __exit hfi1_mod_cleanup(void)
-{
- pci_unregister_driver(&hfi1_pci_driver);
- hfi1_wss_exit();
- hfi1_dbg_exit();
- hfi1_cpulist_count = 0;
- kfree(hfi1_cpulist);
-
- idr_destroy(&hfi1_unit_table);
- dispose_firmware(); /* asymmetric with obtain_firmware() */
- dev_cleanup();
-}
-
-module_exit(hfi1_mod_cleanup);
-
-/* this can only be called after a successful initialization */
-static void cleanup_device_data(struct hfi1_devdata *dd)
-{
- int ctxt;
- int pidx;
- struct hfi1_ctxtdata **tmp;
- unsigned long flags;
-
- /* users can't do anything more with chip */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- struct hfi1_pportdata *ppd = &dd->pport[pidx];
- struct cc_state *cc_state;
- int i;
-
- if (ppd->statusp)
- *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
-
- for (i = 0; i < OPA_MAX_SLS; i++)
- hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
-
- spin_lock(&ppd->cc_state_lock);
- cc_state = get_cc_state(ppd);
- rcu_assign_pointer(ppd->cc_state, NULL);
- spin_unlock(&ppd->cc_state_lock);
-
- if (cc_state)
- call_rcu(&cc_state->rcu, cc_state_reclaim);
- }
-
- free_credit_return(dd);
-
- /*
-	 * Free any resources still in use (usually just kernel contexts)
-	 * at unload; we loop over all of ctxtcnt, because that's what we
-	 * allocate.
- * We acquire lock to be really paranoid that rcd isn't being
- * accessed from some interrupt-related code (that should not happen,
- * but best to be sure).
- */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- tmp = dd->rcd;
- dd->rcd = NULL;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- if (dd->rcvhdrtail_dummy_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
- (void *)dd->rcvhdrtail_dummy_kvaddr,
- dd->rcvhdrtail_dummy_physaddr);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
- }
-
- for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
- struct hfi1_ctxtdata *rcd = tmp[ctxt];
-
- tmp[ctxt] = NULL; /* debugging paranoia */
- if (rcd) {
- hfi1_clear_tids(rcd);
- hfi1_free_ctxtdata(dd, rcd);
- }
- }
- kfree(tmp);
- free_pio_map(dd);
- /* must follow rcv context free - need to remove rcv's hooks */
- for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
- sc_free(dd->send_contexts[ctxt].sc);
- dd->num_send_contexts = 0;
- kfree(dd->send_contexts);
- dd->send_contexts = NULL;
- kfree(dd->hw_to_sw);
- dd->hw_to_sw = NULL;
- kfree(dd->boardname);
- vfree(dd->events);
- vfree(dd->status);
-}
-
-/*
- * Clean up on unit shutdown, or error during unit load after
- * successful initialization.
- */
-static void postinit_cleanup(struct hfi1_devdata *dd)
-{
- hfi1_start_cleanup(dd);
-
- hfi1_pcie_ddcleanup(dd);
- hfi1_pcie_cleanup(dd->pcidev);
-
- cleanup_device_data(dd);
-
- hfi1_free_devdata(dd);
-}
-
-static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = NULL;
- struct hfi1_pportdata *ppd;
-
- /* First, lock the non-writable module parameters */
- HFI1_CAP_LOCK();
-
- /* Validate some global module parameters */
- if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev, "Header queue count too small\n");
- ret = -EINVAL;
- goto bail;
- }
- if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(&pdev->dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
- ret = -EINVAL;
- goto bail;
- }
- /* use the encoding function as a sanitization check */
- if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
- hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
- hfi1_hdrq_entsize);
- ret = -EINVAL;
- goto bail;
- }
-
- /* The receive eager buffer size must be set before the receive
- * contexts are created.
- *
- * Set the eager buffer size. Validate that it falls in a range
- * allowed by the hardware - all powers of 2 between the min and
- * max. The maximum valid MTU is within the eager buffer range
- * so we do not need to cap the max_mtu by an eager buffer size
- * setting.
- */
- if (eager_buffer_size) {
- if (!is_power_of_2(eager_buffer_size))
- eager_buffer_size =
- roundup_pow_of_two(eager_buffer_size);
- eager_buffer_size =
- clamp_val(eager_buffer_size,
- MIN_EAGER_BUFFER * 8,
- MAX_EAGER_BUFFER_TOTAL);
- hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
- eager_buffer_size);
- } else {
- hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
- ret = -EINVAL;
- goto bail;
- }
-
- /* restrict value of hfi1_rcvarr_split */
- hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
-
- ret = hfi1_pcie_init(pdev, ent);
- if (ret)
- goto bail;
-
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_INTEL0:
- case PCI_DEVICE_ID_INTEL1:
- dd = hfi1_init_dd(pdev, ent);
- break;
- default:
- hfi1_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
- ret = -ENODEV;
- }
-
- if (IS_ERR(dd))
- ret = PTR_ERR(dd);
- if (ret)
- goto clean_bail; /* error already printed */
-
- ret = create_workqueues(dd);
- if (ret)
- goto clean_bail;
-
- /* do the generic initialization */
- initfail = hfi1_init(dd, 0);
-
- ret = hfi1_register_ib_device(dd);
-
- /*
-	 * Now ready for use. This should be cleared whenever we
-	 * detect a reset, or initiate one. On an earlier failure
-	 * we still create the devices, so diags, etc. can be used
-	 * to determine the cause of the problem.
- */
- if (!initfail && !ret) {
- dd->flags |= HFI1_INITTED;
-		/* create debugfs files after init and ib register */
- hfi1_dbg_ibdev_init(&dd->verbs_dev);
- }
-
- j = hfi1_device_create(dd);
- if (j)
- dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
-
- if (initfail || ret) {
- stop_timers(dd);
- flush_workqueue(ib_wq);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- hfi1_quiet_serdes(dd->pport + pidx);
- ppd = dd->pport + pidx;
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- }
- if (!j)
- hfi1_device_remove(dd);
- if (!ret)
- hfi1_unregister_ib_device(dd);
- postinit_cleanup(dd);
- if (initfail)
- ret = initfail;
- goto bail; /* everything already cleaned */
- }
-
- sdma_start(dd);
-
- return 0;
-
-clean_bail:
- hfi1_pcie_cleanup(pdev);
-bail:
- return ret;
-}
-
-static void remove_one(struct pci_dev *pdev)
-{
- struct hfi1_devdata *dd = pci_get_drvdata(pdev);
-
- /* close debugfs files before ib unregister */
- hfi1_dbg_ibdev_exit(&dd->verbs_dev);
- /* unregister from IB core */
- hfi1_unregister_ib_device(dd);
-
- /*
- * Disable the IB link, disable interrupts on the device,
- * clear dma engines, etc.
- */
- shutdown_device(dd);
-
- stop_timers(dd);
-
- /* wait until all of our (qsfp) queue_work() calls complete */
- flush_workqueue(ib_wq);
-
- hfi1_device_remove(dd);
-
- postinit_cleanup(dd);
-}
-
-/**
- * hfi1_create_rcvhdrq - create a receive header queue
- * @dd: the hfi1_ib device
- * @rcd: the context data
- *
- * This must be contiguous memory (from an i/o perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
-{
- unsigned amt;
- u64 reg;
-
- if (!rcd->rcvhdrq) {
- dma_addr_t phys_hdrqtail;
- gfp_t gfp_flags;
-
- /*
- * rcvhdrqentsize is in DWs, so we have to convert to bytes
- * (* sizeof(u32)).
- */
- amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
- sizeof(u32));
-
- gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
- GFP_USER : GFP_KERNEL;
- rcd->rcvhdrq = dma_zalloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
- gfp_flags | __GFP_COMP);
-
- if (!rcd->rcvhdrq) {
- dd_dev_err(dd,
- "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
- amt, rcd->ctxt);
- goto bail;
- }
-
- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
- rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
- gfp_flags);
- if (!rcd->rcvhdrtail_kvaddr)
- goto bail_free;
- rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
- }
-
- rcd->rcvhdrq_size = amt;
- }
- /*
- * These values are per-context:
- * RcvHdrCnt
- * RcvHdrEntSize
- * RcvHdrSize
- */
- reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
- & RCV_HDR_CNT_CNT_MASK)
- << RCV_HDR_CNT_CNT_SHIFT;
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
- reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
- & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
- << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
- reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
- << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
-
- /*
- * Program dummy tail address for every receive context
- * before enabling any receive context
- */
- write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
-
- return 0;
-
-bail_free:
- dd_dev_err(dd,
- "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
- rcd->ctxt);
- vfree(rcd->user_event_mask);
- rcd->user_event_mask = NULL;
- dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
- rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
-bail:
- return -ENOMEM;
-}
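-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): rcvhdrqentsize is in DWs, so a hypothetical context with
- * rcvhdrq_cnt == 2048 and rcvhdrqentsize == 32 allocates
- *
- *	amt = PAGE_ALIGN(2048 * 32 * sizeof(u32)) = 256 KiB
- *
- * of DMA-coherent memory for its receive header queue.
- */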
-
-/**
- * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts
- * @rcd: the context we are setting up.
- *
- * Allocate the eager TID buffers and program them into the chip.
- * They are no longer completely contiguous; we do multiple allocation
- * calls. Otherwise we would get the OOM code involved by asking for
- * too much per call, with disastrous results on some kernels.
- */
-int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
- gfp_t gfp_flags;
- u16 order;
- int ret = 0;
- u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
-
- /*
- * GFP_USER, but without GFP_FS, so buffer cache can be
- * coalesced (we hope); otherwise, even at order 4,
- * heavy filesystem activity makes these fail, and we can
- * use compound pages.
- */
- gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
- /*
-	 * The minimum size of the eager buffers is a group of MTU-sized
-	 * buffers.
- * The global eager_buffer_size parameter is checked against the
- * theoretical lower limit of the value. Here, we check against the
- * MTU.
- */
- if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
- rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
- /*
- * If using one-pkt-per-egr-buffer, lower the eager buffer
- * size to the max MTU (page-aligned).
- */
- if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
- rcd->egrbufs.rcvtid_size = round_mtu;
-
- /*
-	 * Eager buffer sizes of 1MB or less require smaller TID sizes
- * to satisfy the "multiple of 8 RcvArray entries" requirement.
- */
- if (rcd->egrbufs.size <= (1 << 20))
- rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
- rounddown_pow_of_two(rcd->egrbufs.size / 8));
-
- while (alloced_bytes < rcd->egrbufs.size &&
- rcd->egrbufs.alloced < rcd->egrbufs.count) {
- rcd->egrbufs.buffers[idx].addr =
- dma_zalloc_coherent(&dd->pcidev->dev,
- rcd->egrbufs.rcvtid_size,
- &rcd->egrbufs.buffers[idx].phys,
- gfp_flags);
- if (rcd->egrbufs.buffers[idx].addr) {
- rcd->egrbufs.buffers[idx].len =
- rcd->egrbufs.rcvtid_size;
- rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
- rcd->egrbufs.buffers[idx].addr;
- rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
- rcd->egrbufs.buffers[idx].phys;
- rcd->egrbufs.alloced++;
- alloced_bytes += rcd->egrbufs.rcvtid_size;
- idx++;
- } else {
- u32 new_size, i, j;
- u64 offset = 0;
-
- /*
- * Fail the eager buffer allocation if:
- * - we are already using the lowest acceptable size
- * - we are using one-pkt-per-egr-buffer (this implies
- * that we are accepting only one size)
- */
- if (rcd->egrbufs.rcvtid_size == round_mtu ||
- !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
- dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
- rcd->ctxt);
- goto bail_rcvegrbuf_phys;
- }
-
- new_size = rcd->egrbufs.rcvtid_size / 2;
-
- /*
- * If the first attempt to allocate memory failed, don't
- * fail everything but continue with the next lower
- * size.
- */
- if (idx == 0) {
- rcd->egrbufs.rcvtid_size = new_size;
- continue;
- }
-
- /*
- * Re-partition already allocated buffers to a smaller
- * size.
- */
- rcd->egrbufs.alloced = 0;
- for (i = 0, j = 0, offset = 0; j < idx; i++) {
- if (i >= rcd->egrbufs.count)
- break;
- rcd->egrbufs.rcvtids[i].phys =
- rcd->egrbufs.buffers[j].phys + offset;
- rcd->egrbufs.rcvtids[i].addr =
- rcd->egrbufs.buffers[j].addr + offset;
- rcd->egrbufs.alloced++;
- if ((rcd->egrbufs.buffers[j].phys + offset +
- new_size) ==
- (rcd->egrbufs.buffers[j].phys +
- rcd->egrbufs.buffers[j].len)) {
- j++;
- offset = 0;
- } else {
- offset += new_size;
- }
- }
- rcd->egrbufs.rcvtid_size = new_size;
- }
- }
- rcd->egrbufs.numbufs = idx;
- rcd->egrbufs.size = alloced_bytes;
-
-	hfi1_cdbg(PROC,
-		  "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
-		  rcd->ctxt, rcd->egrbufs.alloced,
-		  rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
-
- /*
-	 * Set the context's rcv array head update threshold to the closest
- * power of 2 (so we can use a mask instead of modulo) below half
- * the allocated entries.
- */
- rcd->egrbufs.threshold =
- rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
- /*
- * Compute the expected RcvArray entry base. This is done after
- * allocating the eager buffers in order to maximize the
- * expected RcvArray entries for the context.
- */
- max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
- egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
- rcd->expected_count = max_entries - egrtop;
- if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
- rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
-
- rcd->expected_base = rcd->eager_base + egrtop;
- hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
- rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
- rcd->eager_base, rcd->expected_base);
-
- if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
- hfi1_cdbg(PROC,
- "ctxt%u: current Eager buffer size is invalid %u\n",
- rcd->ctxt, rcd->egrbufs.rcvtid_size);
- ret = -EINVAL;
- goto bail;
- }
-
- for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
- hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
- rcd->egrbufs.rcvtids[idx].phys, order);
- cond_resched();
- }
- goto bail;
-
-bail_rcvegrbuf_phys:
- for (idx = 0; idx < rcd->egrbufs.alloced &&
- rcd->egrbufs.buffers[idx].addr;
- idx++) {
- dma_free_coherent(&dd->pcidev->dev,
- rcd->egrbufs.buffers[idx].len,
- rcd->egrbufs.buffers[idx].addr,
- rcd->egrbufs.buffers[idx].phys);
- rcd->egrbufs.buffers[idx].addr = NULL;
- rcd->egrbufs.buffers[idx].phys = 0;
- rcd->egrbufs.buffers[idx].len = 0;
- }
-bail:
- return ret;
-}
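-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): the head update threshold above is the closest power of
- * two below half the allocated entries, e.g. egrbufs.alloced == 100
- * gives rounddown_pow_of_two(50) == 32, letting head updates use a
- * mask instead of a modulo.
- */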
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c
deleted file mode 100644
index 65348d16a..000000000
--- a/drivers/staging/rdma/hfi1/intr.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "hfi.h"
-#include "common.h"
-#include "sdma.h"
-
-/**
- * format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * hfi1_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void hfi1_format_hwerrors(u64 hwerrs, const struct hfi1_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
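-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * driver): given a hypothetical table
- *
- *	static const struct hfi1_hwerror_msgs msgs[] = {
- *		{ .mask = 0x1, .msg = "PLL failed" },
- *		{ .mask = 0x2, .msg = "SerDes parity" },
- *	};
- *	char buf[80] = "";
- *
- *	hfi1_format_hwerrors(0x3, msgs, ARRAY_SIZE(msgs), buf, sizeof(buf));
- *
- * leaves buf containing "[PLL failed][SerDes parity]".
- */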
-
-static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
-{
- struct ib_event event;
- struct hfi1_devdata *dd = ppd->dd;
-
- /*
- * Only call ib_dispatch_event() if the IB device has been
- * registered. HFI1_INITTED is set iff the driver has successfully
- * registered with the IB core.
- */
- if (!(dd->flags & HFI1_INITTED))
- return;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = ppd->port;
- event.event = ev;
- ib_dispatch_event(&event);
-}
-
-/*
- * Handle a linkup or link down notification.
- * This is called outside an interrupt.
- */
-void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
-{
- struct hfi1_pportdata *ppd = &dd->pport[0];
- enum ib_event_type ev;
-
- if (!(ppd->linkup ^ !!linkup))
- return; /* no change, nothing to do */
-
- if (linkup) {
- /*
- * Quick linkup and all link up on the simulator does not
- * trigger or implement:
- * - VerifyCap interrupt
- * - VerifyCap frames
- * But rather moves directly to LinkUp.
- *
- * Do the work of the VerifyCap interrupt handler,
- * handle_verify_cap(), but do not try moving the state to
- * LinkUp as we are already there.
- *
-		 * NOTE: This uses this device's vAU, vCU, and vl15_init for
-		 * the remote values. Both sides must be using the same values.
- */
- if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- set_up_vl15(dd, dd->vau, dd->vl15_init);
- assign_remote_cm_au_table(dd, dd->vcu);
- ppd->neighbor_guid =
- read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
- ppd->neighbor_type =
- read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
- DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
- ppd->neighbor_port_number =
- read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
- DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
- dd_dev_info(dd, "Neighbor GUID: %llx Neighbor type %d\n",
- ppd->neighbor_guid,
- ppd->neighbor_type);
- }
-
- /* physical link went up */
- ppd->linkup = 1;
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
-
- /* link widths are not available until the link is fully up */
- get_linkup_link_widths(ppd);
-
- } else {
- /* physical link went down */
- ppd->linkup = 0;
-
- /* clear HW details of the previous connection */
- reset_link_credits(dd);
-
- /* freeze after a link down to guarantee a clean egress */
- start_freeze_handling(ppd, FREEZE_SELF | FREEZE_LINK_DOWN);
-
- ev = IB_EVENT_PORT_ERR;
-
- hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LINKDOWN_BIT);
-
- /* if we are down, the neighbor is down */
- ppd->neighbor_normal = 0;
-
- /* notify IB of the link change */
- signal_ib_event(ppd, ev);
- }
-}
-
-/*
- * Handle receive or urgent interrupts for user contexts. This means a user
- * process was waiting for a packet to arrive, and didn't want to poll.
- */
-void handle_user_interrupt(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- if (!rcd->cnt)
- goto done;
-
- if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
- wake_up_interruptible(&rcd->wait);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd->ctxt);
- } else if (test_and_clear_bit(HFI1_CTXT_WAITING_URG,
- &rcd->event_flags)) {
- rcd->urgent++;
- wake_up_interruptible(&rcd->wait);
- }
-done:
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-}
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h
deleted file mode 100644
index 2ec6ef38d..000000000
--- a/drivers/staging/rdma/hfi1/iowait.h
+++ /dev/null
@@ -1,300 +0,0 @@
-#ifndef _HFI1_IOWAIT_H
-#define _HFI1_IOWAIT_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-
-#include "sdma_txreq.h"
-
-/*
- * typedef (*restart_t)() - restart callback
- * @work: pointer to work structure
- */
-typedef void (*restart_t)(struct work_struct *work);
-
-struct sdma_txreq;
-struct sdma_engine;
-/**
- * struct iowait - linkage for delayed progress/waiting
- * @list: used to add/insert into QP/PQ wait lists
- * @tx_head: overflow list of sdma_txreq's
- * @sleep: no space callback
- * @wakeup: space callback wakeup
- * @sdma_drained: sdma count drained
- * @iowork: workqueue overhead
- * @wait_dma: wait for sdma_busy == 0
- * @wait_pio: wait for pio_busy == 0
- * @sdma_busy: # of packets in flight
- * @count: total number of descriptors in tx_head'ed list
- * @tx_limit: limit for overflow queuing
- * @tx_count: number of tx entries in tx_head'ed list
- *
- * This is to be embedded in user's state structure
- * (QP or PQ).
- *
- * The sleep and wakeup members are a bit misnamed. They do not,
- * strictly speaking, sleep or wake up; they are callbacks for the
- * ULP to implement whatever queuing/dequeuing of the embedded
- * iowait and its containing struct is needed when a resource
- * shortage like SDMA ring space is seen.
- *
- * Both are potentially called with locks held,
- * so sleeping is not allowed.
- *
- * The wait_dma and wait_pio members are used to wait for the
- * corresponding busy counts to drain to zero.
- */
-
-struct iowait {
- struct list_head list;
- struct list_head tx_head;
- int (*sleep)(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx,
- unsigned seq);
- void (*wakeup)(struct iowait *wait, int reason);
- void (*sdma_drained)(struct iowait *wait);
- struct work_struct iowork;
- wait_queue_head_t wait_dma;
- wait_queue_head_t wait_pio;
- atomic_t sdma_busy;
- atomic_t pio_busy;
- u32 count;
- u32 tx_limit;
- u32 tx_count;
-};
-
-#define SDMA_AVAIL_REASON 0
-
-/**
- * iowait_init() - initialize wait structure
- * @wait: wait struct to initialize
- * @tx_limit: limit for overflow queuing
- * @func: restart function for workqueue
- * @sleep: sleep function for no space
- * @wakeup: wakeup function for no space
- * @sdma_drained: callback invoked when the sdma count has drained
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
-
-static inline void iowait_init(
- struct iowait *wait,
- u32 tx_limit,
- void (*func)(struct work_struct *work),
- int (*sleep)(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx,
- unsigned seq),
- void (*wakeup)(struct iowait *wait, int reason),
- void (*sdma_drained)(struct iowait *wait))
-{
- wait->count = 0;
- INIT_LIST_HEAD(&wait->list);
- INIT_LIST_HEAD(&wait->tx_head);
- INIT_WORK(&wait->iowork, func);
- init_waitqueue_head(&wait->wait_dma);
- init_waitqueue_head(&wait->wait_pio);
- atomic_set(&wait->sdma_busy, 0);
- atomic_set(&wait->pio_busy, 0);
- wait->tx_limit = tx_limit;
- wait->sleep = sleep;
- wait->wakeup = wakeup;
- wait->sdma_drained = sdma_drained;
-}
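-
-/*
- * Illustrative sketch (editor's addition, not part of the original
- * header): a ULP embeds the iowait in its own state structure and
- * wires up the callbacks, e.g. with hypothetical my_* helpers:
- *
- *	struct my_qp_priv {
- *		struct iowait s_iowait;
- *	};
- *
- *	iowait_init(&priv->s_iowait, 0, my_do_send_work,
- *		    my_sleep_cb, my_wakeup_cb, my_sdma_drained_cb);
- */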
-
-/**
- * iowait_schedule() - schedule the iowait work
- * @wait: wait struct to schedule
- * @wq: workqueue for schedule
- * @cpu: cpu to queue the work on
- */
-static inline void iowait_schedule(
- struct iowait *wait,
- struct workqueue_struct *wq,
- int cpu)
-{
- queue_work_on(cpu, wq, &wait->iowork);
-}
-
-/**
- * iowait_sdma_drain() - wait for DMAs to drain
- *
- * @wait: iowait structure
- *
- * This will delay until the iowait sdmas have
- * completed.
- */
-static inline void iowait_sdma_drain(struct iowait *wait)
-{
- wait_event(wait->wait_dma, !atomic_read(&wait->sdma_busy));
-}
-
-/**
- * iowait_sdma_pending() - return sdma pending count
- *
- * @wait: iowait structure
- *
- */
-static inline int iowait_sdma_pending(struct iowait *wait)
-{
- return atomic_read(&wait->sdma_busy);
-}
-
-/**
- * iowait_sdma_inc - note sdma io pending
- * @wait: iowait structure
- */
-static inline void iowait_sdma_inc(struct iowait *wait)
-{
- atomic_inc(&wait->sdma_busy);
-}
-
-/**
- * iowait_sdma_add - add count to pending
- * @wait: iowait structure
- * @count: number of pending sdma requests to add
- */
-static inline void iowait_sdma_add(struct iowait *wait, int count)
-{
- atomic_add(count, &wait->sdma_busy);
-}
-
-/**
- * iowait_sdma_dec - note sdma complete
- * @wait: iowait structure
- */
-static inline int iowait_sdma_dec(struct iowait *wait)
-{
- return atomic_dec_and_test(&wait->sdma_busy);
-}
-
-/**
- * iowait_pio_drain() - wait for pios to drain
- *
- * @wait: iowait structure
- *
- * This will delay until the iowait pios have
- * completed.
- */
-static inline void iowait_pio_drain(struct iowait *wait)
-{
- wait_event_timeout(wait->wait_pio,
- !atomic_read(&wait->pio_busy),
- HZ);
-}
-
-/**
- * iowait_pio_pending() - return pio pending count
- *
- * @wait: iowait structure
- *
- */
-static inline int iowait_pio_pending(struct iowait *wait)
-{
- return atomic_read(&wait->pio_busy);
-}
-
-/**
- * iowait_pio_inc - note pio pending
- * @wait: iowait structure
- */
-static inline void iowait_pio_inc(struct iowait *wait)
-{
- atomic_inc(&wait->pio_busy);
-}
-
-/**
- * iowait_pio_dec - note pio complete
- * @wait: iowait structure
- */
-static inline int iowait_pio_dec(struct iowait *wait)
-{
- return atomic_dec_and_test(&wait->pio_busy);
-}
-
-/**
- * iowait_drain_wakeup() - trigger the sdma/pio drain waiters
- *
- * @wait: iowait structure
- *
- * This will trigger any waiters.
- */
-static inline void iowait_drain_wakeup(struct iowait *wait)
-{
- wake_up(&wait->wait_dma);
- wake_up(&wait->wait_pio);
- if (wait->sdma_drained)
- wait->sdma_drained(wait);
-}
-
-/**
- * iowait_get_txhead() - get packet off of iowait list
- *
- * @wait: wait structure
- */
-static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
-{
- struct sdma_txreq *tx = NULL;
-
- if (!list_empty(&wait->tx_head)) {
- tx = list_first_entry(
- &wait->tx_head,
- struct sdma_txreq,
- list);
- list_del_init(&tx->list);
- }
- return tx;
-}
-
-#endif
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
deleted file mode 100644
index d1e7f4d7c..000000000
--- a/drivers/staging/rdma/hfi1/mad.c
+++ /dev/null
@@ -1,4402 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/net.h>
-#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
- / (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
-
-#include "hfi.h"
-#include "mad.h"
-#include "trace.h"
-#include "qp.h"
-
-/* the reset value from the FM is supposed to be 0xffff, handle both */
-#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
-#define OPA_LINK_WIDTH_RESET 0xffff
-
-static int reply(struct ib_mad_hdr *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static inline void clear_opa_smp_data(struct opa_smp *smp)
-{
- void *data = opa_get_smp_data(smp);
- size_t size = opa_get_smp_data_size(smp);
-
- memset(data, 0, size);
-}
-
-static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
-{
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct opa_smp *smp;
- int ret;
- unsigned long flags;
- unsigned long timeout;
- int pkey_idx;
- u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
-
- agent = ibp->rvp.send_agent;
- if (!agent)
- return;
-
- /* o14-3.2.1 */
- if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
- return;
-
- /* o14-2 */
- if (ibp->rvp.trap_timeout && time_before(jiffies,
- ibp->rvp.trap_timeout))
- return;
-
- pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
- if (pkey_idx < 0) {
- pr_warn("%s: failed to find limited mgmt pkey, defaulting 0x%x\n",
- __func__, hfi1_get_pkey(ibp, 1));
- pkey_idx = 1;
- }
-
- send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
- IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC, IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- return;
-
- smp = send_buf->mad;
- smp->base_version = OPA_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = OPA_SMI_CLASS_VERSION;
- smp->method = IB_MGMT_METHOD_TRAP;
- ibp->rvp.tid++;
- smp->tid = cpu_to_be64(ibp->rvp.tid);
- smp->attr_id = IB_SMP_ATTR_NOTICE;
- /* o14-1: smp->mkey = 0; */
- memcpy(smp->route.lid.data, data, len);
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (!ibp->rvp.sm_ah) {
- if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
- struct ib_ah *ah;
-
- ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
- if (IS_ERR(ah)) {
- ret = PTR_ERR(ah);
- } else {
- send_buf->ah = ah;
- ibp->rvp.sm_ah = ibah_to_rvtah(ah);
- ret = 0;
- }
- } else {
- ret = -EINVAL;
- }
- } else {
- send_buf->ah = &ibp->rvp.sm_ah->ibah;
- ret = 0;
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (!ret) {
- /* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
- ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
- } else {
- ib_free_send_mad(send_buf);
- ibp->rvp.trap_timeout = 0;
- }
-}
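
The pacing interval computed above is 4.096 us scaled by 2^subnet_timeout. A hypothetical helper restating the arithmetic, with one worked value (subnet_timeout == 18 is an assumed input, not a value taken from this driver):

	static unsigned long next_trap_time(unsigned int subnet_timeout)
	{
		/* e.g. subnet_timeout == 18: 4096 ns * 2^18 ~= 1.07 s */
		unsigned long usecs = (4096 * (1UL << subnet_timeout)) / 1000;

		return jiffies + usecs_to_jiffies(usecs);
	}
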
-
-/*
- * Send a bad [PQ]_Key trap (ch. 14.3.8).
- */
-void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, u16 lid1, u16 lid2)
-{
- struct opa_mad_notice_attr data;
- u32 lid = ppd_from_ibp(ibp)->lid;
- u32 _lid1 = lid1;
- u32 _lid2 = lid2;
-
- memset(&data, 0, sizeof(data));
-
- if (trap_num == OPA_TRAP_BAD_P_KEY)
- ibp->rvp.pkey_violations++;
- else
- ibp->rvp.qkey_violations++;
- ibp->rvp.n_pkt_drops++;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = trap_num;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
- data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
- data.ntc_257_258.key = cpu_to_be32(key);
- data.ntc_257_258.sl = sl << 3;
- data.ntc_257_258.qp1 = cpu_to_be32(qp1);
- data.ntc_257_258.qp2 = cpu_to_be32(qp2);
-
- send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a bad M_Key trap (ch. 14.3.9).
- */
-static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
- __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
-{
- struct opa_mad_notice_attr data;
- u32 lid = ppd_from_ibp(ibp)->lid;
-
- memset(&data, 0, sizeof(data));
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_BAD_M_KEY;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_256.lid = data.issuer_lid;
- data.ntc_256.method = mad->method;
- data.ntc_256.attr_id = mad->attr_id;
- data.ntc_256.attr_mod = mad->attr_mod;
- data.ntc_256.mkey = mkey;
- if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- data.ntc_256.dr_slid = dr_slid;
- data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
- if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
- data.ntc_256.dr_trunc_hop |=
- IB_NOTICE_TRAP_DR_TRUNC;
- hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
- }
- data.ntc_256.dr_trunc_hop |= hop_cnt;
- memcpy(data.ntc_256.dr_rtn_path, return_path,
- hop_cnt);
- }
-
- send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Port Capability Mask Changed trap (ch. 14.3.11).
- */
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
-{
- struct opa_mad_notice_attr data;
- struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
- struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
- struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
- u32 lid = ppd_from_ibp(ibp)->lid;
-
- memset(&data, 0, sizeof(data));
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_144.lid = data.issuer_lid;
- data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
-
- send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a System Image GUID Changed trap (ch. 14.3.12).
- */
-void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
-{
- struct opa_mad_notice_attr data;
- u32 lid = ppd_from_ibp(ibp)->lid;
-
- memset(&data, 0, sizeof(data));
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
- data.ntc_145.lid = data.issuer_lid;
-
- send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Node Description Changed trap (ch. 14.3.13).
- */
-void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
-{
- struct opa_mad_notice_attr data;
- u32 lid = ppd_from_ibp(ibp)->lid;
-
- memset(&data, 0, sizeof(data));
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_144.lid = data.issuer_lid;
- data.ntc_144.change_flags =
- cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
-
- send_trap(ibp, &data, sizeof(data));
-}
-
-static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- struct opa_node_description *nd;
-
- if (am) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- nd = (struct opa_node_description *)data;
-
- memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
-
- if (resp_len)
- *resp_len += sizeof(*nd);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct opa_node_info *ni;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
-
- ni = (struct opa_node_info *)data;
-
- /* GUID 0 is illegal */
- if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
- ni->base_version = OPA_MGMT_BASE_VERSION;
- ni->class_version = OPA_SMI_CLASS_VERSION;
- ni->node_type = 1; /* channel adapter */
- ni->num_ports = ibdev->phys_port_cnt;
- /* This is already in network order */
- ni->system_image_guid = ib_hfi1_sys_image_guid;
- /* Use first-port GUID as node */
- ni->node_guid = cpu_to_be64(dd->pport->guid);
- ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
- ni->device_id = cpu_to_be16(dd->pcidev->device);
- ni->revision = cpu_to_be32(dd->minrev);
- ni->local_port_num = port;
- ni->vendor_id[0] = dd->oui1;
- ni->vendor_id[1] = dd->oui2;
- ni->vendor_id[2] = dd->oui3;
-
- if (resp_len)
- *resp_len += sizeof(*ni);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
-
- /* GUID 0 is illegal */
- if (smp->attr_mod || pidx >= dd->num_pports ||
- dd->pport[pidx].guid == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);
-
- nip->base_version = OPA_MGMT_BASE_VERSION;
- nip->class_version = OPA_SMI_CLASS_VERSION;
- nip->node_type = 1; /* channel adapter */
- nip->num_ports = ibdev->phys_port_cnt;
- /* This is already in network order */
- nip->sys_guid = ib_hfi1_sys_image_guid;
- /* Use first-port GUID as node */
- nip->node_guid = cpu_to_be64(dd->pport->guid);
- nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
- nip->device_id = cpu_to_be16(dd->pcidev->device);
- nip->revision = cpu_to_be32(dd->minrev);
- nip->local_port_num = port;
- nip->vendor_id[0] = dd->oui1;
- nip->vendor_id[1] = dd->oui2;
- nip->vendor_id[2] = dd->oui3;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
-{
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
-{
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
-}
-
-static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
-{
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
-}
-
-static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
- int mad_flags, __be64 mkey, __be32 dr_slid,
- u8 return_path[], u8 hop_cnt)
-{
- int valid_mkey = 0;
- int ret = 0;
-
- /* Is the mkey in the process of expiring? */
- if (ibp->rvp.mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
- /* Clear timeout and mkey protection field. */
- ibp->rvp.mkey_lease_timeout = 0;
- ibp->rvp.mkeyprot = 0;
- }
-
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
- ibp->rvp.mkey == mkey)
- valid_mkey = 1;
-
- /* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
- (mad->method == IB_MGMT_METHOD_GET ||
- mad->method == IB_MGMT_METHOD_SET ||
- mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->rvp.mkey_lease_timeout = 0;
-
- if (!valid_mkey) {
- switch (mad->method) {
- case IB_MGMT_METHOD_GET:
- /* Bad mkey not a violation below level 2 */
- if (ibp->rvp.mkeyprot < 2)
- break;
- /* FALLTHROUGH */
- case IB_MGMT_METHOD_SET:
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->rvp.mkey_violations != 0xFFFF)
- ++ibp->rvp.mkey_violations;
- if (!ibp->rvp.mkey_lease_timeout &&
- ibp->rvp.mkey_lease_period)
- ibp->rvp.mkey_lease_timeout = jiffies +
- ibp->rvp.mkey_lease_period * HZ;
- /* Generate a trap notice. */
- bad_mkey(ibp, mad, mkey, dr_slid, return_path,
- hop_cnt);
- ret = 1;
- }
- }
-
- return ret;
-}
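
A sketch of a call site on the SMA dispatch path, modeled on how this file handles directed-route MADs; the route.dr field names are taken from the opa_smp layout and should be treated as assumptions here:

	ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
			 smp->route.dr.dr_slid, smp->route.dr.return_path,
			 smp->hop_cnt);
	if (ret)
		return IB_MAD_RESULT_FAILURE;	/* drop the MAD; a trap may already have been sent */
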
-
-/*
- * The SMA caches reads from LCB registers in case the LCB is unavailable.
- * (The LCB is unavailable in certain link states, for example.)
- */
-struct lcb_datum {
- u32 off;
- u64 val;
-};
-
-static struct lcb_datum lcb_cache[] = {
- { DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
-};
-
-static int write_lcb_cache(u32 off, u64 val)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
- if (lcb_cache[i].off == off) {
- lcb_cache[i].val = val;
- return 0;
- }
- }
-
- pr_warn("%s bad offset 0x%x\n", __func__, off);
- return -1;
-}
-
-static int read_lcb_cache(u32 off, u64 *val)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
- if (lcb_cache[i].off == off) {
- *val = lcb_cache[i].val;
- return 0;
- }
- }
-
- pr_warn("%s bad offset 0x%x\n", __func__, off);
- return -1;
-}
-
-void read_ltp_rtt(struct hfi1_devdata *dd)
-{
- u64 reg;
-
- if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
- dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
- else
- write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
-}
-
-static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- int i;
- struct hfi1_devdata *dd;
- struct hfi1_pportdata *ppd;
- struct hfi1_ibport *ibp;
- struct opa_port_info *pi = (struct opa_port_info *)data;
- u8 mtu;
- u8 credit_rate;
- u8 is_beaconing_active;
- u32 state;
- u32 num_ports = OPA_AM_NPORT(am);
- u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
- u32 buffer_units;
- u64 tmp = 0;
-
- if (num_ports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hw from 0 */
- ppd = dd->pport + (port - 1);
- ibp = &ppd->ibport_data;
-
- if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
- ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- pi->lid = cpu_to_be32(ppd->lid);
-
- /* Only return the mkey if the protection field allows it. */
- if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->rvp.mkey != smp->mkey &&
- ibp->rvp.mkeyprot == 1))
- pi->mkey = ibp->rvp.mkey;
-
- pi->subnet_prefix = ibp->rvp.gid_prefix;
- pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
- pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
- pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
- pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
- pi->sa_qp = cpu_to_be32(ppd->sa_qp);
-
- pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
- pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
- pi->link_width.active = cpu_to_be16(ppd->link_width_active);
-
- pi->link_width_downgrade.supported =
- cpu_to_be16(ppd->link_width_downgrade_supported);
- pi->link_width_downgrade.enabled =
- cpu_to_be16(ppd->link_width_downgrade_enabled);
- pi->link_width_downgrade.tx_active =
- cpu_to_be16(ppd->link_width_downgrade_tx_active);
- pi->link_width_downgrade.rx_active =
- cpu_to_be16(ppd->link_width_downgrade_rx_active);
-
- pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
- pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
- pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);
-
- state = driver_lstate(ppd);
-
- if (start_of_sm_config && (state == IB_PORT_INIT))
- ppd->is_sm_config_started = 1;
-
- pi->port_phys_conf = (ppd->port_type & 0xf);
-
-#if PI_LED_ENABLE_SUP
- pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
- pi->port_states.ledenable_offlinereason |=
- ppd->is_sm_config_started << 5;
- /*
- * This pairs with the memory barrier in hfi1_start_led_override to
- * ensure that we read the correct state of LED beaconing represented
- * by led_override_timer_active
- */
- smp_rmb();
- is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
- pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
- pi->port_states.ledenable_offlinereason |=
- ppd->offline_disabled_reason;
-#else
- pi->port_states.offline_reason = ppd->neighbor_normal << 4;
- pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- pi->port_states.offline_reason |= ppd->offline_disabled_reason;
-#endif /* PI_LED_ENABLE_SUP */
-
- pi->port_states.portphysstate_portstate =
- (hfi1_ibphys_portstate(ppd) << 4) | state;
-
- pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
-
- memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
- for (i = 0; i < ppd->vls_supported; i++) {
- mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
- if ((i % 2) == 0)
- pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
- else
- pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
- }
- /* don't forget VL 15 */
- mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
- pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
- pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
- pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
- pi->partenforce_filterraw |=
- (ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
- if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
- pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
- if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
- pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
- pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
- /* P_KeyViolations are counted by hardware. */
- pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
- pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
-
- pi->vl.cap = ppd->vls_supported;
- pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
- pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
- pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
-
- pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;
-
- pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
- OPA_PORT_LINK_MODE_OPA << 5 |
- OPA_PORT_LINK_MODE_OPA);
-
- pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);
-
- pi->port_mode = cpu_to_be16(
- ppd->is_active_optimize_enabled ?
- OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);
-
- pi->port_packet_format.supported =
- cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
- pi->port_packet_format.enabled =
- cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B);
-
- /* flit_control.interleave is (OPA V1, version .76):
- * bits use
- * ---- ---
- * 2 res
- * 2 DistanceSupported
- * 2 DistanceEnabled
- * 5 MaxNestLevelTxEnabled
- * 5 MaxNestLevelRxSupported
- *
- * HFI supports only "distance mode 1" (see OPA V1, version .76,
- * section 9.6.2), so set DistanceSupported, DistanceEnabled
- * to 0x1.
- */
- pi->flit_control.interleave = cpu_to_be16(0x1400);
-
- pi->link_down_reason = ppd->local_link_down_reason.sma;
- pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
- pi->port_error_action = cpu_to_be32(ppd->port_error_action);
- pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);
-
- /* 32.768 usec. response time (guessing) */
- pi->resptimevalue = 3;
-
- pi->local_port_num = port;
-
- /* buffer info for FM */
- pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
-
- pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
- pi->neigh_port_num = ppd->neighbor_port_number;
- pi->port_neigh_mode =
- (ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
- (ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
- (ppd->neighbor_fm_security ?
- OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);
-
- /* HFIs shall always return VL15 credits to their
- * neighbor in a timely manner, without any credit return pacing.
- */
- credit_rate = 0;
- buffer_units = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
- buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
- buffer_units |= (credit_rate << 6) &
- OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
- buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
- pi->buffer_units = cpu_to_be32(buffer_units);
-
- pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported);
-
- /* HFI supports a replay buffer 128 LTPs in size */
- pi->replay_depth.buffer = 0x80;
- /* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
- read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
-
- /*
- * this counter is 16 bits wide, but the replay_depth.wire
- * variable is only 8 bits
- */
- if (tmp > 0xff)
- tmp = 0xff;
- pi->replay_depth.wire = tmp;
-
- if (resp_len)
- *resp_len += sizeof(struct opa_port_info);
-
- return reply((struct ib_mad_hdr *)smp);
-}
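
The buffer_units word assembled above packs four fields at bit offsets 0, 3, 6 and 11. A hypothetical FM-side decoder using the same masks shows the inverse mapping:

	static void decode_buffer_units(u32 bu, u8 *vau, u8 *vcu,
					u8 *credit_rate, u16 *vl15_init)
	{
		*vau         = bu & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
		*vcu         = (bu & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK) >> 3;
		*credit_rate = (bu & OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE) >> 6;
		*vl15_init   = (bu & OPA_PI_MASK_BUF_UNIT_VL15_INIT) >> 11;
	}
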
-
-/**
- * get_pkeys - return the PKEY table
- * @dd: the hfi1_ib device
- * @port: the IB port number
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
-{
- struct hfi1_pportdata *ppd = dd->pport + port - 1;
-
- memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));
-
- return 0;
-}
-
-static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- u32 n_blocks_req = OPA_AM_NBLK(am);
- u32 start_block = am & 0x7ff;
- __be16 *p;
- u16 *q;
- int i;
- u16 n_blocks_avail;
- unsigned npkeys = hfi1_get_npkeys(dd);
- size_t size;
-
- if (n_blocks_req == 0) {
- pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
- port, start_block, n_blocks_req);
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
-
- size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
-
- if (start_block + n_blocks_req > n_blocks_avail ||
- n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
- pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; "
- "avail 0x%x; blk/smp 0x%lx\n",
- start_block, n_blocks_req, n_blocks_avail,
- OPA_NUM_PKEY_BLOCKS_PER_SMP);
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- p = (__be16 *)data;
- q = (u16 *)data;
- /* get the real pkeys if we are requesting the first block */
- if (start_block == 0) {
- get_pkeys(dd, port, q);
- for (i = 0; i < npkeys; i++)
- p[i] = cpu_to_be16(q[i]);
- if (resp_len)
- *resp_len += size;
- } else {
- smp->status |= IB_SMP_INVALID_FIELD;
- }
- return reply((struct ib_mad_hdr *)smp);
-}
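
Worked example of the bounds check above, assuming (hypothetically) npkeys == 16 and OPA_PARTITION_TABLE_BLK_SIZE == 32:

	n_blocks_avail = 16 / 32 + 1 = 1

so the only request that passes is start_block == 0 with n_blocks_req == 1; any other combination sets IB_SMP_INVALID_FIELD.
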
-
-enum {
- HFI_TRANSITION_DISALLOWED,
- HFI_TRANSITION_IGNORED,
- HFI_TRANSITION_ALLOWED,
- HFI_TRANSITION_UNDEFINED,
-};
-
-/*
- * Use shortened names to improve readability of
- * {logical,physical}_state_transitions
- */
-enum {
- __D = HFI_TRANSITION_DISALLOWED,
- __I = HFI_TRANSITION_IGNORED,
- __A = HFI_TRANSITION_ALLOWED,
- __U = HFI_TRANSITION_UNDEFINED,
-};
-
-/*
- * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
- * represented in physical_state_transitions.
- */
-#define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)
-
-/*
- * Within physical_state_transitions, rows represent "old" states,
- * columns "new" states, and physical_state_transitions.allowed[old][new]
- * indicates if the transition from old state to new state is legal (see
- * OPAg1v1, Table 6-4).
- */
-static const struct {
- u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
-} physical_state_transitions = {
- {
- /* 2 3 4 5 6 7 8 9 10 11 */
- /* 2 */ { __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
- /* 3 */ { __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
- /* 4 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
- /* 5 */ { __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
- /* 6 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
- /* 7 */ { __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
- /* 8 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
- /* 9 */ { __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
- /*10 */ { __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
- /*11 */ { __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
- }
-};
-
-/*
- * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
- * logical_state_transitions
- */
-
-#define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)
-
-/*
- * Within logical_state_transitions rows represent "old" states,
- * columns "new" states, and logical_state_transitions.allowed[old][new]
- * indicates if the transition from old state to new state is legal (see
- * OPAg1v1, Table 9-12).
- */
-static const struct {
- u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
-} logical_state_transitions = {
- {
- /* 1 2 3 4 5 */
- /* 1 */ { __I, __D, __D, __D, __U},
- /* 2 */ { __D, __I, __A, __D, __U},
- /* 3 */ { __D, __D, __I, __A, __U},
- /* 4 */ { __D, __D, __I, __I, __U},
- /* 5 */ { __U, __U, __U, __U, __U},
- }
-};
-
-static int logical_transition_allowed(int old, int new)
-{
- if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
- new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
- pr_warn("invalid logical state(s) (old %d new %d)\n",
- old, new);
- return HFI_TRANSITION_UNDEFINED;
- }
-
- if (new == IB_PORT_NOP)
- return HFI_TRANSITION_ALLOWED; /* always allowed */
-
- /* adjust states for indexing into logical_state_transitions */
- old -= IB_PORT_DOWN;
- new -= IB_PORT_DOWN;
-
- if (old < 0 || new < 0)
- return HFI_TRANSITION_UNDEFINED;
- return logical_state_transitions.allowed[old][new];
-}
-
-static int physical_transition_allowed(int old, int new)
-{
- if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
- new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
- pr_warn("invalid physical state(s) (old %d new %d)\n",
- old, new);
- return HFI_TRANSITION_UNDEFINED;
- }
-
- if (new == IB_PORTPHYSSTATE_NOP)
- return HFI_TRANSITION_ALLOWED; /* always allowed */
-
- /* adjust states for indexing into physical_state_transitions */
- old -= IB_PORTPHYSSTATE_POLLING;
- new -= IB_PORTPHYSSTATE_POLLING;
-
- if (old < 0 || new < 0)
- return HFI_TRANSITION_UNDEFINED;
- return physical_state_transitions.allowed[old][new];
-}
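
Example lookups into the table, assuming the usual port physical state encodings (Polling = 2, Disabled = 3, Offline = 9):

	/*
	 * physical_transition_allowed(2, 3) -> __A  Polling  -> Disabled
	 * physical_transition_allowed(3, 2) -> __A  Disabled -> Polling
	 * physical_transition_allowed(9, 2) -> __I  Offline  -> Polling; ignored,
	 *                                     matching the explicit check below in
	 *                                     port_states_transition_allowed()
	 * physical_transition_allowed(4, 5) -> __U  row 4 is entirely undefined
	 */
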
-
-static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
- u32 logical_new, u32 physical_new)
-{
- u32 physical_old = driver_physical_state(ppd);
- u32 logical_old = driver_logical_state(ppd);
- int ret, logical_allowed, physical_allowed;
-
- ret = logical_transition_allowed(logical_old, logical_new);
- logical_allowed = ret;
-
- if (ret == HFI_TRANSITION_DISALLOWED ||
- ret == HFI_TRANSITION_UNDEFINED) {
- pr_warn("invalid logical state transition %s -> %s\n",
- opa_lstate_name(logical_old),
- opa_lstate_name(logical_new));
- return ret;
- }
-
- ret = physical_transition_allowed(physical_old, physical_new);
- physical_allowed = ret;
-
- if (ret == HFI_TRANSITION_DISALLOWED ||
- ret == HFI_TRANSITION_UNDEFINED) {
- pr_warn("invalid physical state transition %s -> %s\n",
- opa_pstate_name(physical_old),
- opa_pstate_name(physical_new));
- return ret;
- }
-
- if (logical_allowed == HFI_TRANSITION_IGNORED &&
- physical_allowed == HFI_TRANSITION_IGNORED)
- return HFI_TRANSITION_IGNORED;
-
- /*
- * A change request of Physical Port State from
- * 'Offline' to 'Polling' should be ignored.
- */
- if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
- (physical_new == IB_PORTPHYSSTATE_POLLING))
- return HFI_TRANSITION_IGNORED;
-
- /*
- * Either physical_allowed or logical_allowed is
- * HFI_TRANSITION_ALLOWED.
- */
- return HFI_TRANSITION_ALLOWED;
-}
-
-static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
- u32 logical_state, u32 phys_state,
- int suppress_idle_sma)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u32 link_state;
- int ret;
-
- ret = port_states_transition_allowed(ppd, logical_state, phys_state);
- if (ret == HFI_TRANSITION_DISALLOWED ||
- ret == HFI_TRANSITION_UNDEFINED) {
- /* error message emitted above */
- smp->status |= IB_SMP_INVALID_FIELD;
- return 0;
- }
-
- if (ret == HFI_TRANSITION_IGNORED)
- return 0;
-
- if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
- !(logical_state == IB_PORT_DOWN ||
- logical_state == IB_PORT_NOP)) {
- pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
- logical_state, phys_state);
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- /*
- * Logical state changes are summarized in OPAv1g1 spec.,
- * Table 9-12; physical state changes are summarized in
- * OPAv1g1 spec., Table 6-4.
- */
- switch (logical_state) {
- case IB_PORT_NOP:
- if (phys_state == IB_PORTPHYSSTATE_NOP)
- break;
- /* FALLTHROUGH */
- case IB_PORT_DOWN:
- if (phys_state == IB_PORTPHYSSTATE_NOP) {
- link_state = HLS_DN_DOWNDEF;
- } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
- link_state = HLS_DN_POLL;
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
- 0, OPA_LINKDOWN_REASON_FM_BOUNCE);
- } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
- link_state = HLS_DN_DISABLE;
- } else {
- pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
- phys_state);
- smp->status |= IB_SMP_INVALID_FIELD;
- break;
- }
-
- set_link_state(ppd, link_state);
- if (link_state == HLS_DN_DISABLE &&
- (ppd->offline_disabled_reason >
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
- ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
- /*
- * Don't send a reply if the response would be sent
- * through the disabled port.
- */
- if (link_state == HLS_DN_DISABLE && smp->hop_cnt)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- break;
- case IB_PORT_ARMED:
- ret = set_link_state(ppd, HLS_UP_ARMED);
- if ((ret == 0) && (suppress_idle_sma == 0))
- send_idle_sma(dd, SMA_IDLE_ARM);
- break;
- case IB_PORT_ACTIVE:
- if (ppd->neighbor_normal) {
- ret = set_link_state(ppd, HLS_UP_ACTIVE);
- if (ret == 0)
- send_idle_sma(dd, SMA_IDLE_ACTIVE);
- } else {
- pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
- smp->status |= IB_SMP_INVALID_FIELD;
- }
- break;
- default:
- pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
- logical_state);
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- return 0;
-}
-
-/**
- * __subn_set_opa_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- */
-static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct opa_port_info *pi = (struct opa_port_info *)data;
- struct ib_event event;
- struct hfi1_devdata *dd;
- struct hfi1_pportdata *ppd;
- struct hfi1_ibport *ibp;
- u8 clientrereg;
- unsigned long flags;
- u32 smlid, opa_lid; /* tmp vars to hold LID values */
- u16 lid;
- u8 ls_old, ls_new, ps_new;
- u8 vls;
- u8 msl;
- u8 crc_enabled;
- u16 lse, lwe, mtu;
- u32 num_ports = OPA_AM_NPORT(am);
- u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
- int ret, i, invalid = 0, call_set_mtu = 0;
- int call_link_downgrade_policy = 0;
-
- if (num_ports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- opa_lid = be32_to_cpu(pi->lid);
- if (opa_lid & 0xFFFF0000) {
- pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
- smp->status |= IB_SMP_INVALID_FIELD;
- goto get_only;
- }
-
- lid = (u16)(opa_lid & 0x0000FFFF);
-
- smlid = be32_to_cpu(pi->sm_lid);
- if (smlid & 0xFFFF0000) {
- pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
- smp->status |= IB_SMP_INVALID_FIELD;
- goto get_only;
- }
- smlid &= 0x0000FFFF;
-
- clientrereg = (pi->clientrereg_subnettimeout &
- OPA_PI_MASK_CLIENT_REREGISTER);
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hw from 0 */
- ppd = dd->pport + (port - 1);
- ibp = &ppd->ibport_data;
- event.device = ibdev;
- event.element.port_num = port;
-
- ls_old = driver_lstate(ppd);
-
- ibp->rvp.mkey = pi->mkey;
- ibp->rvp.gid_prefix = pi->subnet_prefix;
- ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
-
- /* Must be a valid unicast LID address. */
- if ((lid == 0 && ls_old > IB_PORT_INIT) ||
- lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
- lid);
- } else if (ppd->lid != lid ||
- ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
- if (ppd->lid != lid)
- hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
- if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
- hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
- hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
- event.event = IB_EVENT_LID_CHANGE;
- ib_dispatch_event(&event);
- }
-
- msl = pi->smsl & OPA_PI_MASK_SMSL;
- if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
- ppd->linkinit_reason =
- (pi->partenforce_filterraw &
- OPA_PI_MASK_LINKINIT_REASON);
- /* enable/disable SW pkey checking as per FM control */
- if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_IN)
- ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
- else
- ppd->part_enforce &= ~HFI1_PART_ENFORCE_IN;
-
- if (pi->partenforce_filterraw & OPA_PI_MASK_PARTITION_ENFORCE_OUT)
- ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
- else
- ppd->part_enforce &= ~HFI1_PART_ENFORCE_OUT;
-
- /* Must be a valid unicast LID address. */
- if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
- smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
- } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
- pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (ibp->rvp.sm_ah) {
- if (smlid != ibp->rvp.sm_lid)
- ibp->rvp.sm_ah->attr.dlid = smlid;
- if (msl != ibp->rvp.sm_sl)
- ibp->rvp.sm_ah->attr.sl = msl;
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
- if (smlid != ibp->rvp.sm_lid)
- ibp->rvp.sm_lid = smlid;
- if (msl != ibp->rvp.sm_sl)
- ibp->rvp.sm_sl = msl;
- event.event = IB_EVENT_SM_CHANGE;
- ib_dispatch_event(&event);
- }
-
- if (pi->link_down_reason == 0) {
- ppd->local_link_down_reason.sma = 0;
- ppd->local_link_down_reason.latest = 0;
- }
-
- if (pi->neigh_link_down_reason == 0) {
- ppd->neigh_link_down_reason.sma = 0;
- ppd->neigh_link_down_reason.latest = 0;
- }
-
- ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
- ppd->sa_qp = be32_to_cpu(pi->sa_qp);
-
- ppd->port_error_action = be32_to_cpu(pi->port_error_action);
- lwe = be16_to_cpu(pi->link_width.enabled);
- if (lwe) {
- if (lwe == OPA_LINK_WIDTH_RESET ||
- lwe == OPA_LINK_WIDTH_RESET_OLD)
- set_link_width_enabled(ppd, ppd->link_width_supported);
- else if ((lwe & ~ppd->link_width_supported) == 0)
- set_link_width_enabled(ppd, lwe);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
- }
- lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
- /* LWD.E is always applied - 0 means "disabled" */
- if (lwe == OPA_LINK_WIDTH_RESET ||
- lwe == OPA_LINK_WIDTH_RESET_OLD) {
- set_link_width_downgrade_enabled(ppd,
- ppd->link_width_downgrade_supported);
- } else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
- /* only set and apply if something changed */
- if (lwe != ppd->link_width_downgrade_enabled) {
- set_link_width_downgrade_enabled(ppd, lwe);
- call_link_downgrade_policy = 1;
- }
- } else {
- smp->status |= IB_SMP_INVALID_FIELD;
- }
- lse = be16_to_cpu(pi->link_speed.enabled);
- if (lse) {
- if (lse & be16_to_cpu(pi->link_speed.supported))
- set_link_speed_enabled(ppd, lse);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- ibp->rvp.mkeyprot =
- (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
- ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
- ibp->rvp.vl_high_limit);
-
- if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
- ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
- for (i = 0; i < ppd->vls_supported; i++) {
- if ((i % 2) == 0)
- mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
- 4) & 0xF);
- else
- mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
- 0xF);
- if (mtu == 0xffff) {
- pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
- mtu,
- (pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
- smp->status |= IB_SMP_INVALID_FIELD;
- mtu = hfi1_max_mtu; /* use a valid MTU */
- }
- if (dd->vld[i].mtu != mtu) {
- dd_dev_info(dd,
- "MTU change on vl %d from %d to %d\n",
- i, dd->vld[i].mtu, mtu);
- dd->vld[i].mtu = mtu;
- call_set_mtu++;
- }
- }
- /* As per OPAV1 spec: VL15 must support and be configured
- * for operation with a 2048 or larger MTU.
- */
- mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
- if (mtu < 2048 || mtu == 0xffff)
- mtu = 2048;
- if (dd->vld[15].mtu != mtu) {
- dd_dev_info(dd,
- "MTU change on vl 15 from %d to %d\n",
- dd->vld[15].mtu, mtu);
- dd->vld[15].mtu = mtu;
- call_set_mtu++;
- }
- if (call_set_mtu)
- set_mtu(ppd);
-
- /* Set operational VLs */
- vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
- if (vls) {
- if (vls > ppd->vls_supported) {
- pr_warn("SubnSet(OPA_PortInfo) VL's supported invalid %d\n",
- pi->operational_vls);
- smp->status |= IB_SMP_INVALID_FIELD;
- } else {
- if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
- vls) == -EINVAL)
- smp->status |= IB_SMP_INVALID_FIELD;
- }
- }
-
- if (pi->mkey_violations == 0)
- ibp->rvp.mkey_violations = 0;
-
- if (pi->pkey_violations == 0)
- ibp->rvp.pkey_violations = 0;
-
- if (pi->qkey_violations == 0)
- ibp->rvp.qkey_violations = 0;
-
- ibp->rvp.subnet_timeout =
- pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
-
- crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
- crc_enabled >>= 4;
- crc_enabled &= 0xf;
-
- if (crc_enabled != 0)
- ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
-
- ppd->is_active_optimize_enabled =
- !!(be16_to_cpu(pi->port_mode)
- & OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
-
- ls_new = pi->port_states.portphysstate_portstate &
- OPA_PI_MASK_PORT_STATE;
- ps_new = (pi->port_states.portphysstate_portstate &
- OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
-
- if (ls_old == IB_PORT_INIT) {
- if (start_of_sm_config) {
- if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
- ppd->is_sm_config_started = 1;
- } else if (ls_new == IB_PORT_ARMED) {
- if (ppd->is_sm_config_started == 0)
- invalid = 1;
- }
- }
-
- /* Handle CLIENT_REREGISTER event b/c SM asked us for it */
- if (clientrereg) {
- event.event = IB_EVENT_CLIENT_REREGISTER;
- ib_dispatch_event(&event);
- }
-
- /*
- * Do the port state change now that the other link parameters
- * have been set.
- * Changing the port physical state only makes sense if the link
- * is down or is being set to down.
- */
-
- ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
- if (ret)
- return ret;
-
- ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
-
- /* restore re-reg bit per o14-12.2.1 */
- pi->clientrereg_subnettimeout |= clientrereg;
-
- /*
- * Apply the new link downgrade policy. This may result in a link
- * bounce. Do this after everything else so things are settled.
- * Possible problem: if setting the port state above fails, then
- * the policy change is not applied.
- */
- if (call_link_downgrade_policy)
- apply_link_downgrade_policy(ppd, 0);
-
- return ret;
-
-get_only:
- return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
-}
-
-/**
- * set_pkeys - set the PKEY table for ctxt 0
- * @dd: the hfi1_ib device
- * @port: the IB port number
- * @pkeys: the PKEY table
- */
-static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
-{
- struct hfi1_pportdata *ppd;
- int i;
- int changed = 0;
- int update_includes_mgmt_partition = 0;
-
- /*
- * IB port one/two always maps to context zero/one,
- * always a kernel context, no locking needed
- * If we get here with ppd setup, no need to check
- * that rcd is valid.
- */
- ppd = dd->pport + (port - 1);
- /*
- * If the update does not include the management pkey, don't do it.
- */
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (pkeys[i] == LIM_MGMT_P_KEY) {
- update_includes_mgmt_partition = 1;
- break;
- }
- }
-
- if (!update_includes_mgmt_partition)
- return 1;
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- u16 key = pkeys[i];
- u16 okey = ppd->pkeys[i];
-
- if (key == okey)
- continue;
- /*
- * The SM gives us the complete PKey table. We have
- * to ensure that we put the PKeys in the matching
- * slots.
- */
- ppd->pkeys[i] = key;
- changed = 1;
- }
-
- if (changed) {
- struct ib_event event;
-
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
- }
- return 0;
-}
-
-static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- u32 n_blocks_sent = OPA_AM_NBLK(am);
- u32 start_block = am & 0x7ff;
- u16 *p = (u16 *)data;
- __be16 *q = (__be16 *)data;
- int i;
- u16 n_blocks_avail;
- unsigned npkeys = hfi1_get_npkeys(dd);
-
- if (n_blocks_sent == 0) {
- pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
- port, start_block, n_blocks_sent);
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
-
- if (start_block + n_blocks_sent > n_blocks_avail ||
- n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
- pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
- start_block, n_blocks_sent, n_blocks_avail,
- OPA_NUM_PKEY_BLOCKS_PER_SMP);
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
- p[i] = be16_to_cpu(q[i]);
-
- if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
-}
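
In the conversion loop above, p and q alias the same buffer, so it is an in-place big-endian to host byteswap; each 16-bit slot is read before being rewritten, which makes the aliasing harmless. The generic form of the idiom, as a sketch:

	static void be16_buf_to_cpu(void *buf, size_t nelems)
	{
		__be16 *in = buf;
		u16 *out = buf;
		size_t i;

		for (i = 0; i < nelems; i++)
			out[i] = be16_to_cpu(in[i]);	/* read, then rewrite, one slot at a time */
	}
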
-
-static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
-{
- u64 *val = data;
-
- *val++ = read_csr(dd, SEND_SC2VLT0);
- *val++ = read_csr(dd, SEND_SC2VLT1);
- *val++ = read_csr(dd, SEND_SC2VLT2);
- *val++ = read_csr(dd, SEND_SC2VLT3);
- return 0;
-}
-
-#define ILLEGAL_VL 12
-/*
- * filter_sc2vlt changes mappings to VL15 to ILLEGAL_VL (except
- * for SC15, which must map to VL15). If we don't remap things this
- * way it is possible for VL15 counters to increment when we try to
- * send on a SC which is mapped to an invalid VL.
- */
-static void filter_sc2vlt(void *data)
-{
- int i;
- u8 *pd = data;
-
- for (i = 0; i < OPA_MAX_SCS; i++) {
- if (i == 15)
- continue;
- if ((pd[i] & 0x1f) == 0xf)
- pd[i] = ILLEGAL_VL;
- }
-}
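
Worked example: with ILLEGAL_VL == 12, a table byte of 0x0f (VL15) at any SC other than SC15 is rewritten to 12, so a stray SC-to-VL15 mapping lands on a dead VL instead of bumping VL15 counters; SC15 itself (i == 15) is skipped and keeps its mandatory VL15 mapping.
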
-
-static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
-{
- u64 *val = data;
-
- filter_sc2vlt(data);
-
- write_csr(dd, SEND_SC2VLT0, *val++);
- write_csr(dd, SEND_SC2VLT1, *val++);
- write_csr(dd, SEND_SC2VLT2, *val++);
- write_csr(dd, SEND_SC2VLT3, *val++);
- write_seqlock_irq(&dd->sc2vl_lock);
- memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
- write_sequnlock_irq(&dd->sc2vl_lock);
- return 0;
-}
-
-static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- u8 *p = data;
- size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
- unsigned i;
-
- if (am) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
- *p++ = ibp->sl_to_sc[i];
-
- if (resp_len)
- *resp_len += size;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- u8 *p = data;
- int i;
- u8 sc;
-
- if (am) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
- sc = *p++;
- if (ibp->sl_to_sc[i] != sc) {
- ibp->sl_to_sc[i] = sc;
-
- /* Put all stale qps into error state */
- hfi1_error_port_qps(ibp, i);
- }
- }
-
- return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
-}
-
-static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- u8 *p = data;
- size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
- unsigned i;
-
- if (am) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
- *p++ = ibp->sc_to_sl[i];
-
- if (resp_len)
- *resp_len += size;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- u8 *p = data;
- int i;
-
- if (am) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
- ibp->sc_to_sl[i] = *p++;
-
- return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
-}
-
-static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- u32 n_blocks = OPA_AM_NBLK(am);
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- void *vp = (void *)data;
- size_t size = 4 * sizeof(u64);
-
- if (n_blocks != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- get_sc2vlt_tables(dd, vp);
-
- if (resp_len)
- *resp_len += size;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- u32 n_blocks = OPA_AM_NBLK(am);
- int async_update = OPA_AM_ASYNC(am);
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- void *vp = (void *)data;
- struct hfi1_pportdata *ppd;
- int lstate;
-
- if (n_blocks != 1 || async_update) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- /* IB numbers ports from 1, hw from 0 */
- ppd = dd->pport + (port - 1);
- lstate = driver_lstate(ppd);
- /*
- * it's known that async_update is 0 by this point, but include
- * the explicit check for clarity
- */
- if (!async_update &&
- (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- set_sc2vlt_tables(dd, vp);
-
- return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
-}
-
-static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- u32 n_blocks = OPA_AM_NPORT(am);
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_pportdata *ppd;
- void *vp = (void *)data;
- int size;
-
- if (n_blocks != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- ppd = dd->pport + (port - 1);
-
- size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
-
- if (resp_len)
- *resp_len += size;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- u32 n_blocks = OPA_AM_NPORT(am);
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_pportdata *ppd;
- void *vp = (void *)data;
- int lstate;
-
- if (n_blocks != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- /* IB numbers ports from 1, hw from 0 */
- ppd = dd->pport + (port - 1);
- lstate = driver_lstate(ppd);
- if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
-
- return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
-}
-
-static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- u32 nports = OPA_AM_NPORT(am);
- u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
- u32 lstate;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
- struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
-
- if (nports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- ibp = to_iport(ibdev, port);
- ppd = ppd_from_ibp(ibp);
-
- lstate = driver_lstate(ppd);
-
- if (start_of_sm_config && (lstate == IB_PORT_INIT))
- ppd->is_sm_config_started = 1;
-
-#if PI_LED_ENABLE_SUP
- psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
- psi->port_states.ledenable_offlinereason |=
- ppd->is_sm_config_started << 5;
- psi->port_states.ledenable_offlinereason |=
- ppd->offline_disabled_reason;
-#else
- psi->port_states.offline_reason = ppd->neighbor_normal << 4;
- psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- psi->port_states.offline_reason |= ppd->offline_disabled_reason;
-#endif /* PI_LED_ENABLE_SUP */
-
- psi->port_states.portphysstate_portstate =
- (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
- psi->link_width_downgrade_tx_active =
- cpu_to_be16(ppd->link_width_downgrade_tx_active);
- psi->link_width_downgrade_rx_active =
- cpu_to_be16(ppd->link_width_downgrade_rx_active);
- if (resp_len)
- *resp_len += sizeof(struct opa_port_state_info);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- u32 nports = OPA_AM_NPORT(am);
- u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
- u32 ls_old;
- u8 ls_new, ps_new;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
- struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
- int ret, invalid = 0;
-
- if (nports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- ibp = to_iport(ibdev, port);
- ppd = ppd_from_ibp(ibp);
-
- ls_old = driver_lstate(ppd);
-
- ls_new = port_states_to_logical_state(&psi->port_states);
- ps_new = port_states_to_phys_state(&psi->port_states);
-
- if (ls_old == IB_PORT_INIT) {
- if (start_of_sm_config) {
- if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
- ppd->is_sm_config_started = 1;
- } else if (ls_new == IB_PORT_ARMED) {
- if (ppd->is_sm_config_started == 0)
- invalid = 1;
- }
- }
-
- ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
- if (ret)
- return ret;
-
- if (invalid)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
-}
-
-static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- u32 addr = OPA_AM_CI_ADDR(am);
- u32 len = OPA_AM_CI_LEN(am) + 1;
- int ret;
-
-#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
-#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
-#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
-
- /*
- * check that addr is within spec, and
- * addr and (addr + len - 1) are on the same "page"
- */
- if (addr >= 4096 ||
- (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- ret = get_cable_info(dd, port, addr, len, data);
-
- if (ret == -ENODEV) {
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- /* The address range for the CableInfo SMA query is wider than the
- * memory available on the QSFP cable. We want to return a valid
- * response, albeit zeroed out, for address ranges beyond available
- * memory but that are within the CableInfo query spec
- */
- if (ret < 0 && ret != -ERANGE) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- if (resp_len)
- *resp_len += len;
-
- return reply((struct ib_mad_hdr *)smp);
-}
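
Worked example of the same-page check above, with __CI_PAGE_SIZE == 128: addr = 120, len = 16 spans two pages (__CI_PAGE_NUM(120) = 0 but __CI_PAGE_NUM(135) = 128) and is rejected, while addr = 128, len = 16 starts and ends on page 128 and is accepted.
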
-
-static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
-{
- u32 num_ports = OPA_AM_NPORT(am);
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_pportdata *ppd;
- struct buffer_control *p = (struct buffer_control *)data;
- int size;
-
- if (num_ports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- ppd = dd->pport + (port - 1);
- size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
- trace_bct_get(dd, p);
- if (resp_len)
- *resp_len += size;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
-{
- u32 num_ports = OPA_AM_NPORT(am);
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_pportdata *ppd;
- struct buffer_control *p = (struct buffer_control *)data;
-
- if (num_ports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
- ppd = dd->pport + (port - 1);
- trace_bct_set(dd, p);
- if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
-}
-
-static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
- u32 num_ports = OPA_AM_NPORT(am);
- u8 section = (am & 0x00ff0000) >> 16;
- u8 *p = data;
- int size = 0;
-
- if (num_ports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- switch (section) {
- case OPA_VLARB_LOW_ELEMENTS:
- size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
- break;
- case OPA_VLARB_HIGH_ELEMENTS:
- size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
- break;
- case OPA_VLARB_PREEMPT_ELEMENTS:
- size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
- break;
- case OPA_VLARB_PREEMPT_MATRIX:
- size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
- break;
- default:
- pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
- be32_to_cpu(smp->attr_mod));
- smp->status |= IB_SMP_INVALID_FIELD;
- break;
- }
-
- if (size > 0 && resp_len)
- *resp_len += size;
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
- u32 num_ports = OPA_AM_NPORT(am);
- u8 section = (am & 0x00ff0000) >> 16;
- u8 *p = data;
-
- if (num_ports != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- switch (section) {
- case OPA_VLARB_LOW_ELEMENTS:
- (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
- break;
- case OPA_VLARB_HIGH_ELEMENTS:
- (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
- break;
- /*
- * Neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
- * can be changed from the default values.
- */
- case OPA_VLARB_PREEMPT_ELEMENTS:
- /* FALLTHROUGH */
- case OPA_VLARB_PREEMPT_MATRIX:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- break;
- default:
- pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
- be32_to_cpu(smp->attr_mod));
- smp->status |= IB_SMP_INVALID_FIELD;
- break;
- }
-
- return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
-}
-
-struct opa_pma_mad {
- struct ib_mad_hdr mad_hdr;
- u8 data[2024];
-} __packed;
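/*
 * Editorial sketch (not part of the original driver): an OPA PMA MAD is
 * 2048 bytes on the wire, i.e. the 24-byte ib_mad_hdr plus the 2024-byte
 * payload above. A compile-time check along these lines would catch
 * accidental padding; the helper name and the 2048 constant are our
 * assumptions from the field sizes, not symbols taken from this file.
 */
static inline void opa_pma_mad_size_check(void)
{
        BUILD_BUG_ON(sizeof(struct opa_pma_mad) != 2048);
}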
-
-struct opa_class_port_info {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- __be32 cap_mask2_resp_time;
-
- u8 redirect_gid[16];
- __be32 redirect_tc_fl;
- __be32 redirect_lid;
- __be32 redirect_sl_qp;
- __be32 redirect_qkey;
-
- u8 trap_gid[16];
- __be32 trap_tc_fl;
- __be32 trap_lid;
- __be32 trap_hl_qp;
- __be32 trap_qkey;
-
- __be16 trap_pkey;
- __be16 redirect_pkey;
-
- u8 trap_sl_rsvd;
- u8 reserved[3];
-} __packed;
-
-struct opa_port_status_req {
- __u8 port_num;
- __u8 reserved[3];
- __be32 vl_select_mask;
-};
-
-#define VL_MASK_ALL 0x000080ff
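/*
 * Editorial sketch (helper name is ours): VL_MASK_ALL selects the eight
 * data VLs (bits 0-7) plus VL15 (bit 15), so the per-VL loops below that
 * iterate for_each_set_bit() over this mask visit nine VLs in total.
 */
static inline u8 example_num_vls(void)
{
        return hweight32(VL_MASK_ALL); /* evaluates to 9 */
}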
-
-struct opa_port_status_rsp {
- __u8 port_num;
- __u8 reserved[3];
- __be32 vl_select_mask;
-
- /* Data counters */
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_pkts;
- __be64 port_rcv_pkts;
- __be64 port_multicast_xmit_pkts;
- __be64 port_multicast_rcv_pkts;
- __be64 port_xmit_wait;
- __be64 sw_port_congestion;
- __be64 port_rcv_fecn;
- __be64 port_rcv_becn;
- __be64 port_xmit_time_cong;
- __be64 port_xmit_wasted_bw;
- __be64 port_xmit_wait_data;
- __be64 port_rcv_bubble;
- __be64 port_mark_fecn;
- /* Error counters */
- __be64 port_rcv_constraint_errors;
- __be64 port_rcv_switch_relay_errors;
- __be64 port_xmit_discards;
- __be64 port_xmit_constraint_errors;
- __be64 port_rcv_remote_physical_errors;
- __be64 local_link_integrity_errors;
- __be64 port_rcv_errors;
- __be64 excessive_buffer_overruns;
- __be64 fm_config_errors;
- __be32 link_error_recovery;
- __be32 link_downed;
- u8 uncorrectable_errors;
-
- u8 link_quality_indicator; /* 5 bits reserved, 3 bits LQI */
- u8 res2[6];
- struct _vls_pctrs {
- /* per-VL Data counters */
- __be64 port_vl_xmit_data;
- __be64 port_vl_rcv_data;
- __be64 port_vl_xmit_pkts;
- __be64 port_vl_rcv_pkts;
- __be64 port_vl_xmit_wait;
- __be64 sw_port_vl_congestion;
- __be64 port_vl_rcv_fecn;
- __be64 port_vl_rcv_becn;
- __be64 port_xmit_time_cong;
- __be64 port_vl_xmit_wasted_bw;
- __be64 port_vl_xmit_wait_data;
- __be64 port_vl_rcv_bubble;
- __be64 port_vl_mark_fecn;
- __be64 port_vl_xmit_discards;
- } vls[0]; /* real array size defined by # bits set in vl_select_mask */
-};
-
-enum counter_selects {
- CS_PORT_XMIT_DATA = (1 << 31),
- CS_PORT_RCV_DATA = (1 << 30),
- CS_PORT_XMIT_PKTS = (1 << 29),
- CS_PORT_RCV_PKTS = (1 << 28),
- CS_PORT_MCAST_XMIT_PKTS = (1 << 27),
- CS_PORT_MCAST_RCV_PKTS = (1 << 26),
- CS_PORT_XMIT_WAIT = (1 << 25),
- CS_SW_PORT_CONGESTION = (1 << 24),
- CS_PORT_RCV_FECN = (1 << 23),
- CS_PORT_RCV_BECN = (1 << 22),
- CS_PORT_XMIT_TIME_CONG = (1 << 21),
- CS_PORT_XMIT_WASTED_BW = (1 << 20),
- CS_PORT_XMIT_WAIT_DATA = (1 << 19),
- CS_PORT_RCV_BUBBLE = (1 << 18),
- CS_PORT_MARK_FECN = (1 << 17),
- CS_PORT_RCV_CONSTRAINT_ERRORS = (1 << 16),
- CS_PORT_RCV_SWITCH_RELAY_ERRORS = (1 << 15),
- CS_PORT_XMIT_DISCARDS = (1 << 14),
- CS_PORT_XMIT_CONSTRAINT_ERRORS = (1 << 13),
- CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS = (1 << 12),
- CS_LOCAL_LINK_INTEGRITY_ERRORS = (1 << 11),
- CS_PORT_RCV_ERRORS = (1 << 10),
- CS_EXCESSIVE_BUFFER_OVERRUNS = (1 << 9),
- CS_FM_CONFIG_ERRORS = (1 << 8),
- CS_LINK_ERROR_RECOVERY = (1 << 7),
- CS_LINK_DOWNED = (1 << 6),
- CS_UNCORRECTABLE_ERRORS = (1 << 5),
-};
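/*
 * Editorial sketch (illustrative values, host byte order; the wire field
 * is a __be32): a ClearPortStatus request that zeroes only the four
 * basic data counters would build its counter_select_mask like this:
 */
static const u32 example_clear_data_counters =
        CS_PORT_XMIT_DATA | CS_PORT_RCV_DATA |
        CS_PORT_XMIT_PKTS | CS_PORT_RCV_PKTS;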
-
-struct opa_clear_port_status {
- __be64 port_select_mask[4];
- __be32 counter_select_mask;
-};
-
-struct opa_aggregate {
- __be16 attr_id;
- __be16 err_reqlength; /* error: 1 bit, reserved: 8 bits, length: 7 bits */
- __be32 attr_mod;
- u8 data[0];
-};
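/*
 * Editorial sketch (helper names are ours): err_reqlength packs an error
 * flag in the top bit and the request length, in 8-byte units, in the
 * low 7 bits. These helpers mirror how the aggregate handlers further
 * down decode and flag that field.
 */
static inline size_t example_agg_data_len(const struct opa_aggregate *agg)
{
        /* low 7 bits: payload length in units of 8 bytes */
        return (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
}

static inline bool example_agg_has_error(const struct opa_aggregate *agg)
{
        /* bit 15: the per-attribute error flag set by set_aggr_error() */
        return be16_to_cpu(agg->err_reqlength) & 0x8000;
}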
-
-#define MSK_LLI 0x000000f0
-#define MSK_LLI_SFT 4
-#define MSK_LER 0x0000000f
-#define MSK_LER_SFT 0
-#define ADD_LLI 8
-#define ADD_LER 2
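/*
 * Editorial sketch: the resolution word of a DataPortCounters request
 * carries a 4-bit LocalLinkIntegrity resolution in bits 7:4 and a 4-bit
 * LinkErrorRecovery resolution in bits 3:0. pma_get_opa_datacounters()
 * turns a non-zero field into a right shift of (field + ADD_LLI) or
 * (field + ADD_LER) bits; e.g. a raw resolution of 0x30 yields
 * res_lli = 3 + 8 = 11, so the summed LLI count is divided by 2^11.
 * The helper name is ours.
 */
static inline u8 example_res_lli(__be32 resolution)
{
        u8 res = (be32_to_cpu(resolution) & MSK_LLI) >> MSK_LLI_SFT;

        return res ? res + ADD_LLI : 0;
}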
-
-/* Request contains first three fields, response contains those plus the rest */
-struct opa_port_data_counters_msg {
- __be64 port_select_mask[4];
- __be32 vl_select_mask;
- __be32 resolution;
-
- /* Response fields follow */
- struct _port_dctrs {
- u8 port_number;
- u8 reserved2[3];
- __be32 link_quality_indicator; /* 29 bits reserved, 3 bits LQI */
-
- /* Data counters */
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_pkts;
- __be64 port_rcv_pkts;
- __be64 port_multicast_xmit_pkts;
- __be64 port_multicast_rcv_pkts;
- __be64 port_xmit_wait;
- __be64 sw_port_congestion;
- __be64 port_rcv_fecn;
- __be64 port_rcv_becn;
- __be64 port_xmit_time_cong;
- __be64 port_xmit_wasted_bw;
- __be64 port_xmit_wait_data;
- __be64 port_rcv_bubble;
- __be64 port_mark_fecn;
-
- __be64 port_error_counter_summary;
- /* Sum of error counts/port */
-
- struct _vls_dctrs {
- /* per-VL Data counters */
- __be64 port_vl_xmit_data;
- __be64 port_vl_rcv_data;
- __be64 port_vl_xmit_pkts;
- __be64 port_vl_rcv_pkts;
- __be64 port_vl_xmit_wait;
- __be64 sw_port_vl_congestion;
- __be64 port_vl_rcv_fecn;
- __be64 port_vl_rcv_becn;
- __be64 port_xmit_time_cong;
- __be64 port_vl_xmit_wasted_bw;
- __be64 port_vl_xmit_wait_data;
- __be64 port_vl_rcv_bubble;
- __be64 port_vl_mark_fecn;
- } vls[0];
- /* array size defined by #bits set in vl_select_mask*/
- } port[1]; /* array size defined by #ports in attribute modifier */
-};
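/*
 * Editorial sketch (helper name is ours): both trailing arrays above are
 * sized at run time, so the handler below computes the real response
 * size from the request's VL mask before touching the MAD payload:
 */
static inline size_t example_dctrs_rsp_size(u32 vl_select_mask)
{
        /* base message plus one _vls_dctrs per bit set in the mask */
        return sizeof(struct opa_port_data_counters_msg) +
               hweight32(vl_select_mask) * sizeof(struct _vls_dctrs);
}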
-
-struct opa_port_error_counters64_msg {
- /*
- * Request contains the first two fields; the response contains
- * the whole structure.
- */
- __be64 port_select_mask[4];
- __be32 vl_select_mask;
-
- /* Response-only fields follow */
- __be32 reserved1;
- struct _port_ectrs {
- u8 port_number;
- u8 reserved2[7];
- __be64 port_rcv_constraint_errors;
- __be64 port_rcv_switch_relay_errors;
- __be64 port_xmit_discards;
- __be64 port_xmit_constraint_errors;
- __be64 port_rcv_remote_physical_errors;
- __be64 local_link_integrity_errors;
- __be64 port_rcv_errors;
- __be64 excessive_buffer_overruns;
- __be64 fm_config_errors;
- __be32 link_error_recovery;
- __be32 link_downed;
- u8 uncorrectable_errors;
- u8 reserved3[7];
- struct _vls_ectrs {
- __be64 port_vl_xmit_discards;
- } vls[0];
- /* array size defined by #bits set in vl_select_mask */
- } port[1]; /* array size defined by #ports in attribute modifier */
-};
-
-struct opa_port_error_info_msg {
- __be64 port_select_mask[4];
- __be32 error_info_select_mask;
- __be32 reserved1;
- struct _port_ei {
- u8 port_number;
- u8 reserved2[7];
-
- /* PortRcvErrorInfo */
- struct {
- u8 status_and_code;
- union {
- u8 raw[17];
- struct {
- /* EI1to12 format */
- u8 packet_flit1[8];
- u8 packet_flit2[8];
- u8 remaining_flit_bits12;
- } ei1to12;
- struct {
- u8 packet_bytes[8];
- u8 remaining_flit_bits;
- } ei13;
- } ei;
- u8 reserved3[6];
- } __packed port_rcv_ei;
-
- /* ExcessiveBufferOverrunInfo */
- struct {
- u8 status_and_sc;
- u8 reserved4[7];
- } __packed excessive_buffer_overrun_ei;
-
- /* PortXmitConstraintErrorInfo */
- struct {
- u8 status;
- u8 reserved5;
- __be16 pkey;
- __be32 slid;
- } __packed port_xmit_constraint_ei;
-
- /* PortRcvConstraintErrorInfo */
- struct {
- u8 status;
- u8 reserved6;
- __be16 pkey;
- __be32 slid;
- } __packed port_rcv_constraint_ei;
-
- /* PortRcvSwitchRelayErrorInfo */
- struct {
- u8 status_and_code;
- u8 reserved7[3];
- __u32 error_info;
- } __packed port_rcv_switch_relay_ei;
-
- /* UncorrectableErrorInfo */
- struct {
- u8 status_and_code;
- u8 reserved8;
- } __packed uncorrectable_ei;
-
- /* FMConfigErrorInfo */
- struct {
- u8 status_and_code;
- u8 error_info;
- } __packed fm_config_ei;
- __u32 reserved9;
- } port[1]; /* actual array size defined by #ports in attr modifier */
-};
-
-/* opa_port_error_info_msg error_info_select_mask bit definitions */
-enum error_info_selects {
- ES_PORT_RCV_ERROR_INFO = (1 << 31),
- ES_EXCESSIVE_BUFFER_OVERRUN_INFO = (1 << 30),
- ES_PORT_XMIT_CONSTRAINT_ERROR_INFO = (1 << 29),
- ES_PORT_RCV_CONSTRAINT_ERROR_INFO = (1 << 28),
- ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO = (1 << 27),
- ES_UNCORRECTABLE_ERROR_INFO = (1 << 26),
- ES_FM_CONFIG_ERROR_INFO = (1 << 25)
-};
-
-static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u32 *resp_len)
-{
- struct opa_class_port_info *p =
- (struct opa_class_port_info *)pmp->data;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- if (pmp->mad_hdr.attr_mod != 0)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- p->base_version = OPA_MGMT_BASE_VERSION;
- p->class_version = OPA_SMI_CLASS_VERSION;
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
- */
- p->cap_mask2_resp_time = cpu_to_be32(18);
-
- if (resp_len)
- *resp_len += sizeof(*p);
-
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static void a0_portstatus(struct hfi1_pportdata *ppd,
- struct opa_port_status_rsp *rsp, u32 vl_select_mask)
-{
- if (!is_bx(ppd->dd)) {
- unsigned long vl;
- u64 sum_vl_xmit_wait = 0;
- u32 vl_all_mask = VL_MASK_ALL;
-
- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
- 8 * sizeof(vl_all_mask)) {
- u64 tmp = sum_vl_xmit_wait +
- read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl));
- if (tmp < sum_vl_xmit_wait) {
- /* we wrapped */
- sum_vl_xmit_wait = (u64)~0;
- break;
- }
- sum_vl_xmit_wait = tmp;
- }
- if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
- rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
- }
-}
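/*
 * Editorial sketch: the wrap check above relies on unsigned overflow; if
 * adding a counter makes the running sum smaller, the sum wrapped and is
 * pegged at ~0. Factored out (hypothetically), the idiom reads:
 */
static inline u64 example_sat_add64(u64 sum, u64 add)
{
        u64 tmp = sum + add;

        return tmp < sum ? (u64)~0 : tmp; /* saturate on wrap */
}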
-
-static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- struct opa_port_status_req *req =
- (struct opa_port_status_req *)pmp->data;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct opa_port_status_rsp *rsp;
- u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
- unsigned long vl;
- size_t response_data_size;
- u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- u8 port_num = req->port_num;
- u8 num_vls = hweight32(vl_select_mask);
- struct _vls_pctrs *vlinfo;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- int vfi;
- u64 tmp, tmp2;
-
- response_data_size = sizeof(struct opa_port_status_rsp) +
- num_vls * sizeof(struct _vls_pctrs);
- if (response_data_size > sizeof(pmp->data)) {
- pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- if (nports != 1 || (port_num && port_num != port) ||
- num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- rsp = (struct opa_port_status_rsp *)pmp->data;
- if (port_num)
- rsp->port_num = port_num;
- else
- rsp->port_num = port;
-
- rsp->port_rcv_constraint_errors =
- cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
- CNTR_INVALID_VL));
-
- hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
-
- rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
- rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
- CNTR_INVALID_VL));
- rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
- CNTR_INVALID_VL));
- rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
- CNTR_INVALID_VL));
- rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
- CNTR_INVALID_VL));
- rsp->port_multicast_xmit_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
- CNTR_INVALID_VL));
- rsp->port_multicast_rcv_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
- CNTR_INVALID_VL));
- rsp->port_xmit_wait =
- cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
- rsp->port_rcv_fecn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
- rsp->port_rcv_becn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
- rsp->port_xmit_discards =
- cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
- CNTR_INVALID_VL));
- rsp->port_xmit_constraint_errors =
- cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
- CNTR_INVALID_VL));
- rsp->port_rcv_remote_physical_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
- CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
- tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL);
- if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->link_error_recovery = cpu_to_be32(~0);
- } else {
- rsp->link_error_recovery = cpu_to_be32(tmp2);
- }
- rsp->port_rcv_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
- rsp->excessive_buffer_overruns =
- cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
- rsp->fm_config_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
- CNTR_INVALID_VL));
- rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
- CNTR_INVALID_VL));
-
- /* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
- tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
- rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
-
- vlinfo = &rsp->vls[0];
- vfi = 0;
- /* The vl_select_mask has been checked above, and we know
- * that it contains only entries which represent valid VLs.
- * So in the for_each_set_bit() loop below, we don't need
- * any additional checks for vl.
- */
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
- memset(vlinfo, 0, sizeof(*vlinfo));
-
- tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
- rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
-
- rsp->vls[vfi].port_vl_rcv_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_xmit_data =
- cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_xmit_pkts =
- cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_xmit_wait =
- cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_rcv_fecn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_rcv_becn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
- idx_from_vl(vl)));
-
- vlinfo++;
- vfi++;
- }
-
- a0_portstatus(ppd, rsp, vl_select_mask);
-
- if (resp_len)
- *resp_len += response_data_size;
-
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
- u8 res_lli, u8 res_ler)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u64 error_counter_summary = 0, tmp;
-
- error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
- CNTR_INVALID_VL);
- /* port_rcv_switch_relay_errors is 0 for HFIs */
- error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
- CNTR_INVALID_VL);
- error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
- CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
- CNTR_INVALID_VL);
- /* local link integrity must be right-shifted by the lli resolution */
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- error_counter_summary += (tmp >> res_lli);
- /* link error recovery must be right-shifted by the ler resolution */
- tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
- tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
- error_counter_summary += (tmp >> res_ler);
- error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
- CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
- error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
- CNTR_INVALID_VL);
- /* ppd->link_downed is a 32-bit value */
- error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
- CNTR_INVALID_VL);
- tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
- /* this is an 8-bit quantity */
- error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
-
- return error_counter_summary;
-}
-
-static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
- u32 vl_select_mask)
-{
- if (!is_bx(ppd->dd)) {
- unsigned long vl;
- u64 sum_vl_xmit_wait = 0;
- u32 vl_all_mask = VL_MASK_ALL;
-
- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
- 8 * sizeof(vl_all_mask)) {
- u64 tmp = sum_vl_xmit_wait +
- read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl));
- if (tmp < sum_vl_xmit_wait) {
- /* we wrapped */
- sum_vl_xmit_wait = (u64)~0;
- break;
- }
- sum_vl_xmit_wait = tmp;
- }
- if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
- rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
- }
-}
-
-static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
- struct _port_dctrs *rsp)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
-
- rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
- CNTR_INVALID_VL));
- rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
- CNTR_INVALID_VL));
- rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
- CNTR_INVALID_VL));
- rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
- CNTR_INVALID_VL));
- rsp->port_multicast_xmit_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
- CNTR_INVALID_VL));
- rsp->port_multicast_rcv_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
- CNTR_INVALID_VL));
-}
-
-static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- struct opa_port_data_counters_msg *req =
- (struct opa_port_data_counters_msg *)pmp->data;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct _port_dctrs *rsp;
- struct _vls_dctrs *vlinfo;
- size_t response_data_size;
- u32 num_ports;
- u8 num_pslm;
- u8 lq, num_vls;
- u8 res_lli, res_ler;
- u64 port_mask;
- unsigned long port_num;
- unsigned long vl;
- u32 vl_select_mask;
- int vfi;
-
- num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
- num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
- vl_select_mask = be32_to_cpu(req->vl_select_mask);
- res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
- res_lli = res_lli ? res_lli + ADD_LLI : 0;
- res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
- res_ler = res_ler ? res_ler + ADD_LER : 0;
-
- if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- /* Sanity check */
- response_data_size = sizeof(struct opa_port_data_counters_msg) +
- num_vls * sizeof(struct _vls_dctrs);
-
- if (response_data_size > sizeof(pmp->data)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- /*
- * The bit set in the mask needs to be consistent with the
- * port the request came in on.
- */
- port_mask = be64_to_cpu(req->port_select_mask[3]);
- port_num = find_first_bit((unsigned long *)&port_mask,
- 8 * sizeof(port_mask)); /* size argument is in bits */
-
- if ((u8)port_num != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- rsp = &req->port[0];
- memset(rsp, 0, sizeof(*rsp));
-
- rsp->port_number = port;
- /*
- * Note that link_quality_indicator is a 32 bit quantity in
- * 'datacounters' queries (as opposed to 'portinfo' queries,
- * where it's a byte).
- */
- hfi1_read_link_quality(dd, &lq);
- rsp->link_quality_indicator = cpu_to_be32((u32)lq);
- pma_get_opa_port_dctrs(ibdev, rsp);
-
- rsp->port_xmit_wait =
- cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
- rsp->port_rcv_fecn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
- rsp->port_rcv_becn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
- rsp->port_error_counter_summary =
- cpu_to_be64(get_error_counter_summary(ibdev, port,
- res_lli, res_ler));
-
- vlinfo = &rsp->vls[0];
- vfi = 0;
- /* The vl_select_mask has been checked above, and we know
- * that it contains only entries which represent valid VLs.
- * So in the for_each_set_bit() loop below, we don't need
- * any additional checks for vl.
- */
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(req->vl_select_mask)) {
- memset(vlinfo, 0, sizeof(*vlinfo));
-
- rsp->vls[vfi].port_vl_xmit_data =
- cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_rcv_data =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_xmit_pkts =
- cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_rcv_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_xmit_wait =
- cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl)));
-
- rsp->vls[vfi].port_vl_rcv_fecn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
- idx_from_vl(vl)));
- rsp->vls[vfi].port_vl_rcv_becn =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
- idx_from_vl(vl)));
-
- /* rsp->port_vl_xmit_time_cong is 0 for HFIs */
- /* rsp->port_vl_xmit_wasted_bw ??? */
- /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
- * does this differ from rsp->vls[vfi].port_vl_xmit_wait
- */
- /*rsp->vls[vfi].port_vl_mark_fecn =
- * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
- * + offset));
- */
- vlinfo++;
- vfi++;
- }
-
- a0_datacounters(ppd, rsp, vl_select_mask);
-
- if (resp_len)
- *resp_len += response_data_size;
-
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
- pmp->data;
- struct _port_dctrs rsp;
-
- if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
-
- memset(&rsp, 0, sizeof(rsp));
- pma_get_opa_port_dctrs(ibdev, &rsp);
-
- p->port_xmit_data = rsp.port_xmit_data;
- p->port_rcv_data = rsp.port_rcv_data;
- p->port_xmit_packets = rsp.port_xmit_pkts;
- p->port_rcv_packets = rsp.port_rcv_pkts;
- p->port_unicast_xmit_packets = 0;
- p->port_unicast_rcv_packets = 0;
- p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
- p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
-
-bail:
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
- struct _port_ectrs *rsp, u8 port)
-{
- u64 tmp, tmp2;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL);
- if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->link_error_recovery = cpu_to_be32(~0);
- } else {
- rsp->link_error_recovery = cpu_to_be32(tmp2);
- }
-
- rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
- CNTR_INVALID_VL));
- rsp->port_rcv_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
- rsp->port_rcv_remote_physical_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
- CNTR_INVALID_VL));
- rsp->port_rcv_switch_relay_errors = 0;
- rsp->port_xmit_discards =
- cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
- CNTR_INVALID_VL));
- rsp->port_xmit_constraint_errors =
- cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
- CNTR_INVALID_VL));
- rsp->port_rcv_constraint_errors =
- cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
- CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
- rsp->excessive_buffer_overruns =
- cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
-}
-
-static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- size_t response_data_size;
- struct _port_ectrs *rsp;
- u8 port_num;
- struct opa_port_error_counters64_msg *req;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- u32 num_ports;
- u8 num_pslm;
- u8 num_vls;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
- struct _vls_ectrs *vlinfo;
- unsigned long vl;
- u64 port_mask, tmp;
- u32 vl_select_mask;
- int vfi;
-
- req = (struct opa_port_error_counters64_msg *)pmp->data;
-
- num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
-
- num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
- num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
-
- if (num_ports != 1 || num_ports != num_pslm) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- response_data_size = sizeof(struct opa_port_error_counters64_msg) +
- num_vls * sizeof(struct _vls_ectrs);
-
- if (response_data_size > sizeof(pmp->data)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
- /*
- * The bit set in the mask needs to be consistent with the
- * port the request came in on.
- */
- port_mask = be64_to_cpu(req->port_select_mask[3]);
- port_num = find_first_bit((unsigned long *)&port_mask,
- 8 * sizeof(port_mask)); /* size argument is in bits */
-
- if (port_num != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- rsp = &req->port[0];
-
- ibp = to_iport(ibdev, port_num);
- ppd = ppd_from_ibp(ibp);
-
- memset(rsp, 0, sizeof(*rsp));
- rsp->port_number = port_num;
-
- pma_get_opa_port_ectrs(ibdev, rsp, port_num);
-
- rsp->port_rcv_remote_physical_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
- CNTR_INVALID_VL));
- rsp->fm_config_errors =
- cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
- CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
-
- rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
-
- vlinfo = &rsp->vls[0];
- vfi = 0;
- vl_select_mask = be32_to_cpu(req->vl_select_mask);
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(req->vl_select_mask)) {
- memset(vlinfo, 0, sizeof(*vlinfo));
- /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
- vlinfo += 1;
- vfi++;
- }
-
- if (resp_len)
- *resp_len += response_data_size;
-
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct _port_ectrs rsp;
- u64 temp_link_overrun_errors;
- u64 temp_64;
- u32 temp_32;
-
- memset(&rsp, 0, sizeof(rsp));
- pma_get_opa_port_ectrs(ibdev, &rsp, port);
-
- if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
-
- p->symbol_error_counter = 0; /* N/A for OPA */
-
- temp_32 = be32_to_cpu(rsp.link_error_recovery);
- if (temp_32 > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter = (u8)temp_32;
-
- temp_32 = be32_to_cpu(rsp.link_downed);
- if (temp_32 > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter = (u8)temp_32;
-
- temp_64 = be64_to_cpu(rsp.port_rcv_errors);
- if (temp_64 > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors = cpu_to_be16((u16)temp_64);
-
- temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
- if (temp_64 > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
-
- temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
- p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
-
- temp_64 = be64_to_cpu(rsp.port_xmit_discards);
- if (temp_64 > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards = cpu_to_be16((u16)temp_64);
-
- temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
- if (temp_64 > 0xFFUL)
- p->port_xmit_constraint_errors = 0xFF;
- else
- p->port_xmit_constraint_errors = (u8)temp_64;
-
- temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
- if (temp_64 > 0xFFUL)
- p->port_rcv_constraint_errors = 0xFFUL;
- else
- p->port_rcv_constraint_errors = (u8)temp_64;
-
- /* LocalLink: 7:4, BufferOverrun: 3:0 */
- temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
- if (temp_64 > 0xFUL)
- temp_64 = 0xFUL;
-
- temp_link_overrun_errors = temp_64 << 4;
-
- temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
- if (temp_64 > 0xFUL)
- temp_64 = 0xFUL;
- temp_link_overrun_errors |= temp_64;
-
- p->link_overrun_errors = (u8)temp_link_overrun_errors;
-
- p->vl15_dropped = 0; /* N/A for OPA */
-
-bail:
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- size_t response_data_size;
- struct _port_ei *rsp;
- struct opa_port_error_info_msg *req;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- u64 port_mask;
- u32 num_ports;
- u8 port_num;
- u8 num_pslm;
- u64 reg;
-
- req = (struct opa_port_error_info_msg *)pmp->data;
- rsp = &req->port[0];
-
- num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
- num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
-
- memset(rsp, 0, sizeof(*rsp));
-
- if (num_ports != 1 || num_ports != num_pslm) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- /* Sanity check */
- response_data_size = sizeof(struct opa_port_error_info_msg);
-
- if (response_data_size > sizeof(pmp->data)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- /*
- * The bit set in the mask needs to be consistent with the port
- * the request came in on.
- */
- port_mask = be64_to_cpu(req->port_select_mask[3]);
- port_num = find_first_bit((unsigned long *)&port_mask,
- 8 * sizeof(port_mask)); /* size argument is in bits */
-
- if (port_num != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- /* PortRcvErrorInfo */
- rsp->port_rcv_ei.status_and_code =
- dd->err_info_rcvport.status_and_code;
- memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
- &dd->err_info_rcvport.packet_flit1, sizeof(u64));
- memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
- &dd->err_info_rcvport.packet_flit2, sizeof(u64));
-
- /* ExcessiveBufferOverrunInfo */
- reg = read_csr(dd, RCV_ERR_INFO);
- if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
- /*
- * if the RcvExcessBufferOverrun bit is set, save SC of
- * first pkt that encountered an excess buffer overrun
- */
- u8 tmp = (u8)reg;
-
- tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
- tmp <<= 2;
- rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
- /* set the status bit */
- rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
- }
-
- rsp->port_xmit_constraint_ei.status =
- dd->err_info_xmit_constraint.status;
- rsp->port_xmit_constraint_ei.pkey =
- cpu_to_be16(dd->err_info_xmit_constraint.pkey);
- rsp->port_xmit_constraint_ei.slid =
- cpu_to_be32(dd->err_info_xmit_constraint.slid);
-
- rsp->port_rcv_constraint_ei.status =
- dd->err_info_rcv_constraint.status;
- rsp->port_rcv_constraint_ei.pkey =
- cpu_to_be16(dd->err_info_rcv_constraint.pkey);
- rsp->port_rcv_constraint_ei.slid =
- cpu_to_be32(dd->err_info_rcv_constraint.slid);
-
- /* UncorrectableErrorInfo */
- rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
-
- /* FMConfigErrorInfo */
- rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
-
- if (resp_len)
- *resp_len += response_data_size;
-
- return reply((struct ib_mad_hdr *)pmp);
-}
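/*
 * Editorial sketch (helper name is ours): excessive_buffer_overrun_ei.
 * status_and_sc, as built above, keeps the status flag in bit 7 and the
 * 5-bit SC of the first offending packet in bits 6:2. Decoding it:
 */
static inline u8 example_ebo_sc(u8 status_and_sc)
{
        return (status_and_sc >> 2) & 0x1f; /* SC field, bits 6:2 */
}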
-
-static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- struct opa_clear_port_status *req =
- (struct opa_clear_port_status *)pmp->data;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- u64 portn = be64_to_cpu(req->port_select_mask[3]);
- u32 counter_select = be32_to_cpu(req->counter_select_mask);
- u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
- unsigned long vl;
-
- if ((nports != 1) || (portn != 1 << port)) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
- /*
- * only counters returned by pma_get_opa_portstatus() are
- * handled, so when pma_get_opa_portstatus() gets a fix,
- * the corresponding change should be made here as well.
- */
-
- if (counter_select & CS_PORT_XMIT_DATA)
- write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_RCV_DATA)
- write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_XMIT_PKTS)
- write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_RCV_PKTS)
- write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
- write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_MCAST_RCV_PKTS)
- write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_XMIT_WAIT)
- write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
-
- /* ignore cs_sw_portCongestion for HFIs */
-
- if (counter_select & CS_PORT_RCV_FECN)
- write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_RCV_BECN)
- write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
-
- /* ignore cs_port_xmit_time_cong for HFIs */
- /* ignore cs_port_xmit_wasted_bw for now */
- /* ignore cs_port_xmit_wait_data for now */
- if (counter_select & CS_PORT_RCV_BUBBLE)
- write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
-
- /* Only applicable for switch */
- /* if (counter_select & CS_PORT_MARK_FECN)
- * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
- */
-
- if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
- write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
-
- /* ignore cs_port_rcv_switch_relay_errors for HFIs */
- if (counter_select & CS_PORT_XMIT_DISCARDS)
- write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
- write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
- write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
- write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
- write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
- }
-
- if (counter_select & CS_LINK_ERROR_RECOVERY) {
- write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
- write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL, 0);
- }
-
- if (counter_select & CS_PORT_RCV_ERRORS)
- write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
- write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
- dd->rcv_ovfl_cnt = 0;
- }
-
- if (counter_select & CS_FM_CONFIG_ERRORS)
- write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_LINK_DOWNED)
- write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
-
- if (counter_select & CS_UNCORRECTABLE_ERRORS)
- write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
-
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
- if (counter_select & CS_PORT_XMIT_DATA)
- write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
-
- if (counter_select & CS_PORT_RCV_DATA)
- write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
-
- if (counter_select & CS_PORT_XMIT_PKTS)
- write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
-
- if (counter_select & CS_PORT_RCV_PKTS)
- write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
-
- if (counter_select & CS_PORT_XMIT_WAIT)
- write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
-
- /* sw_port_vl_congestion is 0 for HFIs */
- if (counter_select & CS_PORT_RCV_FECN)
- write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
-
- if (counter_select & CS_PORT_RCV_BECN)
- write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
-
- /* port_vl_xmit_time_cong is 0 for HFIs */
- /* port_vl_xmit_wasted_bw ??? */
- /* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
- if (counter_select & CS_PORT_RCV_BUBBLE)
- write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
-
- /* if (counter_select & CS_PORT_MARK_FECN)
- * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
- */
- /* port_vl_xmit_discards ??? */
- }
-
- if (resp_len)
- *resp_len += sizeof(*req);
-
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- struct _port_ei *rsp;
- struct opa_port_error_info_msg *req;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- u64 port_mask;
- u32 num_ports;
- u8 port_num;
- u8 num_pslm;
- u32 error_info_select;
-
- req = (struct opa_port_error_info_msg *)pmp->data;
- rsp = &req->port[0];
-
- num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
- num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
-
- memset(rsp, 0, sizeof(*rsp));
-
- if (num_ports != 1 || num_ports != num_pslm) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- /*
- * The bit set in the mask needs to be consistent with the port
- * the request came in on.
- */
- port_mask = be64_to_cpu(req->port_select_mask[3]);
- port_num = find_first_bit((unsigned long *)&port_mask,
- 8 * sizeof(port_mask)); /* size argument is in bits */
-
- if (port_num != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- error_info_select = be32_to_cpu(req->error_info_select_mask);
-
- /* PortRcvErrorInfo */
- if (error_info_select & ES_PORT_RCV_ERROR_INFO)
- /* turn off status bit */
- dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
-
- /* ExcessiveBufferOverrunInfo */
- if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
- /*
- * status bit is essentially kept in the h/w - bit 5 of
- * RCV_ERR_INFO
- */
- write_csr(dd, RCV_ERR_INFO,
- RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
-
- if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
- dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
-
- if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
- dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
-
- /* UncorrectableErrorInfo */
- if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
- /* turn off status bit */
- dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
-
- /* FMConfigErrorInfo */
- if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
- /* turn off status bit */
- dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
-
- if (resp_len)
- *resp_len += sizeof(*req);
-
- return reply((struct ib_mad_hdr *)pmp);
-}
-
-struct opa_congestion_info_attr {
- __be16 congestion_info;
- u8 control_table_cap; /* number of 64-entry-unit CCT blocks */
- u8 congestion_log_length;
-} __packed;
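/*
 * Editorial sketch (helper name is ours): control_table_cap counts
 * 64-entry CCT blocks, so the total number of congestion control table
 * entries a port advertises works out to:
 */
static inline u32 example_cct_entries(const struct opa_congestion_info_attr *p)
{
        return (u32)p->control_table_cap * IB_CCT_ENTRIES; /* 64 per block */
}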
-
-static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct opa_congestion_info_attr *p =
- (struct opa_congestion_info_attr *)data;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- p->congestion_info = 0;
- p->control_table_cap = ppd->cc_max_table_entries;
- p->congestion_log_length = OPA_CONG_LOG_ELEMS;
-
- if (resp_len)
- *resp_len += sizeof(*p);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- int i;
- struct opa_congestion_setting_attr *p =
- (struct opa_congestion_setting_attr *)data;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct opa_congestion_setting_entry_shadow *entries;
- struct cc_state *cc_state;
-
- rcu_read_lock();
-
- cc_state = get_cc_state(ppd);
-
- if (!cc_state) {
- rcu_read_unlock();
- return reply((struct ib_mad_hdr *)smp);
- }
-
- entries = cc_state->cong_setting.entries;
- p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
- p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
- for (i = 0; i < OPA_MAX_SLS; i++) {
- p->entries[i].ccti_increase = entries[i].ccti_increase;
- p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
- p->entries[i].trigger_threshold =
- entries[i].trigger_threshold;
- p->entries[i].ccti_min = entries[i].ccti_min;
- }
-
- rcu_read_unlock();
-
- if (resp_len)
- *resp_len += sizeof(*p);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct opa_congestion_setting_attr *p =
- (struct opa_congestion_setting_attr *)data;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct opa_congestion_setting_entry_shadow *entries;
- int i;
-
- ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
-
- entries = ppd->congestion_entries;
- for (i = 0; i < OPA_MAX_SLS; i++) {
- entries[i].ccti_increase = p->entries[i].ccti_increase;
- entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
- entries[i].trigger_threshold =
- p->entries[i].trigger_threshold;
- entries[i].ccti_min = p->entries[i].ccti_min;
- }
-
- return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
- resp_len);
-}
-
-static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
- s64 ts;
- int i;
-
- if (am != 0) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- spin_lock_irq(&ppd->cc_log_lock);
-
- cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
- cong_log->congestion_flags = 0;
- cong_log->threshold_event_counter =
- cpu_to_be16(ppd->threshold_event_counter);
- memcpy(cong_log->threshold_cong_event_map,
- ppd->threshold_cong_event_map,
- sizeof(cong_log->threshold_cong_event_map));
- /* keep timestamp in units of 1.024 usec */
- ts = ktime_to_ns(ktime_get()) / 1024;
- cong_log->current_time_stamp = cpu_to_be32(ts);
- for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
- struct opa_hfi1_cong_log_event_internal *cce =
- &ppd->cc_events[ppd->cc_mad_idx++];
- if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
- ppd->cc_mad_idx = 0;
- /*
- * Entries which are older than twice the time
- * required to wrap the counter are supposed to
- * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
- */
- if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
- continue;
- memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
- memcpy(cong_log->events[i].remote_qp_number_cn_entry,
- &cce->rqpn, 3);
- cong_log->events[i].sl_svc_type_cn_entry =
- ((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
- cong_log->events[i].remote_lid_cn_entry =
- cpu_to_be32(cce->rlid);
- cong_log->events[i].timestamp_cn_entry =
- cpu_to_be32(cce->timestamp);
- }
-
- /*
- * Reset threshold_cong_event_map, and threshold_event_counter
- * to 0 when log is read.
- */
- memset(ppd->threshold_cong_event_map, 0x0,
- sizeof(ppd->threshold_cong_event_map));
- ppd->threshold_event_counter = 0;
-
- spin_unlock_irq(&ppd->cc_log_lock);
-
- if (resp_len)
- *resp_len += sizeof(struct opa_hfi1_cong_log);
-
- return reply((struct ib_mad_hdr *)smp);
-}
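/*
 * Editorial sketch (helper name is ours): the congestion log timestamp
 * ticks in 1.024 usec units, obtained above by dividing nanoseconds by
 * 1024. Only the low 32 bits go on the wire, which is why entries older
 * than twice the 32-bit wrap period are skipped in the loop above.
 */
static inline u32 example_cc_timestamp(void)
{
        return (u32)(ktime_to_ns(ktime_get()) / 1024); /* 1.024 us ticks */
}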
-
-static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct ib_cc_table_attr *cc_table_attr =
- (struct ib_cc_table_attr *)data;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 start_block = OPA_AM_START_BLK(am);
- u32 n_blocks = OPA_AM_NBLK(am);
- struct ib_cc_table_entry_shadow *entries;
- int i, j;
- u32 sentry, eentry;
- struct cc_state *cc_state;
-
- /* sanity check n_blocks, start_block */
- if (n_blocks == 0 ||
- start_block + n_blocks > ppd->cc_max_table_entries) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- rcu_read_lock();
-
- cc_state = get_cc_state(ppd);
-
- if (!cc_state) {
- rcu_read_unlock();
- return reply((struct ib_mad_hdr *)smp);
- }
-
- sentry = start_block * IB_CCT_ENTRIES;
- eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
-
- cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
-
- entries = cc_state->cct.entries;
-
- /* return n_blocks, though the last block may not be full */
- for (j = 0, i = sentry; i < eentry; j++, i++)
- cc_table_attr->ccti_entries[j].entry =
- cpu_to_be16(entries[i].entry);
-
- rcu_read_unlock();
-
- if (resp_len)
- *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-void cc_state_reclaim(struct rcu_head *rcu)
-{
- struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
-
- kfree(cc_state);
-}
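/*
 * Editorial note (a sketch, not a claim about the driver's choice):
 * because the callback above only kfree()s the structure, the
 * call_rcu(&old_cc_state->rcu, cc_state_reclaim) in the setter below
 * could likely be written with the kfree_rcu() convenience macro,
 * making this helper unnecessary:
 */
static inline void example_reclaim(struct cc_state *old_cc_state)
{
        kfree_rcu(old_cc_state, rcu); /* frees after a grace period */
}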
-
-static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 start_block = OPA_AM_START_BLK(am);
- u32 n_blocks = OPA_AM_NBLK(am);
- struct ib_cc_table_entry_shadow *entries;
- int i, j;
- u32 sentry, eentry;
- u16 ccti_limit;
- struct cc_state *old_cc_state, *new_cc_state;
-
- /* sanity check n_blocks, start_block */
- if (n_blocks == 0 ||
- start_block + n_blocks > ppd->cc_max_table_entries) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- sentry = start_block * IB_CCT_ENTRIES;
- eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
- (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
-
- /* sanity check ccti_limit */
- ccti_limit = be16_to_cpu(p->ccti_limit);
- if (ccti_limit + 1 > eentry) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
- if (!new_cc_state)
- goto getit;
-
- spin_lock(&ppd->cc_state_lock);
-
- old_cc_state = get_cc_state(ppd);
-
- if (!old_cc_state) {
- spin_unlock(&ppd->cc_state_lock);
- kfree(new_cc_state);
- return reply((struct ib_mad_hdr *)smp);
- }
-
- *new_cc_state = *old_cc_state;
-
- new_cc_state->cct.ccti_limit = ccti_limit;
-
- entries = ppd->ccti_entries;
- ppd->total_cct_entry = ccti_limit + 1;
-
- for (j = 0, i = sentry; i < eentry; j++, i++)
- entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
-
- memcpy(new_cc_state->cct.entries, entries,
- eentry * sizeof(struct ib_cc_table_entry));
-
- new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
- new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
- memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
- OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
-
- rcu_assign_pointer(ppd->cc_state, new_cc_state);
-
- spin_unlock(&ppd->cc_state_lock);
-
- call_rcu(&old_cc_state->rcu, cc_state_reclaim);
-
-getit:
- return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
-}
-
-struct opa_led_info {
- __be32 rsvd_led_mask;
- __be32 rsvd;
-};
-
-#define OPA_LED_SHIFT 31
-#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
-
-static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_pportdata *ppd = dd->pport;
- struct opa_led_info *p = (struct opa_led_info *)data;
- u32 nport = OPA_AM_NPORT(am);
- u32 is_beaconing_active;
-
- if (nport != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- /*
- * This pairs with the memory barrier in hfi1_start_led_override to
- * ensure that we read the correct state of LED beaconing represented
- * by led_override_timer_active
- */
- smp_rmb();
- is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
- p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
-
- if (resp_len)
- *resp_len += sizeof(struct opa_led_info);
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct opa_led_info *p = (struct opa_led_info *)data;
- u32 nport = OPA_AM_NPORT(am);
- int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
-
- if (nport != 1) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- if (on)
- hfi1_start_led_override(dd->pport, 2000, 1500);
- else
- shutdown_led_override(dd->pport);
-
- return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
-}
-
-static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- int ret;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
-
- switch (attr_id) {
- case IB_SMP_ATTR_NODE_DESC:
- ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_NODE_INFO:
- ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_PORT_INFO:
- ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SL_TO_SC_MAP:
- ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SC_TO_SL_MAP:
- ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
- ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
- ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_PORT_STATE_INFO:
- ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
- ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_CABLE_INFO:
- ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_CONGESTION_INFO:
- ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
- ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
- port, resp_len);
- break;
- case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
- ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
- port, resp_len);
- break;
- case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
- ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_LED_INFO:
- ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (ibp->rvp.port_cap_flags & IB_PORT_SM)
- return IB_MAD_RESULT_SUCCESS;
- /* FALLTHROUGH */
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)smp);
- break;
- }
- return ret;
-}
-
-static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
- u8 *data, struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- int ret;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
-
- switch (attr_id) {
- case IB_SMP_ATTR_PORT_INFO:
- ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SL_TO_SC_MAP:
- ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SC_TO_SL_MAP:
- ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
- ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
- ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_PORT_STATE_INFO:
- ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
- ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
- resp_len);
- break;
- case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
- ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
- port, resp_len);
- break;
- case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
- ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_LED_INFO:
- ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
- resp_len);
- break;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (ibp->rvp.port_cap_flags & IB_PORT_SM)
- return IB_MAD_RESULT_SUCCESS;
- /* FALLTHROUGH */
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)smp);
- break;
- }
- return ret;
-}
-
-static inline void set_aggr_error(struct opa_aggregate *ag)
-{
- ag->err_reqlength |= cpu_to_be16(0x8000);
-}
-
-static int subn_get_opa_aggregate(struct opa_smp *smp,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- int i;
- u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
- u8 *next_smp = opa_get_smp_data(smp);
-
- if (num_attr < 1 || num_attr > 117) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < num_attr; i++) {
- struct opa_aggregate *agg;
- size_t agg_data_len;
- size_t agg_size;
- u32 am;
-
- agg = (struct opa_aggregate *)next_smp;
- agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
- agg_size = sizeof(*agg) + agg_data_len;
- am = be32_to_cpu(agg->attr_mod);
-
- *resp_len += agg_size;
-
- if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- /* zero the payload for this segment */
- memset(next_smp + sizeof(*agg), 0, agg_data_len);
-
- (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
- ibdev, port, NULL);
- if (smp->status & ~IB_SMP_DIRECTION) {
- set_aggr_error(agg);
- return reply((struct ib_mad_hdr *)smp);
- }
- next_smp += agg_size;
- }
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-static int subn_set_opa_aggregate(struct opa_smp *smp,
- struct ib_device *ibdev, u8 port,
- u32 *resp_len)
-{
- int i;
- u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
- u8 *next_smp = opa_get_smp_data(smp);
-
- if (num_attr < 1 || num_attr > 117) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- for (i = 0; i < num_attr; i++) {
- struct opa_aggregate *agg;
- size_t agg_data_len;
- size_t agg_size;
- u32 am;
-
- agg = (struct opa_aggregate *)next_smp;
- agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
- agg_size = sizeof(*agg) + agg_data_len;
- am = be32_to_cpu(agg->attr_mod);
-
- *resp_len += agg_size;
-
- if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
- smp->status |= IB_SMP_INVALID_FIELD;
- return reply((struct ib_mad_hdr *)smp);
- }
-
- (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
- ibdev, port, NULL);
- if (smp->status & ~IB_SMP_DIRECTION) {
- set_aggr_error(agg);
- return reply((struct ib_mad_hdr *)smp);
- }
- next_smp += agg_size;
- }
-
- return reply((struct ib_mad_hdr *)smp);
-}
-
-/*
- * OPAv1 specifies that, on the transition to link up, these counters
- * are cleared:
- * PortRcvErrors [*]
- * LinkErrorRecovery
- * LocalLinkIntegrityErrors
- * ExcessiveBufferOverruns [*]
- *
- * [*] Error info associated with these counters is retained, but the
- * error info status is reset to 0.
- */
-void clear_linkup_counters(struct hfi1_devdata *dd)
-{
- /* PortRcvErrors */
- write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
- dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
- /* LinkErrorRecovery */
- write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
- write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
- /* LocalLinkIntegrityErrors */
- write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
- write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
- /* ExcessiveBufferOverruns */
- write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
- dd->rcv_ovfl_cnt = 0;
- dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
-}
-
-/*
- * is_local_mad() returns 1 if 'mad' was sent from, and is destined to,
- * the local node; 0 otherwise.
- */
-static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
- const struct ib_wc *in_wc)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- const struct opa_smp *smp = (const struct opa_smp *)mad;
-
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- return (smp->hop_cnt == 0 &&
- smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
- smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
- }
-
- return (in_wc->slid == ppd->lid);
-}
-
-/*
- * opa_local_smp_check() should only be called on MADs for which
- * is_local_mad() returns true. It applies the SMP checks that are
- * specific to SMPs which are sent from, and destined to this node.
- * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
- * otherwise.
- *
- * SMPs which arrive from other nodes are instead checked by
- * opa_smp_check().
- */
-static int opa_local_smp_check(struct hfi1_ibport *ibp,
- const struct ib_wc *in_wc)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid = in_wc->slid;
- u16 pkey;
-
- if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
- return 1;
-
- pkey = ppd->pkeys[in_wc->pkey_index];
- /*
- * We need to do the "node-local" checks specified in OPAv1,
- * rev 0.90, section 9.10.26, which are:
- * - pkey is 0x7fff, or 0xffff
- * - Source QPN == 0 || Destination QPN == 0
- * - the MAD header's management class is either
- * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
- * IB_MGMT_CLASS_SUBN_LID_ROUTED
- * - SLID != 0
- *
- * However, we know (and so don't need to check again) that,
- * for local SMPs, the MAD stack passes MADs with:
- * - Source QPN of 0
- * - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
- * our own port's lid
- *
- */
- if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
- return 0;
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
-}
-
-static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct opa_mad *in_mad,
- struct opa_mad *out_mad,
- u32 *resp_len)
-{
- struct opa_smp *smp = (struct opa_smp *)out_mad;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- u8 *data;
- u32 am;
- __be16 attr_id;
- int ret;
-
- *out_mad = *in_mad;
- data = opa_get_smp_data(smp);
-
- am = be32_to_cpu(smp->attr_mod);
- attr_id = smp->attr_id;
- if (smp->class_version != OPA_SMI_CLASS_VERSION) {
- smp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_mad_hdr *)smp);
- return ret;
- }
- ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
- smp->route.dr.dr_slid, smp->route.dr.return_path,
- smp->hop_cnt);
- if (ret) {
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
-		/*
-		 * check_mkey() failed on the receiving port. If this is a
-		 * get/set portinfo aimed at another port, run the M_Key
-		 * check on that port as well, so that its error counters
-		 * are incremented when the M_Key fails to match on *both*
-		 * ports.
-		 */
- if (attr_id == IB_SMP_ATTR_PORT_INFO &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET) &&
- port_num && port_num <= ibdev->phys_port_cnt &&
- port != port_num)
- (void)check_mkey(to_iport(ibdev, port_num),
- (struct ib_mad_hdr *)smp, 0,
- smp->mkey, smp->route.dr.dr_slid,
- smp->route.dr.return_path,
- smp->hop_cnt);
- ret = IB_MAD_RESULT_FAILURE;
- return ret;
- }
-
- *resp_len = opa_get_smp_header_size(smp);
-
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- switch (attr_id) {
- default:
- clear_opa_smp_data(smp);
- ret = subn_get_opa_sma(attr_id, smp, am, data,
- ibdev, port, resp_len);
- break;
- case OPA_ATTRIB_ID_AGGREGATE:
- ret = subn_get_opa_aggregate(smp, ibdev, port,
- resp_len);
- break;
- }
- break;
- case IB_MGMT_METHOD_SET:
- switch (attr_id) {
- default:
- ret = subn_set_opa_sma(attr_id, smp, am, data,
- ibdev, port, resp_len);
- break;
- case OPA_ATTRIB_ID_AGGREGATE:
- ret = subn_set_opa_aggregate(smp, ibdev, port,
- resp_len);
- break;
- }
- break;
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_REPORT:
- case IB_MGMT_METHOD_REPORT_RESP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- break;
- default:
- smp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_mad_hdr *)smp);
- break;
- }
-
- return ret;
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_smp *smp = (struct ib_smp *)out_mad;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- int ret;
-
- *out_mad = *in_mad;
- if (smp->class_version != 1) {
- smp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_mad_hdr *)smp);
- return ret;
- }
-
- ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
- smp->mkey, (__force __be32)smp->dr_slid,
- smp->return_path, smp->hop_cnt);
- if (ret) {
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
-		/*
-		 * check_mkey() failed on the receiving port. If this is a
-		 * get/set portinfo aimed at another port, run the M_Key
-		 * check on that port as well, so that its error counters
-		 * are incremented when the M_Key fails to match on *both*
-		 * ports.
-		 */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET) &&
- port_num && port_num <= ibdev->phys_port_cnt &&
- port != port_num)
- (void)check_mkey(to_iport(ibdev, port_num),
- (struct ib_mad_hdr *)smp, 0,
- smp->mkey,
- (__force __be32)smp->dr_slid,
- smp->return_path, smp->hop_cnt);
- ret = IB_MAD_RESULT_FAILURE;
- return ret;
- }
-
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_NODE_INFO:
- ret = subn_get_nodeinfo(smp, ibdev, port);
- break;
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)smp);
- break;
- }
- break;
- }
-
- return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port,
- const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
- struct ib_class_port_info *cpi = (struct ib_class_port_info *)
- &pmp->data;
- int ret = IB_MAD_RESULT_FAILURE;
-
- *out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != 1) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_mad_hdr *)pmp);
- return ret;
- }
-
- switch (pmp->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_PORT_COUNTERS:
- ret = pma_get_ib_portcounters(pmp, ibdev, port);
- break;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
- break;
- case IB_PMA_CLASS_PORT_INFO:
- cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
- ret = reply((struct ib_mad_hdr *)pmp);
- break;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)pmp);
- break;
- }
- break;
-
- case IB_MGMT_METHOD_SET:
- if (pmp->mad_hdr.attr_id) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)pmp);
- }
- break;
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- break;
-
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_mad_hdr *)pmp);
- break;
- }
-
- return ret;
-}
-
-static int process_perf_opa(struct ib_device *ibdev, u8 port,
- const struct opa_mad *in_mad,
- struct opa_mad *out_mad, u32 *resp_len)
-{
- struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
- int ret;
-
- *out_mad = *in_mad;
-
- if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
- return reply((struct ib_mad_hdr *)pmp);
- }
-
- *resp_len = sizeof(pmp->mad_hdr);
-
- switch (pmp->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_CLASS_PORT_INFO:
- ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
- break;
- case OPA_PM_ATTRIB_ID_PORT_STATUS:
- ret = pma_get_opa_portstatus(pmp, ibdev, port,
- resp_len);
- break;
- case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
- ret = pma_get_opa_datacounters(pmp, ibdev, port,
- resp_len);
- break;
- case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
- ret = pma_get_opa_porterrors(pmp, ibdev, port,
- resp_len);
- break;
- case OPA_PM_ATTRIB_ID_ERROR_INFO:
- ret = pma_get_opa_errorinfo(pmp, ibdev, port,
- resp_len);
- break;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)pmp);
- break;
- }
- break;
-
- case IB_MGMT_METHOD_SET:
- switch (pmp->mad_hdr.attr_id) {
- case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
- ret = pma_set_opa_portstatus(pmp, ibdev, port,
- resp_len);
- break;
- case OPA_PM_ATTRIB_ID_ERROR_INFO:
- ret = pma_set_opa_errorinfo(pmp, ibdev, port,
- resp_len);
- break;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_mad_hdr *)pmp);
- break;
- }
- break;
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- break;
-
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_mad_hdr *)pmp);
- break;
- }
-
- return ret;
-}
-
-static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct opa_mad *in_mad,
- struct opa_mad *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- int ret;
- int pkey_idx;
- u32 resp_len = 0;
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
-
- pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
- if (pkey_idx < 0) {
- pr_warn("failed to find limited mgmt pkey, defaulting 0x%x\n",
- hfi1_get_pkey(ibp, 1));
- pkey_idx = 1;
- }
- *out_mad_pkey_index = (u16)pkey_idx;
-
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- if (is_local_mad(ibp, in_mad, in_wc)) {
- ret = opa_local_smp_check(ibp, in_wc);
- if (ret)
- return IB_MAD_RESULT_FAILURE;
- }
- ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
- out_mad, &resp_len);
- goto bail;
- case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf_opa(ibdev, port, in_mad, out_mad,
- &resp_len);
- goto bail;
-
- default:
- ret = IB_MAD_RESULT_SUCCESS;
- }
-
-bail:
- if (ret & IB_MAD_RESULT_REPLY)
- *out_mad_size = round_up(resp_len, 8);
- else if (ret & IB_MAD_RESULT_SUCCESS)
- *out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
-
- return ret;
-}
-
-static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- int ret;
-
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
- break;
- case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in_mad, out_mad);
- break;
- default:
- ret = IB_MAD_RESULT_SUCCESS;
- break;
- }
-
- return ret;
-}
-
-/**
- * hfi1_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
-{
- switch (in_mad->base_version) {
- case OPA_MGMT_BASE_VERSION:
- if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
- dev_err(ibdev->dma_device, "invalid in_mad_size\n");
- return IB_MAD_RESULT_FAILURE;
- }
- return hfi1_process_opa_mad(ibdev, mad_flags, port,
- in_wc, in_grh,
- (struct opa_mad *)in_mad,
- (struct opa_mad *)out_mad,
- out_mad_size,
- out_mad_pkey_index);
- case IB_MGMT_BASE_VERSION:
- return hfi1_process_ib_mad(ibdev, mad_flags, port,
- in_wc, in_grh,
- (const struct ib_mad *)in_mad,
- (struct ib_mad *)out_mad);
- default:
- break;
- }
-
- return IB_MAD_RESULT_FAILURE;
-}
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h
deleted file mode 100644
index 55ee08675..000000000
--- a/drivers/staging/rdma/hfi1/mad.h
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef _HFI1_MAD_H
-#define _HFI1_MAD_H
-
-#include <rdma/ib_pma.h>
-#define USE_PI_LED_ENABLE 1 /*
- * use led enabled bit in struct
- * opa_port_states, if available
- */
-#include <rdma/opa_smi.h>
-#include <rdma/opa_port_info.h>
-#ifndef PI_LED_ENABLE_SUP
-#define PI_LED_ENABLE_SUP 0
-#endif
-#include "opa_compat.h"
-
-/*
- * OPA Traps
- */
-#define OPA_TRAP_GID_NOW_IN_SERVICE cpu_to_be16(64)
-#define OPA_TRAP_GID_OUT_OF_SERVICE cpu_to_be16(65)
-#define OPA_TRAP_ADD_MULTICAST_GROUP cpu_to_be16(66)
-#define OPA_TRAP_DEL_MULTICAST_GROUP		cpu_to_be16(67)
-#define OPA_TRAP_UNPATH cpu_to_be16(68)
-#define OPA_TRAP_REPATH cpu_to_be16(69)
-#define OPA_TRAP_PORT_CHANGE_STATE cpu_to_be16(128)
-#define OPA_TRAP_LINK_INTEGRITY cpu_to_be16(129)
-#define OPA_TRAP_EXCESSIVE_BUFFER_OVERRUN cpu_to_be16(130)
-#define OPA_TRAP_FLOW_WATCHDOG cpu_to_be16(131)
-#define OPA_TRAP_CHANGE_CAPABILITY cpu_to_be16(144)
-#define OPA_TRAP_CHANGE_SYSGUID cpu_to_be16(145)
-#define OPA_TRAP_BAD_M_KEY cpu_to_be16(256)
-#define OPA_TRAP_BAD_P_KEY cpu_to_be16(257)
-#define OPA_TRAP_BAD_Q_KEY cpu_to_be16(258)
-#define OPA_TRAP_SWITCH_BAD_PKEY cpu_to_be16(259)
-#define OPA_SMA_TRAP_DATA_LINK_WIDTH cpu_to_be16(2048)
-
-/*
- * Generic trap/notice other local changes flags (trap 144).
- */
-#define OPA_NOTICE_TRAP_LWDE_CHG 0x08 /* Link Width Downgrade Enable
- * changed
- */
-#define OPA_NOTICE_TRAP_LSE_CHG 0x04 /* Link Speed Enable changed */
-#define OPA_NOTICE_TRAP_LWE_CHG 0x02 /* Link Width Enable changed */
-#define OPA_NOTICE_TRAP_NODE_DESC_CHG 0x01
-
-struct opa_mad_notice_attr {
- u8 generic_type;
- u8 prod_type_msb;
- __be16 prod_type_lsb;
- __be16 trap_num;
- __be16 toggle_count;
- __be32 issuer_lid;
- __be32 reserved1;
- union ib_gid issuer_gid;
-
- union {
- struct {
- u8 details[64];
- } raw_data;
-
- struct {
- union ib_gid gid;
- } __packed ntc_64_65_66_67;
-
- struct {
- __be32 lid;
- } __packed ntc_128;
-
- struct {
- __be32 lid; /* where violation happened */
- u8 port_num; /* where violation happened */
- } __packed ntc_129_130_131;
-
- struct {
- __be32 lid; /* LID where change occurred */
- __be32 new_cap_mask; /* new capability mask */
- __be16 reserved2;
- __be16 cap_mask;
- __be16 change_flags; /* low 4 bits only */
- } __packed ntc_144;
-
- struct {
- __be64 new_sys_guid;
- __be32 lid; /* lid where sys guid changed */
- } __packed ntc_145;
-
- struct {
- __be32 lid;
- __be32 dr_slid;
- u8 method;
- u8 dr_trunc_hop;
- __be16 attr_id;
- __be32 attr_mod;
- __be64 mkey;
- u8 dr_rtn_path[30];
- } __packed ntc_256;
-
- struct {
- __be32 lid1;
- __be32 lid2;
- __be32 key;
- u8 sl; /* SL: high 5 bits */
- u8 reserved3[3];
- union ib_gid gid1;
- union ib_gid gid2;
- __be32 qp1; /* high 8 bits reserved */
- __be32 qp2; /* high 8 bits reserved */
- } __packed ntc_257_258;
-
- struct {
- __be16 flags; /* low 8 bits reserved */
- __be16 pkey;
- __be32 lid1;
- __be32 lid2;
- u8 sl; /* SL: high 5 bits */
- u8 reserved4[3];
- union ib_gid gid1;
- union ib_gid gid2;
- __be32 qp1; /* high 8 bits reserved */
- __be32 qp2; /* high 8 bits reserved */
- } __packed ntc_259;
-
- struct {
- __be32 lid;
- } __packed ntc_2048;
-
- };
- u8 class_data[0];
-};
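As an illustration, a capability-change notice (trap 144) would be built in the ntc_144 member of the union above; a hedged sketch (the function name and the lid/cap_mask parameters are placeholders):

	/* Sketch: populate a capability-change (trap 144) notice */
	static void fill_ntc_144(struct opa_mad_notice_attr *data,
				 u32 lid, u32 cap_mask)
	{
		memset(data, 0, sizeof(*data));
		data->trap_num = OPA_TRAP_CHANGE_CAPABILITY;
		data->issuer_lid = cpu_to_be32(lid);
		data->ntc_144.lid = data->issuer_lid;
		data->ntc_144.new_cap_mask = cpu_to_be32(cap_mask);
	}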
-
-#define IB_VLARB_LOWPRI_0_31 1
-#define IB_VLARB_LOWPRI_32_63 2
-#define IB_VLARB_HIGHPRI_0_31 3
-#define IB_VLARB_HIGHPRI_32_63 4
-
-#define OPA_MAX_PREEMPT_CAP 32
-#define OPA_VLARB_LOW_ELEMENTS 0
-#define OPA_VLARB_HIGH_ELEMENTS 1
-#define OPA_VLARB_PREEMPT_ELEMENTS 2
-#define OPA_VLARB_PREEMPT_MATRIX 3
-
-#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-
-struct ib_pma_portcounters_cong {
- u8 reserved;
- u8 reserved1;
- __be16 port_check_rate;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved2;
- u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
- __be16 reserved3;
- __be16 vl15_dropped;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_xmit_wait;
- __be64 port_adr_events;
-} __packed;
-
-#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
-#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
-#define IB_SMP_INVALID_FIELD cpu_to_be16(0x001C)
-
-#define HFI1_XMIT_RATE_UNSUPPORTED 0x0
-#define HFI1_XMIT_RATE_PICO 0x7
-/* number of 4nsec cycles equaling 2secs */
-#define HFI1_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-
-#define IB_CC_SVCTYPE_RC 0x0
-#define IB_CC_SVCTYPE_UC 0x1
-#define IB_CC_SVCTYPE_RD 0x2
-#define IB_CC_SVCTYPE_UD 0x3
-
-/*
- * There should be an equivalent IB #define for the following, but
- * I cannot find it.
- */
-#define OPA_CC_LOG_TYPE_HFI 2
-
-struct opa_hfi1_cong_log_event_internal {
- u32 lqpn;
- u32 rqpn;
- u8 sl;
- u8 svc_type;
- u32 rlid;
- s64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
-};
-
-struct opa_hfi1_cong_log_event {
- u8 local_qp_cn_entry[3];
- u8 remote_qp_number_cn_entry[3];
- u8 sl_svc_type_cn_entry; /* 5 bits SL, 3 bits svc type */
- u8 reserved;
- __be32 remote_lid_cn_entry;
- __be32 timestamp_cn_entry;
-} __packed;
-
-#define OPA_CONG_LOG_ELEMS 96
-
-struct opa_hfi1_cong_log {
- u8 log_type;
- u8 congestion_flags;
- __be16 threshold_event_counter;
- __be32 current_time_stamp;
- u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
- struct opa_hfi1_cong_log_event events[OPA_CONG_LOG_ELEMS];
-} __packed;
-
-#define IB_CC_TABLE_CAP_DEFAULT 31
-
-/* Port control flags */
-#define IB_CC_CCS_PC_SL_BASED 0x01
-
-struct opa_congestion_setting_entry {
- u8 ccti_increase;
- u8 reserved;
- __be16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct opa_congestion_setting_entry_shadow {
- u8 ccti_increase;
- u8 reserved;
- u16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct opa_congestion_setting_attr {
- __be32 control_map;
- __be16 port_control;
- struct opa_congestion_setting_entry entries[OPA_MAX_SLS];
-} __packed;
-
-struct opa_congestion_setting_attr_shadow {
- u32 control_map;
- u16 port_control;
- struct opa_congestion_setting_entry_shadow entries[OPA_MAX_SLS];
-} __packed;
-
-#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
-#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
-
-/* 64 Congestion Control table entries in a single MAD */
-#define IB_CCT_ENTRIES 64
-#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
-
-struct ib_cc_table_entry {
- __be16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_entry_shadow {
- u16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_attr {
- __be16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-struct ib_cc_table_attr_shadow {
- u16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-#define CC_TABLE_SHADOW_MAX \
- (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
-
-struct cc_table_shadow {
- u16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
-} __packed;
-
-/*
- * struct cc_state combines the (active) per-port congestion control
- * table, and the (active) per-SL congestion settings. cc_state data
- * may need to be read in code paths that we want to be fast, so it
- * is an RCU protected structure.
- */
-struct cc_state {
- struct rcu_head rcu;
- struct cc_table_shadow cct;
- struct opa_congestion_setting_attr_shadow cong_setting;
-};
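Because cc_state is RCU protected, a hot-path reader takes only the RCU read lock; a minimal sketch, assuming the per-port data keeps an RCU-managed pointer to the active cc_state (the ppd->cc_state field name is an assumption):

	/* Sketch: lockless read of the active congestion-control state */
	rcu_read_lock();
	cc_state = rcu_dereference(ppd->cc_state);	/* assumed field */
	if (cc_state)
		ccti_limit = cc_state->cct.ccti_limit;
	rcu_read_unlock();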
-
-/*
- * OPA BufferControl MAD
- */
-
-/* attribute modifier macros */
-#define OPA_AM_NPORT_SHIFT 24
-#define OPA_AM_NPORT_MASK 0xff
-#define OPA_AM_NPORT_SMASK (OPA_AM_NPORT_MASK << OPA_AM_NPORT_SHIFT)
-#define OPA_AM_NPORT(am) (((am) >> OPA_AM_NPORT_SHIFT) & \
- OPA_AM_NPORT_MASK)
-
-#define OPA_AM_NBLK_SHIFT 24
-#define OPA_AM_NBLK_MASK 0xff
-#define OPA_AM_NBLK_SMASK (OPA_AM_NBLK_MASK << OPA_AM_NBLK_SHIFT)
-#define OPA_AM_NBLK(am) (((am) >> OPA_AM_NBLK_SHIFT) & \
- OPA_AM_NBLK_MASK)
-
-#define OPA_AM_START_BLK_SHIFT 0
-#define OPA_AM_START_BLK_MASK 0xff
-#define OPA_AM_START_BLK_SMASK (OPA_AM_START_BLK_MASK << \
- OPA_AM_START_BLK_SHIFT)
-#define OPA_AM_START_BLK(am) (((am) >> OPA_AM_START_BLK_SHIFT) & \
- OPA_AM_START_BLK_MASK)
-
-#define OPA_AM_PORTNUM_SHIFT 0
-#define OPA_AM_PORTNUM_MASK 0xff
-#define OPA_AM_PORTNUM_SMASK (OPA_AM_PORTNUM_MASK << OPA_AM_PORTNUM_SHIFT)
-#define OPA_AM_PORTNUM(am) (((am) >> OPA_AM_PORTNUM_SHIFT) & \
- OPA_AM_PORTNUM_MASK)
-
-#define OPA_AM_ASYNC_SHIFT 12
-#define OPA_AM_ASYNC_MASK 0x1
-#define OPA_AM_ASYNC_SMASK (OPA_AM_ASYNC_MASK << OPA_AM_ASYNC_SHIFT)
-#define OPA_AM_ASYNC(am) (((am) >> OPA_AM_ASYNC_SHIFT) & \
- OPA_AM_ASYNC_MASK)
-
-#define OPA_AM_START_SM_CFG_SHIFT 9
-#define OPA_AM_START_SM_CFG_MASK 0x1
-#define OPA_AM_START_SM_CFG_SMASK (OPA_AM_START_SM_CFG_MASK << \
- OPA_AM_START_SM_CFG_SHIFT)
-#define OPA_AM_START_SM_CFG(am) (((am) >> OPA_AM_START_SM_CFG_SHIFT) \
- & OPA_AM_START_SM_CFG_MASK)
-
-#define OPA_AM_CI_ADDR_SHIFT 19
-#define OPA_AM_CI_ADDR_MASK 0xfff
-#define OPA_AM_CI_ADDR_SMASK	(OPA_AM_CI_ADDR_MASK << OPA_AM_CI_ADDR_SHIFT)
-#define OPA_AM_CI_ADDR(am) (((am) >> OPA_AM_CI_ADDR_SHIFT) & \
- OPA_AM_CI_ADDR_MASK)
-
-#define OPA_AM_CI_LEN_SHIFT 13
-#define OPA_AM_CI_LEN_MASK 0x3f
-#define OPA_AM_CI_LEN_SMASK	(OPA_AM_CI_LEN_MASK << OPA_AM_CI_LEN_SHIFT)
-#define OPA_AM_CI_LEN(am) (((am) >> OPA_AM_CI_LEN_SHIFT) & \
- OPA_AM_CI_LEN_MASK)
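Taken together, these macros slice a single 32-bit attribute modifier into its sub-fields; for example (the am value and the locals are illustrative):

	u32 am = be32_to_cpu(smp->attr_mod);	/* e.g. 0x02001003 */

	u8 nblk  = OPA_AM_NBLK(am);		/* bits 31:24 -> 0x02 */
	u8 start = OPA_AM_START_BLK(am);	/* bits 7:0   -> 0x03 */
	u8 async = OPA_AM_ASYNC(am);		/* bit 12     -> 0x1  */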
-
-/* error info macros */
-#define OPA_EI_STATUS_SMASK 0x80
-#define OPA_EI_CODE_SMASK 0x0f
-
-struct vl_limit {
- __be16 dedicated;
- __be16 shared;
-};
-
-struct buffer_control {
- __be16 reserved;
- __be16 overall_shared_limit;
- struct vl_limit vl[OPA_MAX_VLS];
-};
-
-struct sc2vlnt {
- u8 vlnt[32]; /* 5 bit VL, 3 bits reserved */
-};
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3-bit fields,
- * each of which specifies the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters, which count only the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
-#define COUNTER_MASK0_9 \
- cpu_to_be32(COUNTER_MASK(1, 0) | \
- COUNTER_MASK(1, 1) | \
- COUNTER_MASK(1, 2) | \
- COUNTER_MASK(1, 3) | \
- COUNTER_MASK(1, 4))
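COUNTER_MASK(q, n) places the 3-bit quantity q into the N'th slot, counting down from bit 27: COUNTER_MASK(1, 0) expands to 1 << 27 and COUNTER_MASK(1, 4) to 1 << 15, so COUNTER_MASK0_9 above sets slots 0-4 to 1 and leaves slots 5-9 zero.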
-
-#endif /* _HFI1_MAD_H */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
deleted file mode 100644
index b3f0682a3..000000000
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/list.h>
-#include <linux/mmu_notifier.h>
-#include <linux/interval_tree_generic.h>
-
-#include "mmu_rb.h"
-#include "trace.h"
-
-struct mmu_rb_handler {
- struct list_head list;
- struct mmu_notifier mn;
- struct rb_root *root;
- spinlock_t lock; /* protect the RB tree */
- struct mmu_rb_ops *ops;
-};
-
-static LIST_HEAD(mmu_rb_handlers);
-static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
-
-static unsigned long mmu_node_start(struct mmu_rb_node *);
-static unsigned long mmu_node_last(struct mmu_rb_node *);
-static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
- unsigned long);
-static inline void mmu_notifier_range_start(struct mmu_notifier *,
- struct mm_struct *,
- unsigned long, unsigned long);
-static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
- struct mm_struct *,
- unsigned long, unsigned long);
-static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
- unsigned long, unsigned long);
-
-static struct mmu_notifier_ops mn_opts = {
- .invalidate_page = mmu_notifier_page,
- .invalidate_range_start = mmu_notifier_range_start,
-};
-
-INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
- mmu_node_start, mmu_node_last, static, __mmu_int_rb);
-
-static unsigned long mmu_node_start(struct mmu_rb_node *node)
-{
- return node->addr & PAGE_MASK;
-}
-
-static unsigned long mmu_node_last(struct mmu_rb_node *node)
-{
- return PAGE_ALIGN((node->addr & PAGE_MASK) + node->len) - 1;
-}
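For example, a node with addr 0x1234 and len 0x100 on a 4 KiB-page system covers the inclusive interval [0x1000, 0x1fff]: mmu_node_start() rounds the address down to a page boundary, and mmu_node_last() rounds the end of the range up to the next page boundary and subtracts 1.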
-
-int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
-{
- struct mmu_rb_handler *handlr;
- unsigned long flags;
-
- if (!ops->invalidate)
- return -EINVAL;
-
- handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
- if (!handlr)
- return -ENOMEM;
-
- handlr->root = root;
- handlr->ops = ops;
- INIT_HLIST_NODE(&handlr->mn.hlist);
- spin_lock_init(&handlr->lock);
- handlr->mn.ops = &mn_opts;
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_add_tail(&handlr->list, &mmu_rb_handlers);
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
-
- return mmu_notifier_register(&handlr->mn, current->mm);
-}
-
-void hfi1_mmu_rb_unregister(struct rb_root *root)
-{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
- unsigned long flags;
-
- if (!handler)
- return;
-
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_del(&handler->list);
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
-
- if (!RB_EMPTY_ROOT(root)) {
- struct rb_node *node;
- struct mmu_rb_node *rbnode;
-
- while ((node = rb_first(root))) {
- rbnode = rb_entry(node, struct mmu_rb_node, node);
- rb_erase(node, root);
- if (handler->ops->remove)
- handler->ops->remove(root, rbnode, NULL);
- }
- }
-
- if (current->mm)
- mmu_notifier_unregister(&handler->mn, current->mm);
- kfree(handler);
-}
-
-int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
-{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
- struct mmu_rb_node *node;
- unsigned long flags;
- int ret = 0;
-
- if (!handler)
- return -EINVAL;
-
- spin_lock_irqsave(&handler->lock, flags);
-	hfi1_cdbg(MMU, "Inserting node addr 0x%lx, len %lu", mnode->addr,
-		  mnode->len);
- node = __mmu_rb_search(handler, mnode->addr, mnode->len);
- if (node) {
- ret = -EINVAL;
- goto unlock;
- }
- __mmu_int_rb_insert(mnode, root);
-
- if (handler->ops->insert) {
- ret = handler->ops->insert(root, mnode);
- if (ret)
- __mmu_int_rb_remove(mnode, root);
- }
-unlock:
- spin_unlock_irqrestore(&handler->lock, flags);
- return ret;
-}
-
-/* Caller must hold handler lock */
-static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
- unsigned long addr,
- unsigned long len)
-{
- struct mmu_rb_node *node = NULL;
-
-	hfi1_cdbg(MMU, "Searching for addr 0x%lx, len %lu", addr, len);
- if (!handler->ops->filter) {
- node = __mmu_int_rb_iter_first(handler->root, addr,
- (addr + len) - 1);
- } else {
- for (node = __mmu_int_rb_iter_first(handler->root, addr,
- (addr + len) - 1);
- node;
- node = __mmu_int_rb_iter_next(node, addr,
- (addr + len) - 1)) {
- if (handler->ops->filter(node, addr, len))
- return node;
- }
- }
- return node;
-}
-
-/* Caller must *not* hold handler lock. */
-static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, struct mm_struct *mm)
-{
- unsigned long flags;
-
- /* Validity of handler and node pointers has been checked by caller. */
-	hfi1_cdbg(MMU, "Removing node addr 0x%lx, len %lu", node->addr,
-		  node->len);
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_int_rb_remove(node, handler->root);
- spin_unlock_irqrestore(&handler->lock, flags);
-
- if (handler->ops->remove)
- handler->ops->remove(handler->root, node, mm);
-}
-
-struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
- unsigned long len)
-{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
- struct mmu_rb_node *node;
- unsigned long flags;
-
- if (!handler)
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&handler->lock, flags);
- node = __mmu_rb_search(handler, addr, len);
- spin_unlock_irqrestore(&handler->lock, flags);
-
- return node;
-}
-
-void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
-{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
-
- if (!handler || !node)
- return;
-
- __mmu_rb_remove(handler, node, NULL);
-}
-
-static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
-{
- struct mmu_rb_handler *handler;
- unsigned long flags;
-
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_for_each_entry(handler, &mmu_rb_handlers, list) {
- if (handler->root == root)
- goto unlock;
- }
- handler = NULL;
-unlock:
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
- return handler;
-}
-
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
- struct mm_struct *mm, unsigned long addr)
-{
- mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
-
-static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- mmu_notifier_mem_invalidate(mn, mm, start, end);
-}
-
-static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- struct mmu_rb_handler *handler =
- container_of(mn, struct mmu_rb_handler, mn);
- struct rb_root *root = handler->root;
- struct mmu_rb_node *node, *ptr = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&handler->lock, flags);
- for (node = __mmu_int_rb_iter_first(root, start, end - 1);
- node; node = ptr) {
- /* Guard against node removal. */
- ptr = __mmu_int_rb_iter_next(node, start, end - 1);
-		hfi1_cdbg(MMU, "Invalidating node addr 0x%lx, len %lu",
-			  node->addr, node->len);
- if (handler->ops->invalidate(root, node)) {
- spin_unlock_irqrestore(&handler->lock, flags);
- __mmu_rb_remove(handler, node, mm);
- spin_lock_irqsave(&handler->lock, flags);
- }
- }
- spin_unlock_irqrestore(&handler->lock, flags);
-}
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
deleted file mode 100644
index 19a306e83..000000000
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef _HFI1_MMU_RB_H
-#define _HFI1_MMU_RB_H
-
-#include "hfi.h"
-
-struct mmu_rb_node {
- unsigned long addr;
- unsigned long len;
- unsigned long __last;
- struct rb_node node;
-};
-
-struct mmu_rb_ops {
- bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
- int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
- int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
-};
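A hedged usage sketch: a client must supply at least the invalidate callback (hfi1_mmu_rb_register() rejects ops without it), and a non-zero return from it asks the notifier path to drop the node; all names below are illustrative:

	/* Sketch: minimal client of the hfi1 MMU RB interface */
	static int my_invalidate(struct rb_root *root, struct mmu_rb_node *node)
	{
		return 1;	/* non-zero: remove this node from the tree */
	}

	static struct mmu_rb_ops my_ops = {
		.invalidate = my_invalidate,
	};

	static struct rb_root my_root = RB_ROOT;

	/* in setup code: */
	int ret = hfi1_mmu_rb_register(&my_root, &my_ops);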
-
-int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops);
-void hfi1_mmu_rb_unregister(struct rb_root *);
-int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *);
-struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long,
- unsigned long);
-
-#endif /* _HFI1_MMU_RB_H */
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/staging/rdma/hfi1/opa_compat.h
deleted file mode 100644
index 6ef3c1cbd..000000000
--- a/drivers/staging/rdma/hfi1/opa_compat.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _OPA_COMPAT_H
-#define _OPA_COMPAT_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This header file is for OPA-specific definitions which are
- * required by the HFI driver, and which aren't yet in the Linux
- * IB core. We'll collect these all here, then merge them into
- * the kernel when that's convenient.
- */
-
-/* OPA SMA attribute IDs */
-#define OPA_ATTRIB_ID_CONGESTION_INFO cpu_to_be16(0x008b)
-#define OPA_ATTRIB_ID_HFI_CONGESTION_LOG cpu_to_be16(0x008f)
-#define OPA_ATTRIB_ID_HFI_CONGESTION_SETTING cpu_to_be16(0x0090)
-#define OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0091)
-
-/* OPA PMA attribute IDs */
-#define OPA_PM_ATTRIB_ID_PORT_STATUS cpu_to_be16(0x0040)
-#define OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS cpu_to_be16(0x0041)
-#define OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS cpu_to_be16(0x0042)
-#define OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS cpu_to_be16(0x0043)
-#define OPA_PM_ATTRIB_ID_ERROR_INFO cpu_to_be16(0x0044)
-
-/* OPA status codes */
-#define OPA_PM_STATUS_REQUEST_TOO_LARGE cpu_to_be16(0x100)
-
-static inline u8 port_states_to_logical_state(struct opa_port_states *ps)
-{
- return ps->portphysstate_portstate & OPA_PI_MASK_PORT_STATE;
-}
-
-static inline u8 port_states_to_phys_state(struct opa_port_states *ps)
-{
- return ((ps->portphysstate_portstate &
- OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4) & 0xf;
-}
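For example, a portphysstate_portstate byte of 0x54 yields physical state 5 (IB_PORTPHYSSTATE_LINKUP) from port_states_to_phys_state() and logical port state 4 from port_states_to_logical_state(), assuming OPA_PI_MASK_PORT_STATE covers the low nibble.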
-
-/*
- * OPA port physical states
- * IB Volume 1, Table 146 PortInfo/IB Volume 2 Section 5.4.2(1) PortPhysState
- * values.
- *
- * When writing, only values 0-3 are valid; other values are ignored.
- * When reading, 0 is reserved.
- *
- * Returned by the ibphys_portstate() routine.
- */
-enum opa_port_phys_state {
- IB_PORTPHYSSTATE_NOP = 0,
- /* 1 is reserved */
- IB_PORTPHYSSTATE_POLLING = 2,
- IB_PORTPHYSSTATE_DISABLED = 3,
- IB_PORTPHYSSTATE_TRAINING = 4,
- IB_PORTPHYSSTATE_LINKUP = 5,
- IB_PORTPHYSSTATE_LINK_ERROR_RECOVERY = 6,
- IB_PORTPHYSSTATE_PHY_TEST = 7,
- /* 8 is reserved */
- OPA_PORTPHYSSTATE_OFFLINE = 9,
- OPA_PORTPHYSSTATE_GANGED = 10,
- OPA_PORTPHYSSTATE_TEST = 11,
- OPA_PORTPHYSSTATE_MAX = 11,
- /* values 12-15 are reserved/ignored */
-};
-
-#endif /* _OPA_COMPAT_H */
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c
deleted file mode 100644
index 0bac21e6a..000000000
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ /dev/null
@@ -1,1338 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/aer.h>
-#include <linux/module.h>
-
-#include "hfi.h"
-#include "chip_registers.h"
-#include "aspm.h"
-
-/* link speed vector for Gen3 speed - not in Linux headers */
-#define GEN1_SPEED_VECTOR 0x1
-#define GEN2_SPEED_VECTOR 0x2
-#define GEN3_SPEED_VECTOR 0x3
-
-/*
- * This file contains PCIe utility routines.
- */
-
-/*
- * Code to adjust PCIe capabilities.
- */
-static void tune_pcie_caps(struct hfi1_devdata *);
-
-/*
- * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
- */
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- /*
- * This can happen (in theory) iff:
- * We did a chip reset, and then failed to reprogram the
- * BAR, or the chip reset due to an internal error. We then
- * unloaded the driver and reloaded it.
- *
- * Both reset cases set the BAR back to initial state. For
- * the latter case, the AER sticky error bit at offset 0x718
- * should be set, but the Linux kernel doesn't yet know
- * about that, it appears. If the original BAR was retained
- * in the kernel data structures, this may be OK.
- */
- hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
- }
-
- ret = pci_request_regions(pdev, DRIVER_NAME);
- if (ret) {
- hfi1_early_err(&pdev->dev,
- "pci_request_regions fails: err %d\n", -ret);
- goto bail;
- }
-
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- /*
- * If the 64 bit setup fails, try 32 bit. Some systems
- * do not setup 64 bit maps on systems with 2GB or less
- * memory installed.
- */
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA mask: %d\n", ret);
- goto bail;
- }
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- }
- if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA consistent mask: %d\n", ret);
- goto bail;
- }
-
- pci_set_master(pdev);
- (void)pci_enable_pcie_error_reporting(pdev);
- goto done;
-
-bail:
- hfi1_pcie_cleanup(pdev);
-done:
- return ret;
-}
-
-/*
- * Clean what was done in hfi1_pcie_init()
- */
-void hfi1_pcie_cleanup(struct pci_dev *pdev)
-{
- pci_disable_device(pdev);
- /*
- * Release regions should be called after the disable. OK to
- * call if request regions has not been called or failed.
- */
- pci_release_regions(pdev);
-}
-
-/*
- * Do remaining PCIe setup, once dd is allocated, and save away
- * fields required to re-initialize after a chip reset, or for
- * various other purposes
- */
-int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- unsigned long len;
- resource_size_t addr;
-
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
- addr = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
-
- /*
- * The TXE PIO buffers are at the tail end of the chip space.
- * Cut them off and map them separately.
- */
-
- /* sanity check vs expectations */
- if (len != TXE_PIO_SEND + TXE_PIO_SIZE) {
- dd_dev_err(dd, "chip PIO range does not match\n");
- return -EINVAL;
- }
-
- dd->kregbase = ioremap_nocache(addr, TXE_PIO_SEND);
- if (!dd->kregbase)
- return -ENOMEM;
-
- dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
- if (!dd->piobase) {
- iounmap(dd->kregbase);
- return -ENOMEM;
- }
-
- dd->flags |= HFI1_PRESENT; /* now register routines work */
-
- dd->kregend = dd->kregbase + TXE_PIO_SEND;
- dd->physaddr = addr; /* used for io_remap, etc. */
-
- /*
- * Re-map the chip's RcvArray as write-combining to allow us
- * to write an entire cacheline worth of entries in one shot.
- * If this re-map fails, just continue - the RcvArray programming
- * function will handle both cases.
- */
- dd->chip_rcv_array_count = read_csr(dd, RCV_ARRAY_CNT);
- dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
- dd->chip_rcv_array_count * 8);
- dd_dev_info(dd, "WC Remapped RcvArray: %p\n", dd->rcvarray_wc);
- /*
- * Save BARs and command to rewrite after device reset.
- */
- dd->pcibar0 = addr;
- dd->pcibar1 = addr >> 32;
- pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
- &dd->pcie_devctl2);
- pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
- pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, &dd->pci_lnkctl3);
- pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
-
- return 0;
-}
-
-/*
- * Do PCIe cleanup related to dd, after chip-specific cleanup, etc. Just prior
- * to releasing the dd memory.
- * Void because all of the core pcie cleanup functions are void.
- */
-void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
-{
- u64 __iomem *base = (void __iomem *)dd->kregbase;
-
- dd->flags &= ~HFI1_PRESENT;
- dd->kregbase = NULL;
- iounmap(base);
- if (dd->rcvarray_wc)
- iounmap(dd->rcvarray_wc);
- if (dd->piobase)
- iounmap(dd->piobase);
-}
-
-/*
- * Do a Function Level Reset (FLR) on the device.
- * Based on static function drivers/pci/pci.c:pcie_flr().
- */
-void hfi1_pcie_flr(struct hfi1_devdata *dd)
-{
- int i;
- u16 status;
-
- /* no need to check for the capability - we know the device has it */
-
-	/* wait for the Transaction Pending bit to clear; backs off up to ~700 ms */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dd_dev_err(dd, "Transaction Pending bit is not clearing, proceeding with reset anyway\n");
-
-clear:
- pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
- /* PCIe spec requires the function to be back within 100ms */
- msleep(100);
-}
-
-static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
- struct hfi1_msix_entry *hfi1_msix_entry)
-{
- int ret;
- int nvec = *msixcnt;
- struct msix_entry *msix_entry;
- int i;
-
-	/*
-	 * We can't pass the hfi1_msix_entry array to pci_enable_msix_range()
-	 * directly, so use a scratch msix_entry array and copy the allocated
-	 * irqs back into the hfi1_msix_entry array.
-	 */
- msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL);
- if (!msix_entry) {
- ret = -ENOMEM;
- goto do_intx;
- }
-
- for (i = 0; i < nvec; i++)
- msix_entry[i] = hfi1_msix_entry[i].msix;
-
- ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
- if (ret < 0)
- goto free_msix_entry;
- nvec = ret;
-
- for (i = 0; i < nvec; i++)
- hfi1_msix_entry[i].msix = msix_entry[i];
-
- kfree(msix_entry);
- *msixcnt = nvec;
- return;
-
-free_msix_entry:
- kfree(msix_entry);
-
-do_intx:
- dd_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
- nvec, ret);
- *msixcnt = 0;
- hfi1_enable_intx(dd->pcidev);
-}
-
-/* return the PCIe link speed from the given link status */
-static u32 extract_speed(u16 linkstat)
-{
- u32 speed;
-
- switch (linkstat & PCI_EXP_LNKSTA_CLS) {
- default: /* not defined, assume Gen1 */
- case PCI_EXP_LNKSTA_CLS_2_5GB:
- speed = 2500; /* Gen 1, 2.5GHz */
- break;
- case PCI_EXP_LNKSTA_CLS_5_0GB:
- speed = 5000; /* Gen 2, 5GHz */
- break;
- case GEN3_SPEED_VECTOR:
- speed = 8000; /* Gen 3, 8GHz */
- break;
- }
- return speed;
-}
-
-/* return the PCIe link width from the given link status */
-static u32 extract_width(u16 linkstat)
-{
- return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
-}
-
-/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
-static void update_lbus_info(struct hfi1_devdata *dd)
-{
- u16 linkstat;
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
- dd->lbus_width = extract_width(linkstat);
- dd->lbus_speed = extract_speed(linkstat);
- snprintf(dd->lbus_info, sizeof(dd->lbus_info),
- "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);
-}
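For a Gen3 x16 link, this records lbus_speed 8000, lbus_width 16, and an lbus_info string of "PCIe,8000MHz,x16".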
-
-/*
- * Read in the current PCIe link width and speed. Find if the link is
- * Gen3 capable.
- */
-int pcie_speeds(struct hfi1_devdata *dd)
-{
- u32 linkcap;
- struct pci_dev *parent = dd->pcidev->bus->self;
-
- if (!pci_is_pcie(dd->pcidev)) {
- dd_dev_err(dd, "Can't find PCI Express capability!\n");
- return -EINVAL;
- }
-
- /* find if our max speed is Gen3 and parent supports Gen3 speeds */
- dd->link_gen3_capable = 1;
-
- pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
- if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
- dd_dev_info(dd,
- "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
- linkcap & PCI_EXP_LNKCAP_SLS);
- dd->link_gen3_capable = 0;
- }
-
- /*
- * bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
- */
- if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
- dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
- dd->link_gen3_capable = 0;
- }
-
- /* obtain the link width and current speed */
- update_lbus_info(dd);
-
- dd_dev_info(dd, "%s\n", dd->lbus_info);
-
- return 0;
-}
-
-/*
- * Returns in *nent:
- * - actual number of interrupts allocated
- * - 0 if fell back to INTx.
- */
-void request_msix(struct hfi1_devdata *dd, u32 *nent,
- struct hfi1_msix_entry *entry)
-{
- int pos;
-
- pos = dd->pcidev->msix_cap;
- if (*nent && pos) {
- msix_setup(dd, pos, nent, entry);
- /* did it, either MSI-X or INTx */
- } else {
- *nent = 0;
- hfi1_enable_intx(dd->pcidev);
- }
-
- tune_pcie_caps(dd);
-}
-
-void hfi1_enable_intx(struct pci_dev *pdev)
-{
- /* first, turn on INTx */
- pci_intx(pdev, 1);
- /* then turn off MSI-X */
- pci_disable_msix(pdev);
-}
-
-/* restore command and BARs after a reset has wiped them out */
-void restore_pci_variables(struct hfi1_devdata *dd)
-{
- pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0);
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1);
- pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
- dd->pcie_devctl2);
- pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
- pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, dd->pci_lnkctl3);
- pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
-}
-
-/*
- * BIOS may not set PCIe bus-utilization parameters for best performance.
- * Check and optionally adjust them to maximize our throughput.
- */
-static int hfi1_pcie_caps;
-module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
-
-uint aspm_mode = ASPM_MODE_DISABLED;
-module_param_named(aspm, aspm_mode, uint, S_IRUGO);
-MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-
-static void tune_pcie_caps(struct hfi1_devdata *dd)
-{
- struct pci_dev *parent;
- u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
- u16 rc_mrrs, ep_mrrs, max_mrrs, ectl;
-
-	/*
-	 * Turn on extended tags in DevCtl in case the BIOS has turned them
-	 * off; extended tags improve WFR SDMA bandwidth.
-	 */
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
- if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
- dd_dev_info(dd, "Enabling PCIe extended tags\n");
- ectl |= PCI_EXP_DEVCTL_EXT_TAG;
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
- }
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- /*
- * The driver cannot perform the tuning if it does not have
- * access to the upstream component.
- */
- if (!parent)
- return;
- if (!pci_is_root_bus(parent->bus)) {
- dd_dev_info(dd, "Parent not root\n");
- return;
- }
-
- if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return;
- rc_mpss = parent->pcie_mpss;
- rc_mps = ffs(pcie_get_mps(parent)) - 8;
- /* Find out supported and configured values for endpoint (us) */
- ep_mpss = dd->pcidev->pcie_mpss;
- ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
-
- /* Find max payload supported by root, endpoint */
- if (rc_mpss > ep_mpss)
- rc_mpss = ep_mpss;
-
-	/* If the supported value exceeds the module parameter limit, clamp it */
- if (rc_mpss > (hfi1_pcie_caps & 7))
- rc_mpss = hfi1_pcie_caps & 7;
-	/* If the root's payload is below min(allowed, supported), bump it */
- if (rc_mpss > rc_mps) {
- rc_mps = rc_mpss;
- pcie_set_mps(parent, 128 << rc_mps);
- }
-	/* If the endpoint's payload is below min(allowed, supported), bump it */
- if (rc_mpss > ep_mps) {
- ep_mps = rc_mpss;
- pcie_set_mps(dd->pcidev, 128 << ep_mps);
- }
-
- /*
- * Now the Read Request size.
- * No field for max supported, but PCIe spec limits it to 4096,
- * which is code '5' (log2(4096) - 7)
- */
- max_mrrs = 5;
- if (max_mrrs > ((hfi1_pcie_caps >> 4) & 7))
- max_mrrs = (hfi1_pcie_caps >> 4) & 7;
-
- max_mrrs = 128 << max_mrrs;
- rc_mrrs = pcie_get_readrq(parent);
- ep_mrrs = pcie_get_readrq(dd->pcidev);
-
- if (max_mrrs > rc_mrrs) {
- rc_mrrs = max_mrrs;
- pcie_set_readrq(parent, rc_mrrs);
- }
- if (max_mrrs > ep_mrrs) {
- ep_mrrs = max_mrrs;
- pcie_set_readrq(dd->pcidev, ep_mrrs);
- }
-}
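-
-/*
- * Worked example of the encoding used above (values hypothetical):
- * PCIe encodes MPS/MRRS as 128 << code, so code 0 = 128B, 1 = 256B,
- * ... 5 = 4096B. pcie_get_mps() returns bytes, and ffs(256) - 8 =
- * 9 - 8 = 1 recovers the code. With pcie_caps = 0x51, the payload
- * limit is 0x51 & 7 = 1 (256B) and the read request limit is
- * (0x51 >> 4) & 7 = 5 (4096B).
- */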
-
-/* End of PCIe capability tuning */
-
-/*
- * From here through hfi1_pci_err_handler definition is invoked via
- * PCI error infrastructure, registered via pci
- */
-static pci_ers_result_t
-pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
-{
- struct hfi1_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- switch (state) {
- case pci_channel_io_normal:
- dd_dev_info(dd, "State Normal, ignoring\n");
- break;
-
- case pci_channel_io_frozen:
- dd_dev_info(dd, "State Frozen, requesting reset\n");
- pci_disable_device(pdev);
- ret = PCI_ERS_RESULT_NEED_RESET;
- break;
-
- case pci_channel_io_perm_failure:
- if (dd) {
- dd_dev_info(dd, "State Permanent Failure, disabling\n");
- /* no more register accesses! */
- dd->flags &= ~HFI1_PRESENT;
- hfi1_disable_after_error(dd);
- }
- /* else early, or other problem */
- ret = PCI_ERS_RESULT_DISCONNECT;
- break;
-
- default: /* shouldn't happen */
- dd_dev_info(dd, "HFI1 PCI errors detected (state %d)\n",
- state);
- break;
- }
- return ret;
-}
-
-static pci_ers_result_t
-pci_mmio_enabled(struct pci_dev *pdev)
-{
- u64 words = 0U;
- struct hfi1_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- if (dd && dd->pport) {
- words = read_port_cntr(dd->pport, C_RX_WORDS, CNTR_INVALID_VL);
- if (words == ~0ULL)
- ret = PCI_ERS_RESULT_NEED_RESET;
- dd_dev_info(dd,
-			    "HFI1 mmio_enabled function called, read words counter %Lx, returning %d\n",
- words, ret);
- }
- return ret;
-}
-
-static pci_ers_result_t
-pci_slot_reset(struct pci_dev *pdev)
-{
- struct hfi1_devdata *dd = pci_get_drvdata(pdev);
-
- dd_dev_info(dd, "HFI1 slot_reset function called, ignored\n");
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static pci_ers_result_t
-pci_link_reset(struct pci_dev *pdev)
-{
- struct hfi1_devdata *dd = pci_get_drvdata(pdev);
-
- dd_dev_info(dd, "HFI1 link_reset function called, ignored\n");
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static void
-pci_resume(struct pci_dev *pdev)
-{
- struct hfi1_devdata *dd = pci_get_drvdata(pdev);
-
- dd_dev_info(dd, "HFI1 resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
- /*
-	 * Running jobs will fail, since this reset is asynchronous,
-	 * unlike a sysfs-requested reset. Still better than doing
-	 * nothing.
- */
- hfi1_init(dd, 1); /* same as re-init after reset */
-}
-
-const struct pci_error_handlers hfi1_pci_err_handler = {
- .error_detected = pci_error_detected,
- .mmio_enabled = pci_mmio_enabled,
- .link_reset = pci_link_reset,
- .slot_reset = pci_slot_reset,
- .resume = pci_resume,
-};
-
-/*============================================================================*/
-/* PCIe Gen3 support */
-
-/*
- * This code is separated out because it is expected to be removed in the
- * final shipping product. If not, then it will be revisited and items
- * will be moved to more standard locations.
- */
-
-/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_STS field values */
-#define DL_STATUS_HFI0 0x1 /* hfi0 firmware download complete */
-#define DL_STATUS_HFI1 0x2 /* hfi1 firmware download complete */
-#define DL_STATUS_BOTH 0x3 /* hfi0 and hfi1 firmware download complete */
-
-/* ASIC_PCI_SD_HOST_STATUS.FW_DNLD_ERR field values */
-#define DL_ERR_NONE 0x0 /* no error */
-#define DL_ERR_SWAP_PARITY 0x1 /* parity error in SerDes interrupt */
- /* or response data */
-#define DL_ERR_DISABLED 0x2 /* hfi disabled */
-#define DL_ERR_SECURITY 0x3 /* security check failed */
-#define DL_ERR_SBUS 0x4 /* SBus status error */
-#define DL_ERR_XFR_PARITY  0x5	/* parity error during ROM transfer */
-
-/* gasket block secondary bus reset delay */
-#define SBR_DELAY_US 200000 /* 200ms */
-
-/* mask for PCIe capability register lnkctl2 target link speed */
-#define LNKCTL2_TARGET_LINK_SPEED_MASK 0xf
-
-static uint pcie_target = 3;
-module_param(pcie_target, uint, S_IRUGO);
-MODULE_PARM_DESC(pcie_target, "PCIe target speed (0 skip, 1-3 Gen1-3)");
-
-static uint pcie_force;
-module_param(pcie_force, uint, S_IRUGO);
-MODULE_PARM_DESC(pcie_force, "Force driver to do a PCIe firmware download even if already at target speed");
-
-static uint pcie_retry = 5;
-module_param(pcie_retry, uint, S_IRUGO);
-MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested speed");
-
-#define UNSET_PSET 255
-#define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
-#define DEFAULT_MCP_PSET 4 /* MCP HFI */
-static uint pcie_pset = UNSET_PSET;
-module_param(pcie_pset, uint, S_IRUGO);
-MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
-
-/* equalization columns */
-#define PREC 0
-#define ATTN 1
-#define POST 2
-
-/* discrete silicon preliminary equalization values */
-static const u8 discrete_preliminary_eq[11][3] = {
- /* prec attn post */
- { 0x00, 0x00, 0x12 }, /* p0 */
- { 0x00, 0x00, 0x0c }, /* p1 */
- { 0x00, 0x00, 0x0f }, /* p2 */
- { 0x00, 0x00, 0x09 }, /* p3 */
- { 0x00, 0x00, 0x00 }, /* p4 */
- { 0x06, 0x00, 0x00 }, /* p5 */
- { 0x09, 0x00, 0x00 }, /* p6 */
- { 0x06, 0x00, 0x0f }, /* p7 */
- { 0x09, 0x00, 0x09 }, /* p8 */
- { 0x0c, 0x00, 0x00 }, /* p9 */
- { 0x00, 0x00, 0x18 }, /* p10 */
-};
-
-/* integrated silicon preliminary equalization values */
-static const u8 integrated_preliminary_eq[11][3] = {
- /* prec attn post */
- { 0x00, 0x1e, 0x07 }, /* p0 */
- { 0x00, 0x1e, 0x05 }, /* p1 */
- { 0x00, 0x1e, 0x06 }, /* p2 */
- { 0x00, 0x1e, 0x04 }, /* p3 */
- { 0x00, 0x1e, 0x00 }, /* p4 */
- { 0x03, 0x1e, 0x00 }, /* p5 */
- { 0x04, 0x1e, 0x00 }, /* p6 */
- { 0x03, 0x1e, 0x06 }, /* p7 */
- { 0x03, 0x1e, 0x04 }, /* p8 */
- { 0x05, 0x1e, 0x00 }, /* p9 */
- { 0x00, 0x1e, 0x0a }, /* p10 */
-};
-
-/* helper to format the value to write to hardware */
-#define eq_value(pre, curr, post) \
- ((((u32)(pre)) << \
- PCIE_CFG_REG_PL102_GEN3_EQ_PRE_CURSOR_PSET_SHIFT) \
- | (((u32)(curr)) << PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT) \
- | (((u32)(post)) << \
- PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT))
-
-/*
- * Load the given EQ preset table into the PCIe hardware.
- */
-static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
- u8 div)
-{
- struct pci_dev *pdev = dd->pcidev;
- u32 hit_error = 0;
- u32 violation;
- u32 i;
- u8 c_minus1, c0, c_plus1;
-
- for (i = 0; i < 11; i++) {
- /* set index */
- pci_write_config_dword(pdev, PCIE_CFG_REG_PL103, i);
- /* write the value */
- c_minus1 = eq[i][PREC] / div;
- c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
- c_plus1 = eq[i][POST] / div;
- pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
- eq_value(c_minus1, c0, c_plus1));
- /* check if these coefficients violate EQ rules */
- pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105,
- &violation);
-		if (violation
-		    & PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK) {
- if (hit_error == 0) {
- dd_dev_err(dd,
- "Gen3 EQ Table Coefficient rule violations\n");
- dd_dev_err(dd, " prec attn post\n");
- }
- dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
- i, (u32)eq[i][0], (u32)eq[i][1],
- (u32)eq[i][2]);
- dd_dev_err(dd, " %02x %02x %02x\n",
- (u32)c_minus1, (u32)c0, (u32)c_plus1);
- hit_error = 1;
- }
- }
- if (hit_error)
- return -EINVAL;
- return 0;
-}
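-
-/*
- * A worked row as a sketch: for the discrete table entry p7
- * { 0x06, 0x00, 0x0f } with fs = 24 and div = 3, the coefficients are
- * c_minus1 = 6 / 3 = 2, c_plus1 = 15 / 3 = 5, and
- * c0 = 24 - 2 - 5 = 17; eq_value() then packs the three cursor fields
- * into the single PL102 register write.
- */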
-
-/*
- * Steps to be done after the PCIe firmware is downloaded and
- * before the SBR for PCIe Gen3.
- * The SBus resource is already being held.
- */
-static void pcie_post_steps(struct hfi1_devdata *dd)
-{
- int i;
-
- set_sbus_fast_mode(dd);
- /*
- * Write to the PCIe PCSes to set the G3_LOCKED_NEXT bits to 1.
- * This avoids a spurious framing error that can otherwise be
- * generated by the MAC layer.
- *
- * Use individual addresses since no broadcast is set up.
- */
- for (i = 0; i < NUM_PCIE_SERDES; i++) {
- sbus_request(dd, pcie_pcs_addrs[dd->hfi1_id][i],
- 0x03, WRITE_SBUS_RECEIVER, 0x00022132);
- }
-
- clear_sbus_fast_mode(dd);
-}
-
-/*
- * Trigger a secondary bus reset (SBR) on ourselves using our parent.
- *
- * Based on pci_parent_bus_reset() which is not exported by the
- * kernel core.
- */
-static int trigger_sbr(struct hfi1_devdata *dd)
-{
- struct pci_dev *dev = dd->pcidev;
- struct pci_dev *pdev;
-
- /* need a parent */
- if (!dev->bus->self) {
- dd_dev_err(dd, "%s: no parent device\n", __func__);
- return -ENOTTY;
- }
-
- /* should not be anyone else on the bus */
- list_for_each_entry(pdev, &dev->bus->devices, bus_list)
- if (pdev != dev) {
- dd_dev_err(dd,
- "%s: another device is on the same bus\n",
- __func__);
- return -ENOTTY;
- }
-
- /*
- * A secondary bus reset (SBR) issues a hot reset to our device.
- * The following routine does a 1s wait after the reset is dropped
- * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 -
- * Conventional Reset, paragraph 3, line 35 also says that a 1s
- * delay after a reset is required. Per spec requirements,
- * the link is either working or not after that point.
- */
- pci_reset_bridge_secondary_bus(dev->bus->self);
-
- return 0;
-}
-
-/*
- * Write the given gasket interrupt register.
- */
-static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
- u16 code, u16 data)
-{
- write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
- (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
- ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
-}
-
-/*
- * Tell the gasket logic how to react to the reset.
- */
-static void arm_gasket_logic(struct hfi1_devdata *dd)
-{
- u64 reg;
-
- reg = (((u64)1 << dd->hfi1_id) <<
- ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
- ((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
- ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
- ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
- ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
- ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
- write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
- /* read back to push the write */
- read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
-}
-
-/*
- * CCE_PCIE_CTRL long name helpers
- * We redefine these shorter macros to use in the code while leaving
- * chip_registers.h to be autogenerated from the hardware spec.
- */
-#define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK
-#define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT
-#define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK
-#define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT
-#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT
-#define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT
-#define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
-#define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT
-#define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
-#define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT
-
-/*
- * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C).
- */
-static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
-{
- u64 pcie_ctrl;
- u64 xmt_margin;
- u64 xmt_margin_oe;
- u64 lane_delay;
- u64 lane_bundle;
-
- pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);
-
- /*
- * For Discrete, use full-swing.
- * - PCIe TX defaults to full-swing.
- * Leave this register as default.
- * For Integrated, use half-swing
- * - Copy xmt_margin and xmt_margin_oe
- * from Gen1/Gen2 to Gen3.
- */
- if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */
- /* extract initial fields */
- xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT)
- & MARGIN_GEN1_GEN2_MASK;
- xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT)
- & MARGIN_G1_G2_OVERWRITE_MASK;
- lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK;
- lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT)
- & LANE_BUNDLE_MASK;
-
- /*
- * For A0, EFUSE values are not set. Override with the
- * correct values.
- */
- if (is_ax(dd)) {
- /*
-			 * xmt_margin and OverwriteEnable should be the
-			 * same for Gen1/Gen2 and Gen3.
- */
- xmt_margin = 0x5;
- xmt_margin_oe = 0x1;
- lane_delay = 0xF; /* Delay 240ns. */
- lane_bundle = 0x0; /* Set to 1 lane. */
- }
-
- /* overwrite existing values */
- pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT)
- | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT)
- | (xmt_margin << MARGIN_SHIFT)
- | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT)
- | (lane_delay << LANE_DELAY_SHIFT)
- | (lane_bundle << LANE_BUNDLE_SHIFT);
-
- write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
- }
-
- dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
- fname, pcie_ctrl);
-}
-
-/*
- * Do all the steps needed to transition the PCIe link to Gen3 speed.
- */
-int do_pcie_gen3_transition(struct hfi1_devdata *dd)
-{
- struct pci_dev *parent = dd->pcidev->bus->self;
- u64 fw_ctrl;
- u64 reg, therm;
- u32 reg32, fs, lf;
- u32 status, err;
- int ret;
- int do_retry, retry_count = 0;
- uint default_pset;
- u16 target_vector, target_speed;
- u16 lnkctl2, vendor;
- u8 div;
- const u8 (*eq)[3];
- int return_error = 0;
-
- /* PCIe Gen3 is for the ASIC only */
- if (dd->icode != ICODE_RTL_SILICON)
- return 0;
-
- if (pcie_target == 1) { /* target Gen1 */
- target_vector = GEN1_SPEED_VECTOR;
- target_speed = 2500;
- } else if (pcie_target == 2) { /* target Gen2 */
- target_vector = GEN2_SPEED_VECTOR;
- target_speed = 5000;
- } else if (pcie_target == 3) { /* target Gen3 */
- target_vector = GEN3_SPEED_VECTOR;
- target_speed = 8000;
- } else {
- /* off or invalid target - skip */
- dd_dev_info(dd, "%s: Skipping PCIe transition\n", __func__);
- return 0;
- }
-
- /* if already at target speed, done (unless forced) */
- if (dd->lbus_speed == target_speed) {
- dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
- pcie_target,
- pcie_force ? "re-doing anyway" : "skipping");
- if (!pcie_force)
- return 0;
- }
-
- /*
- * The driver cannot do the transition if it has no access to the
- * upstream component
- */
- if (!parent) {
- dd_dev_info(dd, "%s: No upstream, Can't do gen3 transition\n",
- __func__);
- return 0;
- }
-
- /*
- * Do the Gen3 transition. Steps are those of the PCIe Gen3
- * recipe.
- */
-
- /* step 1: pcie link working in gen1/gen2 */
-
- /* step 2: if either side is not capable of Gen3, done */
- if (pcie_target == 3 && !dd->link_gen3_capable) {
- dd_dev_err(dd, "The PCIe link is not Gen3 capable\n");
- ret = -ENOSYS;
- goto done_no_mutex;
- }
-
- /* hold the SBus resource across the firmware download and SBR */
- ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
- if (ret) {
- dd_dev_err(dd, "%s: unable to acquire SBus resource\n",
- __func__);
- return ret;
- }
-
- /* make sure thermal polling is not causing interrupts */
- therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
- if (therm) {
- write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
- msleep(100);
- dd_dev_info(dd, "%s: Disabled therm polling\n",
- __func__);
- }
-
-retry:
- /* the SBus download will reset the spico for thermal */
-
- /* step 3: download SBus Master firmware */
- /* step 4: download PCIe Gen3 SerDes firmware */
- dd_dev_info(dd, "%s: downloading firmware\n", __func__);
- ret = load_pcie_firmware(dd);
- if (ret) {
- /* do not proceed if the firmware cannot be downloaded */
- return_error = 1;
- goto done;
- }
-
- /* step 5: set up device parameter settings */
- dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
-
- /*
- * PcieCfgSpcie1 - Link Control 3
- * Leave at reset value. No need to set PerfEq - link equalization
- * will be performed automatically after the SBR when the target
- * speed is 8GT/s.
- */
-
- /* clear all 16 per-lane error bits (PCIe: Lane Error Status) */
- pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, 0xffff);
-
- /* step 5a: Set Synopsys Port Logic registers */
-
- /*
- * PcieCfgRegPl2 - Port Force Link
- *
- * Set the low power field to 0x10 to avoid unnecessary power
- * management messages. All other fields are zero.
- */
- reg32 = 0x10ul << PCIE_CFG_REG_PL2_LOW_PWR_ENT_CNT_SHIFT;
- pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL2, reg32);
-
- /*
- * PcieCfgRegPl100 - Gen3 Control
- *
- * turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl
- * turn on PcieCfgRegPl100.EqEieosCnt
- * Everything else zero.
- */
- reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
- pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL100, reg32);
-
- /*
- * PcieCfgRegPl101 - Gen3 EQ FS and LF
- * PcieCfgRegPl102 - Gen3 EQ Presets to Coefficients Mapping
- * PcieCfgRegPl103 - Gen3 EQ Preset Index
- * PcieCfgRegPl105 - Gen3 EQ Status
- *
- * Give initial EQ settings.
- */
- if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0) { /* discrete */
- /* 1000mV, FS=24, LF = 8 */
- fs = 24;
- lf = 8;
- div = 3;
- eq = discrete_preliminary_eq;
- default_pset = DEFAULT_DISCRETE_PSET;
- } else {
- /* 400mV, FS=29, LF = 9 */
- fs = 29;
- lf = 9;
- div = 1;
- eq = integrated_preliminary_eq;
- default_pset = DEFAULT_MCP_PSET;
- }
- pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
- (fs <<
- PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
- (lf <<
- PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
- ret = load_eq_table(dd, eq, fs, div);
- if (ret)
- goto done;
-
- /*
- * PcieCfgRegPl106 - Gen3 EQ Control
- *
- * Set Gen3EqPsetReqVec, leave other fields 0.
- */
- if (pcie_pset == UNSET_PSET)
- pcie_pset = default_pset;
- if (pcie_pset > 10) { /* valid range is 0-10, inclusive */
- dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
- __func__, pcie_pset, default_pset);
- pcie_pset = default_pset;
- }
- dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset);
- pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
- ((1 << pcie_pset) <<
- PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
- PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
- PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
-
- /*
- * step 5b: Do post firmware download steps via SBus
- */
- dd_dev_info(dd, "%s: doing pcie post steps\n", __func__);
- pcie_post_steps(dd);
-
- /*
- * step 5c: Program gasket interrupts
- */
- /* set the Rx Bit Rate to REFCLK ratio */
- write_gasket_interrupt(dd, 0, 0x0006, 0x0050);
- /* disable pCal for PCIe Gen3 RX equalization */
- write_gasket_interrupt(dd, 1, 0x0026, 0x5b01);
- /*
- * Enable iCal for PCIe Gen3 RX equalization, and set which
- * evaluation of RX_EQ_EVAL will launch the iCal procedure.
- */
- write_gasket_interrupt(dd, 2, 0x0026, 0x5202);
- /* terminate list */
- write_gasket_interrupt(dd, 3, 0x0000, 0x0000);
-
- /*
- * step 5d: program XMT margin
- */
- write_xmt_margin(dd, __func__);
-
- /*
- * step 5e: disable active state power management (ASPM). It
- * will be enabled if required later
- */
- dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
- aspm_hw_disable_l1(dd);
-
- /*
- * step 5f: clear DirectSpeedChange
- * PcieCfgRegPl67.DirectSpeedChange must be zero to prevent the
- * change in the speed target from starting before we are ready.
- * This field defaults to 0 and we are not changing it, so nothing
- * needs to be done.
- */
-
- /* step 5g: Set target link speed */
- /*
- * Set target link speed to be target on both device and parent.
- * On setting the parent: Some system BIOSs "helpfully" set the
- * parent target speed to Gen2 to match the ASIC's initial speed.
-	 * We can set the target to Gen3 because we already checked
-	 * that the link is Gen3 capable.
- */
- dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
- pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
- dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- /* only write to parent if target is not as high as ours */
- if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
- lnkctl2 |= target_vector;
- dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2);
- } else {
- dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
- }
-
- dd_dev_info(dd, "%s: setting target link speed\n", __func__);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
- dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
- lnkctl2 |= target_vector;
- dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
-
- /* step 5h: arm gasket logic */
- /* hold DC in reset across the SBR */
- write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
- (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
- /* save firmware control across the SBR */
- fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
-
- dd_dev_info(dd, "%s: arming gasket logic\n", __func__);
- arm_gasket_logic(dd);
-
- /*
- * step 6: quiesce PCIe link
- * The chip has already been reset, so there will be no traffic
- * from the chip. Linux has no easy way to enforce that it will
- * not try to access the device, so we just need to hope it doesn't
- * do it while we are doing the reset.
- */
-
- /*
- * step 7: initiate the secondary bus reset (SBR)
- * step 8: hardware brings the links back up
- * step 9: wait for link speed transition to be complete
- */
- dd_dev_info(dd, "%s: calling trigger_sbr\n", __func__);
- ret = trigger_sbr(dd);
- if (ret)
- goto done;
-
- /* step 10: decide what to do next */
-
- /* check if we can read PCI space */
- ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
- if (ret) {
- dd_dev_info(dd,
- "%s: read of VendorID failed after SBR, err %d\n",
- __func__, ret);
- return_error = 1;
- goto done;
- }
- if (vendor == 0xffff) {
- dd_dev_info(dd, "%s: VendorID is all 1s after SBR\n", __func__);
- return_error = 1;
- ret = -EIO;
- goto done;
- }
-
- /* restore PCI space registers we know were reset */
- dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
- restore_pci_variables(dd);
- /* restore firmware control */
- write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
-
- /*
- * Check the gasket block status.
- *
- * This is the first CSR read after the SBR. If the read returns
- * all 1s (fails), the link did not make it back.
- *
- * Once we're sure we can read and write, clear the DC reset after
- * the SBR. Then check for any per-lane errors. Then look over
- * the status.
- */
- reg = read_csr(dd, ASIC_PCIE_SD_HOST_STATUS);
- dd_dev_info(dd, "%s: gasket block status: 0x%llx\n", __func__, reg);
- if (reg == ~0ull) { /* PCIe read failed/timeout */
- dd_dev_err(dd, "SBR failed - unable to read from device\n");
- return_error = 1;
- ret = -ENOSYS;
- goto done;
- }
-
- /* clear the DC reset */
- write_csr(dd, CCE_DC_CTRL, 0);
-
- /* Set the LED off */
- setextled(dd, 0);
-
- /* check for any per-lane errors */
- pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
- dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
-
- /* extract status, look for our HFI */
- status = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_SHIFT)
- & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
- if ((status & (1 << dd->hfi1_id)) == 0) {
- dd_dev_err(dd,
- "%s: gasket status 0x%x, expecting 0x%x\n",
- __func__, status, 1 << dd->hfi1_id);
- ret = -EIO;
- goto done;
- }
-
- /* extract error */
- err = (reg >> ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_SHIFT)
- & ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_ERR_MASK;
- if (err) {
- dd_dev_err(dd, "%s: gasket error %d\n", __func__, err);
- ret = -EIO;
- goto done;
- }
-
- /* update our link information cache */
- update_lbus_info(dd);
- dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
- dd->lbus_info);
-
- if (dd->lbus_speed != target_speed) { /* not target */
- /* maybe retry */
- do_retry = retry_count < pcie_retry;
- dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
- pcie_target, do_retry ? ", retrying" : "");
- retry_count++;
- if (do_retry) {
- msleep(100); /* allow time to settle */
- goto retry;
- }
- ret = -EIO;
- }
-
-done:
- if (therm) {
- write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
- msleep(100);
- dd_dev_info(dd, "%s: Re-enable therm polling\n",
- __func__);
- }
- release_chip_resource(dd, CR_SBUS);
-done_no_mutex:
- /* return no error if it is OK to be at current speed */
- if (ret && !return_error) {
-		dd_dev_err(dd, "Proceeding at current PCIe speed\n");
- ret = 0;
- }
-
- dd_dev_info(dd, "%s: done\n", __func__);
- return ret;
-}
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c
deleted file mode 100644
index c6849ce9e..000000000
--- a/drivers/staging/rdma/hfi1/pio.c
+++ /dev/null
@@ -1,2043 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/delay.h>
-#include "hfi.h"
-#include "qp.h"
-#include "trace.h"
-
-#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
-
-#define SC(name) SEND_CTXT_##name
-/*
- * Send Context functions
- */
-static void sc_wait_for_packet_egress(struct send_context *sc, int pause);
-
-/*
- * Set the CM reset bit and wait for it to clear. Use the provided
- * sendctrl register. This routine has no locking.
- */
-void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
-{
- write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK);
- while (1) {
- udelay(1);
- sendctrl = read_csr(dd, SEND_CTRL);
- if ((sendctrl & SEND_CTRL_CM_RESET_SMASK) == 0)
- break;
- }
-}
-
-/* defined in header release 48 and higher */
-#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
-#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
-#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
-#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
- << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
-#endif
-
-/* global control of PIO send */
-void pio_send_control(struct hfi1_devdata *dd, int op)
-{
- u64 reg, mask;
- unsigned long flags;
- int write = 1; /* write sendctrl back */
- int flush = 0; /* re-read sendctrl to make sure it is flushed */
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- reg = read_csr(dd, SEND_CTRL);
- switch (op) {
- case PSC_GLOBAL_ENABLE:
- reg |= SEND_CTRL_SEND_ENABLE_SMASK;
- /* Fall through */
- case PSC_DATA_VL_ENABLE:
- /* Disallow sending on VLs not enabled */
- mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
- SEND_CTRL_UNSUPPORTED_VL_SHIFT;
- reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
- break;
- case PSC_GLOBAL_DISABLE:
- reg &= ~SEND_CTRL_SEND_ENABLE_SMASK;
- break;
- case PSC_GLOBAL_VLARB_ENABLE:
- reg |= SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
- break;
- case PSC_GLOBAL_VLARB_DISABLE:
- reg &= ~SEND_CTRL_VL_ARBITER_ENABLE_SMASK;
- break;
- case PSC_CM_RESET:
- __cm_reset(dd, reg);
- write = 0; /* CSR already written (and flushed) */
- break;
- case PSC_DATA_VL_DISABLE:
- reg |= SEND_CTRL_UNSUPPORTED_VL_SMASK;
- flush = 1;
- break;
- default:
- dd_dev_err(dd, "%s: invalid control %d\n", __func__, op);
- break;
- }
-
- if (write) {
- write_csr(dd, SEND_CTRL, reg);
- if (flush)
- (void)read_csr(dd, SEND_CTRL); /* flush write */
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/* number of send context memory pools */
-#define NUM_SC_POOLS 2
-
-/* Send Context Size (SCS) wildcards */
-#define SCS_POOL_0 -1
-#define SCS_POOL_1 -2
-/* Send Context Count (SCC) wildcards */
-#define SCC_PER_VL -1
-#define SCC_PER_CPU -2
-
-#define SCC_PER_KRCVQ -3
-#define SCC_ACK_CREDITS 32
-
-#define PIO_WAIT_BATCH_SIZE 5
-
-/* default send context sizes */
-static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
- [SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
-			.count = SCC_PER_VL },	/* one per VL */
- [SC_ACK] = { .size = SCC_ACK_CREDITS,
- .count = SCC_PER_KRCVQ },
- [SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
- .count = SCC_PER_CPU }, /* one per CPU */
-};
-
-/* send context memory pool configuration */
-struct mem_pool_config {
- int centipercent; /* % of memory, in 100ths of 1% */
- int absolute_blocks; /* absolute block count */
-};
-
-/* default memory pool configuration: 100% in pool 0 */
-static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
- /* centi%, abs blocks */
- { 10000, -1 }, /* pool 0 */
- { 0, -1 }, /* pool 1 */
-};
-
-/* memory pool information, used when calculating final sizes */
-struct mem_pool_info {
- int centipercent; /*
- * 100th of 1% of memory to use, -1 if blocks
- * already set
- */
- int count; /* count of contexts in the pool */
- int blocks; /* block size of the pool */
- int size; /* context size, in blocks */
-};
-
-/*
- * Convert a pool wildcard to a valid pool index. The wildcards
- * start at -1 and increase negatively. Map them as:
- * -1 => 0
- * -2 => 1
- * etc.
- *
- * Return -1 on non-wildcard input, otherwise convert to a pool number.
- */
-static int wildcard_to_pool(int wc)
-{
- if (wc >= 0)
- return -1; /* non-wildcard */
- return -wc - 1;
-}
-
-static const char *sc_type_names[SC_MAX] = {
- "kernel",
- "ack",
- "user"
-};
-
-static const char *sc_type_name(int index)
-{
- if (index < 0 || index >= SC_MAX)
- return "unknown";
- return sc_type_names[index];
-}
-
-/*
- * Read the send context memory pool configuration and send context
- * size configuration. Replace any wildcards and come up with final
- * counts and sizes for the send context types.
- */
-int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
-{
- struct mem_pool_info mem_pool_info[NUM_SC_POOLS] = { { 0 } };
- int total_blocks = (dd->chip_pio_mem_size / PIO_BLOCK_SIZE) - 1;
- int total_contexts = 0;
- int fixed_blocks;
- int pool_blocks;
- int used_blocks;
- int cp_total; /* centipercent total */
- int ab_total; /* absolute block total */
- int extra;
- int i;
-
- /*
- * Step 0:
- * - copy the centipercents/absolute sizes from the pool config
- * - sanity check these values
- * - add up centipercents, then later check for full value
- * - add up absolute blocks, then later check for over-commit
- */
- cp_total = 0;
- ab_total = 0;
- for (i = 0; i < NUM_SC_POOLS; i++) {
- int cp = sc_mem_pool_config[i].centipercent;
- int ab = sc_mem_pool_config[i].absolute_blocks;
-
- /*
- * A negative value is "unused" or "invalid". Both *can*
- * be valid, but centipercent wins, so check that first
- */
- if (cp >= 0) { /* centipercent valid */
- cp_total += cp;
- } else if (ab >= 0) { /* absolute blocks valid */
- ab_total += ab;
- } else { /* neither valid */
- dd_dev_err(
- dd,
- "Send context memory pool %d: both the block count and centipercent are invalid\n",
- i);
- return -EINVAL;
- }
-
- mem_pool_info[i].centipercent = cp;
- mem_pool_info[i].blocks = ab;
- }
-
- /* do not use both % and absolute blocks for different pools */
- if (cp_total != 0 && ab_total != 0) {
- dd_dev_err(
- dd,
- "All send context memory pools must be described as either centipercent or blocks, no mixing between pools\n");
- return -EINVAL;
- }
-
- /* if any percentages are present, they must add up to 100% x 100 */
- if (cp_total != 0 && cp_total != 10000) {
- dd_dev_err(
- dd,
- "Send context memory pool centipercent is %d, expecting 10000\n",
- cp_total);
- return -EINVAL;
- }
-
- /* the absolute pool total cannot be more than the mem total */
- if (ab_total > total_blocks) {
- dd_dev_err(
- dd,
- "Send context memory pool absolute block count %d is larger than the memory size %d\n",
- ab_total, total_blocks);
- return -EINVAL;
- }
-
- /*
- * Step 2:
- * - copy from the context size config
- * - replace context type wildcard counts with real values
- * - add up non-memory pool block sizes
- * - add up memory pool user counts
- */
- fixed_blocks = 0;
- for (i = 0; i < SC_MAX; i++) {
- int count = sc_config_sizes[i].count;
- int size = sc_config_sizes[i].size;
- int pool;
-
- /*
- * Sanity check count: Either a positive value or
- * one of the expected wildcards is valid. The positive
- * value is checked later when we compare against total
- * memory available.
- */
- if (i == SC_ACK) {
- count = dd->n_krcv_queues;
- } else if (i == SC_KERNEL) {
- count = (INIT_SC_PER_VL * num_vls) + 1 /* VL15 */;
- } else if (count == SCC_PER_CPU) {
- count = dd->num_rcv_contexts - dd->n_krcv_queues;
- } else if (count < 0) {
- dd_dev_err(
- dd,
- "%s send context invalid count wildcard %d\n",
- sc_type_name(i), count);
- return -EINVAL;
- }
- if (total_contexts + count > dd->chip_send_contexts)
- count = dd->chip_send_contexts - total_contexts;
-
- total_contexts += count;
-
- /*
- * Sanity check pool: The conversion will return a pool
- * number or -1 if a fixed (non-negative) value. The fixed
- * value is checked later when we compare against
- * total memory available.
- */
- pool = wildcard_to_pool(size);
- if (pool == -1) { /* non-wildcard */
- fixed_blocks += size * count;
- } else if (pool < NUM_SC_POOLS) { /* valid wildcard */
- mem_pool_info[pool].count += count;
- } else { /* invalid wildcard */
- dd_dev_err(
- dd,
- "%s send context invalid pool wildcard %d\n",
- sc_type_name(i), size);
- return -EINVAL;
- }
-
- dd->sc_sizes[i].count = count;
- dd->sc_sizes[i].size = size;
- }
- if (fixed_blocks > total_blocks) {
- dd_dev_err(
- dd,
- "Send context fixed block count, %u, larger than total block count %u\n",
- fixed_blocks, total_blocks);
- return -EINVAL;
- }
-
- /* step 3: calculate the blocks in the pools, and pool context sizes */
- pool_blocks = total_blocks - fixed_blocks;
- if (ab_total > pool_blocks) {
- dd_dev_err(
- dd,
- "Send context fixed pool sizes, %u, larger than pool block count %u\n",
- ab_total, pool_blocks);
- return -EINVAL;
- }
- /* subtract off the fixed pool blocks */
- pool_blocks -= ab_total;
-
- for (i = 0; i < NUM_SC_POOLS; i++) {
- struct mem_pool_info *pi = &mem_pool_info[i];
-
- /* % beats absolute blocks */
- if (pi->centipercent >= 0)
- pi->blocks = (pool_blocks * pi->centipercent) / 10000;
-
- if (pi->blocks == 0 && pi->count != 0) {
- dd_dev_err(
- dd,
- "Send context memory pool %d has %u contexts, but no blocks\n",
- i, pi->count);
- return -EINVAL;
- }
- if (pi->count == 0) {
- /* warn about wasted blocks */
- if (pi->blocks != 0)
- dd_dev_err(
- dd,
- "Send context memory pool %d has %u blocks, but zero contexts\n",
- i, pi->blocks);
- pi->size = 0;
- } else {
- pi->size = pi->blocks / pi->count;
- }
- }
-
- /* step 4: fill in the context type sizes from the pool sizes */
- used_blocks = 0;
- for (i = 0; i < SC_MAX; i++) {
- if (dd->sc_sizes[i].size < 0) {
- unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size);
-
- WARN_ON_ONCE(pool >= NUM_SC_POOLS);
- dd->sc_sizes[i].size = mem_pool_info[pool].size;
- }
- /* make sure we are not larger than what is allowed by the HW */
-#define PIO_MAX_BLOCKS 1024
- if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS)
- dd->sc_sizes[i].size = PIO_MAX_BLOCKS;
-
- /* calculate our total usage */
- used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count;
- }
- extra = total_blocks - used_blocks;
- if (extra != 0)
- dd_dev_info(dd, "unused send context blocks: %d\n", extra);
-
- return total_contexts;
-}
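-
-/*
- * Sizing sketch (all numbers hypothetical): with 2048 PIO blocks
- * (total_blocks = 2047 after the -1 above), 8 kernel receive queues
- * (8 ack contexts of 32 blocks, fixed_blocks = 256) and the default
- * 100%-in-pool-0 config, pool_blocks = 1791. If 9 kernel and 16 user
- * contexts share pool 0, each pool-0 context gets 1791 / 25 = 71
- * blocks.
- */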
-
-int init_send_contexts(struct hfi1_devdata *dd)
-{
- u16 base;
- int ret, i, j, context;
-
- ret = init_credit_return(dd);
- if (ret)
- return ret;
-
- dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8),
- GFP_KERNEL);
- dd->send_contexts = kcalloc(dd->num_send_contexts,
- sizeof(struct send_context_info),
- GFP_KERNEL);
- if (!dd->send_contexts || !dd->hw_to_sw) {
- kfree(dd->hw_to_sw);
- kfree(dd->send_contexts);
- free_credit_return(dd);
- return -ENOMEM;
- }
-
- /* hardware context map starts with invalid send context indices */
- for (i = 0; i < TXE_NUM_CONTEXTS; i++)
- dd->hw_to_sw[i] = INVALID_SCI;
-
- /*
- * All send contexts have their credit sizes. Allocate credits
- * for each context one after another from the global space.
- */
- context = 0;
- base = 1;
- for (i = 0; i < SC_MAX; i++) {
- struct sc_config_sizes *scs = &dd->sc_sizes[i];
-
- for (j = 0; j < scs->count; j++) {
- struct send_context_info *sci =
- &dd->send_contexts[context];
- sci->type = i;
- sci->base = base;
- sci->credits = scs->size;
-
- context++;
- base += scs->size;
- }
- }
-
- return 0;
-}
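-
-/*
- * Layout sketch: credits are carved out back to back starting at
- * base 1, so with two kernel contexts of 64 credits each, context 0
- * gets { base = 1, credits = 64 } and context 1 gets
- * { base = 65, credits = 64 }.
- */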
-
-/*
- * Allocate a software index and hardware context of the given type.
- *
- * Must be called with dd->sc_lock held.
- */
-static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index,
- u32 *hw_context)
-{
- struct send_context_info *sci;
- u32 index;
- u32 context;
-
- for (index = 0, sci = &dd->send_contexts[0];
- index < dd->num_send_contexts; index++, sci++) {
- if (sci->type == type && sci->allocated == 0) {
- sci->allocated = 1;
- /* use a 1:1 mapping, but make them non-equal */
- context = dd->chip_send_contexts - index - 1;
- dd->hw_to_sw[context] = index;
- *sw_index = index;
- *hw_context = context;
- return 0; /* success */
- }
- }
- dd_dev_err(dd, "Unable to locate a free type %d send context\n", type);
- return -ENOSPC;
-}
-
-/*
- * Free the send context given by its software index.
- *
- * Must be called with dd->sc_lock held.
- */
-static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
-{
- struct send_context_info *sci;
-
- sci = &dd->send_contexts[sw_index];
- if (!sci->allocated) {
- dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
- __func__, sw_index, hw_context);
- }
- sci->allocated = 0;
- dd->hw_to_sw[hw_context] = INVALID_SCI;
-}
-
-/* return the base context of a context in a group */
-static inline u32 group_context(u32 context, u32 group)
-{
- return (context >> group) << group;
-}
-
-/* return the size of a group */
-static inline u32 group_size(u32 group)
-{
- return 1 << group;
-}
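-
-/*
- * Example: with group = 2, a group spans group_size(2) = 4 contexts,
- * and contexts 12..15 all yield group_context(ctxt, 2) = 12, the base
- * context whose credit return address the whole group shares.
- */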
-
-/*
- * Obtain the credit return addresses, kernel virtual and physical, for the
- * given sc.
- *
- * To understand this routine:
- * o va and pa are arrays of struct credit_return. One for each physical
- * send context, per NUMA.
- * o Each send context always looks in its relative location in a struct
- * credit_return for its credit return.
- * o Each send context in a group must have its return address CSR programmed
- * with the same value. Use the address of the first send context in the
- * group.
- */
-static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
-{
- u32 gc = group_context(sc->hw_context, sc->group);
- u32 index = sc->hw_context & 0x7;
-
- sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
- *pa = (unsigned long)
- &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
-}
-
-/*
- * Work queue function triggered in error interrupt routine for
- * kernel contexts.
- */
-static void sc_halted(struct work_struct *work)
-{
- struct send_context *sc;
-
- sc = container_of(work, struct send_context, halt_work);
- sc_restart(sc);
-}
-
-/*
- * Calculate PIO block threshold for this send context using the given MTU.
- * Trigger a return when one MTU plus optional header of credits remain.
- *
- * Parameter mtu is in bytes.
- * Parameter hdrqentsize is in DWORDs.
- *
- * Return value is what to write into the CSR: trigger return when
- * unreturned credits pass this count.
- */
-u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
-{
- u32 release_credits;
- u32 threshold;
-
- /* add in the header size, then divide by the PIO block size */
- mtu += hdrqentsize << 2;
- release_credits = DIV_ROUND_UP(mtu, PIO_BLOCK_SIZE);
-
- /* check against this context's credits */
- if (sc->credits <= release_credits)
- threshold = 1;
- else
- threshold = sc->credits - release_credits;
-
- return threshold;
-}
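-
-/*
- * Worked example, assuming the chip's 64-byte PIO block: mtu = 8192
- * and hdrqentsize = 32 DWORDs give 8192 + 128 = 8320 bytes, so
- * release_credits = DIV_ROUND_UP(8320, 64) = 130. A context with 160
- * credits then gets a threshold of 160 - 130 = 30.
- */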
-
-/*
- * Calculate credit threshold in terms of percent of the allocated credits.
- * Trigger when unreturned credits equal or exceed the percentage of the whole.
- *
- * Return value is what to write into the CSR: trigger return when
- * unreturned credits pass this count.
- */
-static u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
-{
- return (sc->credits * percent) / 100;
-}
-
-/*
- * Set the credit return threshold.
- */
-void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
-{
- unsigned long flags;
- u32 old_threshold;
- int force_return = 0;
-
- spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
-
- old_threshold = (sc->credit_ctrl >>
- SC(CREDIT_CTRL_THRESHOLD_SHIFT))
- & SC(CREDIT_CTRL_THRESHOLD_MASK);
-
- if (new_threshold != old_threshold) {
- sc->credit_ctrl =
- (sc->credit_ctrl
- & ~SC(CREDIT_CTRL_THRESHOLD_SMASK))
- | ((new_threshold
- & SC(CREDIT_CTRL_THRESHOLD_MASK))
- << SC(CREDIT_CTRL_THRESHOLD_SHIFT));
- write_kctxt_csr(sc->dd, sc->hw_context,
- SC(CREDIT_CTRL), sc->credit_ctrl);
-
- /* force a credit return on change to avoid a possible stall */
- force_return = 1;
- }
-
- spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
-
- if (force_return)
- sc_return_credits(sc);
-}
-
-/*
- * set_pio_integrity
- *
- * Set the CHECK_ENABLE register for the send context 'sc'.
- */
-void set_pio_integrity(struct send_context *sc)
-{
- struct hfi1_devdata *dd = sc->dd;
- u64 reg = 0;
- u32 hw_context = sc->hw_context;
- int type = sc->type;
-
- /*
- * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if
- * we're snooping.
- */
- if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
- reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
-
- write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
-}
-
-static u32 get_buffers_allocated(struct send_context *sc)
-{
- int cpu;
- u32 ret = 0;
-
- for_each_possible_cpu(cpu)
- ret += *per_cpu_ptr(sc->buffers_allocated, cpu);
- return ret;
-}
-
-static void reset_buffers_allocated(struct send_context *sc)
-{
- int cpu;
-
- for_each_possible_cpu(cpu)
- (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0;
-}
-
-/*
- * Allocate a NUMA relative send context structure of the given type along
- * with a HW context.
- */
-struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
- uint hdrqentsize, int numa)
-{
- struct send_context_info *sci;
- struct send_context *sc = NULL;
- dma_addr_t pa;
- unsigned long flags;
- u64 reg;
- u32 thresh;
- u32 sw_index;
- u32 hw_context;
- int ret;
- u8 opval, opmask;
-
- /* do not allocate while frozen */
- if (dd->flags & HFI1_FROZEN)
- return NULL;
-
- sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
- if (!sc)
- return NULL;
-
- sc->buffers_allocated = alloc_percpu(u32);
- if (!sc->buffers_allocated) {
- kfree(sc);
- dd_dev_err(dd,
-			   "Cannot allocate buffers_allocated per cpu counters\n");
- return NULL;
- }
-
- spin_lock_irqsave(&dd->sc_lock, flags);
- ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
- if (ret) {
- spin_unlock_irqrestore(&dd->sc_lock, flags);
- free_percpu(sc->buffers_allocated);
- kfree(sc);
- return NULL;
- }
-
- sci = &dd->send_contexts[sw_index];
- sci->sc = sc;
-
- sc->dd = dd;
- sc->node = numa;
- sc->type = type;
- spin_lock_init(&sc->alloc_lock);
- spin_lock_init(&sc->release_lock);
- spin_lock_init(&sc->credit_ctrl_lock);
- INIT_LIST_HEAD(&sc->piowait);
- INIT_WORK(&sc->halt_work, sc_halted);
- init_waitqueue_head(&sc->halt_wait);
-
- /* grouping is always single context for now */
- sc->group = 0;
-
- sc->sw_index = sw_index;
- sc->hw_context = hw_context;
- cr_group_addresses(sc, &pa);
- sc->credits = sci->credits;
-
-/* PIO Send Memory Address details */
-#define PIO_ADDR_CONTEXT_MASK 0xfful
-#define PIO_ADDR_CONTEXT_SHIFT 16
- sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK)
- << PIO_ADDR_CONTEXT_SHIFT);
-
- /* set base and credits */
- reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK))
- << SC(CTRL_CTXT_DEPTH_SHIFT))
- | ((sci->base & SC(CTRL_CTXT_BASE_MASK))
- << SC(CTRL_CTXT_BASE_SHIFT));
- write_kctxt_csr(dd, hw_context, SC(CTRL), reg);
-
- set_pio_integrity(sc);
-
- /* unmask all errors */
- write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1);
-
- /* set the default partition key */
- write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
- (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
- DEFAULT_PKEY) <<
- SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
-
- /* per context type checks */
- if (type == SC_USER) {
- opval = USER_OPCODE_CHECK_VAL;
- opmask = USER_OPCODE_CHECK_MASK;
- } else {
- opval = OPCODE_CHECK_VAL_DISABLED;
- opmask = OPCODE_CHECK_MASK_DISABLED;
- }
-
- /* set the send context check opcode mask and value */
- write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
- ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
- ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
-
- /* set up credit return */
- reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
- write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
-
- /*
- * Calculate the initial credit return threshold.
- *
- * For Ack contexts, set a threshold for half the credits.
- * For User contexts use the given percentage. This has been
- * sanitized on driver start-up.
- * For Kernel contexts, use the default MTU plus a header.
- */
- if (type == SC_ACK) {
- thresh = sc_percent_to_threshold(sc, 50);
- } else if (type == SC_USER) {
- thresh = sc_percent_to_threshold(sc,
- user_credit_return_threshold);
- } else { /* kernel */
- thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
- }
- reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
- /* add in early return */
- if (type == SC_USER && HFI1_CAP_IS_USET(EARLY_CREDIT_RETURN))
- reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
- else if (HFI1_CAP_IS_KSET(EARLY_CREDIT_RETURN)) /* kernel, ack */
- reg |= SC(CREDIT_CTRL_EARLY_RETURN_SMASK);
-
- /* set up write-through credit_ctrl */
- sc->credit_ctrl = reg;
- write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg);
-
- /* User send contexts should not allow sending on VL15 */
- if (type == SC_USER) {
- reg = 1ULL << 15;
- write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg);
- }
-
- spin_unlock_irqrestore(&dd->sc_lock, flags);
-
- /*
- * Allocate shadow ring to track outstanding PIO buffers _after_
- * unlocking. We don't know the size until the lock is held and
- * we can't allocate while the lock is held. No one is using
- * the context yet, so allocate it now.
- *
- * User contexts do not get a shadow ring.
- */
- if (type != SC_USER) {
- /*
- * Size the shadow ring 1 larger than the number of credits
- * so head == tail can mean empty.
- */
- sc->sr_size = sci->credits + 1;
- sc->sr = kzalloc_node(sizeof(union pio_shadow_ring) *
- sc->sr_size, GFP_KERNEL, numa);
- if (!sc->sr) {
- sc_free(sc);
- return NULL;
- }
- }
-
- hfi1_cdbg(PIO,
- "Send context %u(%u) %s group %u credits %u credit_ctrl 0x%llx threshold %u\n",
- sw_index,
- hw_context,
- sc_type_name(type),
- sc->group,
- sc->credits,
- sc->credit_ctrl,
- thresh);
-
- return sc;
-}
-
-/* free a per-NUMA send context structure */
-void sc_free(struct send_context *sc)
-{
- struct hfi1_devdata *dd;
- unsigned long flags;
- u32 sw_index;
- u32 hw_context;
-
- if (!sc)
- return;
-
- sc->flags |= SCF_IN_FREE; /* ensure no restarts */
- dd = sc->dd;
- if (!list_empty(&sc->piowait))
- dd_dev_err(dd, "piowait list not empty!\n");
- sw_index = sc->sw_index;
- hw_context = sc->hw_context;
- sc_disable(sc); /* make sure the HW is disabled */
- flush_work(&sc->halt_work);
-
- spin_lock_irqsave(&dd->sc_lock, flags);
- dd->send_contexts[sw_index].sc = NULL;
-
- /* clear/disable all registers set in sc_alloc */
- write_kctxt_csr(dd, hw_context, SC(CTRL), 0);
- write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0);
- write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0);
- write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0);
- write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0);
- write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0);
- write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0);
-
- /* release the index and context for re-use */
- sc_hw_free(dd, sw_index, hw_context);
- spin_unlock_irqrestore(&dd->sc_lock, flags);
-
- kfree(sc->sr);
- free_percpu(sc->buffers_allocated);
- kfree(sc);
-}
-
-/* disable the context */
-void sc_disable(struct send_context *sc)
-{
- u64 reg;
- unsigned long flags;
- struct pio_buf *pbuf;
-
- if (!sc)
- return;
-
- /* do all steps, even if already disabled */
- spin_lock_irqsave(&sc->alloc_lock, flags);
- reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
- reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
- sc->flags &= ~SCF_ENABLED;
- sc_wait_for_packet_egress(sc, 1);
- write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
-
- /*
- * Flush any waiters. Once the context is disabled,
- * credit return interrupts are stopped (although there
- * could be one in-process when the context is disabled).
- * Wait one microsecond for any lingering interrupts, then
- * proceed with the flush.
- */
- udelay(1);
- spin_lock_irqsave(&sc->release_lock, flags);
- if (sc->sr) { /* this context has a shadow ring */
- while (sc->sr_tail != sc->sr_head) {
- pbuf = &sc->sr[sc->sr_tail].pbuf;
- if (pbuf->cb)
- (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE);
- sc->sr_tail++;
- if (sc->sr_tail >= sc->sr_size)
- sc->sr_tail = 0;
- }
- }
- spin_unlock_irqrestore(&sc->release_lock, flags);
-}
-
-/* return SendEgressCtxtStatus.PacketOccupancy */
-#define packet_occupancy(r) \
- (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
- >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
-
-/* is egress halted on the context? */
-#define egress_halted(r) \
- ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
-
-/* wait for packet egress, optionally pause for credit return */
-static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
-{
- struct hfi1_devdata *dd = sc->dd;
- u64 reg = 0;
- u64 reg_prev;
- u32 loop = 0;
-
- while (1) {
- reg_prev = reg;
- reg = read_csr(dd, sc->hw_context * 8 +
- SEND_EGRESS_CTXT_STATUS);
- /* done if egress is stopped */
- if (egress_halted(reg))
- break;
- reg = packet_occupancy(reg);
- if (reg == 0)
- break;
- /* counter is reset if occupancy count changes */
- if (reg != reg_prev)
- loop = 0;
- if (loop > 500) {
- /* timed out - bounce the link */
- dd_dev_err(dd,
- "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
- __func__, sc->sw_index,
- sc->hw_context, (u32)reg);
- queue_work(dd->pport->hfi1_wq,
- &dd->pport->link_bounce_work);
- break;
- }
- loop++;
- udelay(1);
- }
-
- if (pause)
- /* Add additional delay to ensure chip returns all credits */
- pause_for_credit_return(dd);
-}
-
-void sc_wait(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < dd->num_send_contexts; i++) {
- struct send_context *sc = dd->send_contexts[i].sc;
-
- if (!sc)
- continue;
- sc_wait_for_packet_egress(sc, 0);
- }
-}
-
-/*
- * Restart a context after it has been halted due to error.
- *
- * If the first step, waiting for the halt to be asserted, fails, return
- * early. For later steps, complain about timeouts but keep going.
- *
- * It is expected that allocations (enabled flag bit) have been shut off
- * already (only applies to kernel contexts).
- */
-int sc_restart(struct send_context *sc)
-{
- struct hfi1_devdata *dd = sc->dd;
- u64 reg;
- u32 loop;
- int count;
-
- /* bounce off if not halted, or being free'd */
- if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE))
- return -EINVAL;
-
- dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
- sc->hw_context);
-
- /*
- * Step 1: Wait for the context to actually halt.
- *
- * The error interrupt is asynchronous to actually setting halt
- * on the context.
- */
- loop = 0;
- while (1) {
- reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS));
- if (reg & SC(STATUS_CTXT_HALTED_SMASK))
- break;
- if (loop > 100) {
- dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
- __func__, sc->sw_index, sc->hw_context);
- return -ETIME;
- }
- loop++;
- udelay(1);
- }
-
- /*
- * Step 2: Ensure no users are still trying to write to PIO.
- *
- * For kernel contexts, we have already turned off buffer allocation.
- * Now wait for the buffer count to go to zero.
- *
- * For user contexts, the user handling code has cut off write access
- * to the context's PIO pages before calling this routine and will
- * restore write access after this routine returns.
- */
- if (sc->type != SC_USER) {
- /* kernel context */
- loop = 0;
- while (1) {
- count = get_buffers_allocated(sc);
- if (count == 0)
- break;
- if (loop > 100) {
- dd_dev_err(dd,
- "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
- __func__, sc->sw_index,
- sc->hw_context, count);
- }
- loop++;
- udelay(1);
- }
- }
-
- /*
- * Step 3: Wait for all packets to egress.
- * This is done while disabling the send context.
- *
- * Step 4: Disable the context
- *
- * This is a superset of the halt. After the disable, the
- * errors can be cleared.
- */
- sc_disable(sc);
-
- /*
- * Step 5: Enable the context
- *
- * This enable will clear the halted flag and per-send context
- * error flags.
- */
- return sc_enable(sc);
-}
-
-/*
- * PIO freeze processing. To be called after the TXE block is fully frozen.
- * Go through all frozen send contexts and disable them. The contexts are
- * already stopped by the freeze.
- */
-void pio_freeze(struct hfi1_devdata *dd)
-{
- struct send_context *sc;
- int i;
-
- for (i = 0; i < dd->num_send_contexts; i++) {
- sc = dd->send_contexts[i].sc;
- /*
- * Don't disable unallocated, unfrozen, or user send contexts.
- * User send contexts will be disabled when the process
- * calls into the driver to reset its context.
- */
- if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
- continue;
-
- /* only need to disable, the context is already stopped */
- sc_disable(sc);
- }
-}
-
-/*
- * Unfreeze PIO for kernel send contexts. The precondition for calling this
- * is that all PIO send contexts have been disabled and the SPC freeze has
- * been cleared. Now perform the last step and re-enable each kernel context.
- * User (PSM) processing will occur when PSM calls into the kernel to
- * acknowledge the freeze.
- */
-void pio_kernel_unfreeze(struct hfi1_devdata *dd)
-{
- struct send_context *sc;
- int i;
-
- for (i = 0; i < dd->num_send_contexts; i++) {
- sc = dd->send_contexts[i].sc;
- if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
- continue;
-
- sc_enable(sc); /* will clear the sc frozen flag */
- }
-}
-
-/*
- * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
- * Returns:
- * -ETIMEDOUT - if we wait too long
- * -EIO - if there was an error
- */
-static int pio_init_wait_progress(struct hfi1_devdata *dd)
-{
- u64 reg;
- int max, count = 0;
-
- /* max is the longest possible HW init time / delay */
- max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5;
- while (1) {
- reg = read_csr(dd, SEND_PIO_INIT_CTXT);
- if (!(reg & SEND_PIO_INIT_CTXT_PIO_INIT_IN_PROGRESS_SMASK))
- break;
- if (count >= max)
- return -ETIMEDOUT;
- udelay(5);
- count++;
- }
-
- return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0;
-}
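-
-/*
- * Timing note (illustrative sketch): with udelay(5) per poll, the wait
- * above gives up after about 5 * 5 = 25 microseconds on real hardware
- * and 120 * 5 = 600 microseconds under FPGA emulation.
- */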
-
-/*
- * Reset all of the send contexts to their power-on state. Used
- * only during manual init - no lock against sc_enable needed.
- */
-void pio_reset_all(struct hfi1_devdata *dd)
-{
- int ret;
-
- /* make sure the init engine is not busy */
- ret = pio_init_wait_progress(dd);
- /* ignore any timeout */
- if (ret == -EIO) {
- /* clear the error */
- write_csr(dd, SEND_PIO_ERR_CLEAR,
- SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
- }
-
- /* reset init all */
- write_csr(dd, SEND_PIO_INIT_CTXT,
- SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
- udelay(2);
- ret = pio_init_wait_progress(dd);
- if (ret < 0) {
- dd_dev_err(dd,
- "PIO send context init %s while initializing all PIO blocks\n",
- ret == -ETIMEDOUT ? "is stuck" : "had an error");
- }
-}
-
-/* enable the context */
-int sc_enable(struct send_context *sc)
-{
- u64 sc_ctrl, reg, pio;
- struct hfi1_devdata *dd;
- unsigned long flags;
- int ret = 0;
-
- if (!sc)
- return -EINVAL;
- dd = sc->dd;
-
- /*
- * Obtain the allocator lock to guard against any allocation
- * attempts (which should not happen prior to context being
- * enabled). On the release/disable side we don't need to
- * worry about locking since the releaser will not do anything
- * if the context accounting values have not changed.
- */
- spin_lock_irqsave(&sc->alloc_lock, flags);
- sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
- if ((sc_ctrl & SC(CTRL_CTXT_ENABLE_SMASK)))
- goto unlock; /* already enabled */
-
- /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */
-
- *sc->hw_free = 0;
- sc->free = 0;
- sc->alloc_free = 0;
- sc->fill = 0;
- sc->sr_head = 0;
- sc->sr_tail = 0;
- sc->flags = 0;
-	/* the alloc lock ensures no fast path allocation */
- reset_buffers_allocated(sc);
-
- /*
- * Clear all per-context errors. Some of these will be set when
- * we are re-enabling after a context halt. Now that the context
- * is disabled, the halt will not clear until after the PIO init
- * engine runs below.
- */
- reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
- if (reg)
- write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
-
- /*
- * The HW PIO initialization engine can handle only one init
- * request at a time. Serialize access to each device's engine.
- */
- spin_lock(&dd->sc_init_lock);
- /*
- * Since access to this code block is serialized and
- * each access waits for the initialization to complete
- * before releasing the lock, the PIO initialization engine
- * should not be in use, so we don't have to wait for the
- * InProgress bit to go down.
- */
- pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) <<
- SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_SHIFT) |
- SEND_PIO_INIT_CTXT_PIO_SINGLE_CTXT_INIT_SMASK;
- write_csr(dd, SEND_PIO_INIT_CTXT, pio);
- /*
- * Wait until the engine is done. Give the chip the required time
- * so, hopefully, we read the register just once.
- */
- udelay(2);
- ret = pio_init_wait_progress(dd);
- spin_unlock(&dd->sc_init_lock);
- if (ret) {
- dd_dev_err(dd,
- "sctxt%u(%u): Context not enabled due to init failure %d\n",
- sc->sw_index, sc->hw_context, ret);
- goto unlock;
- }
-
- /*
- * All is well. Enable the context.
- */
- sc_ctrl |= SC(CTRL_CTXT_ENABLE_SMASK);
- write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl);
- /*
- * Read SendCtxtCtrl to force the write out and prevent a timing
- * hazard where a PIO write may reach the context before the enable.
- */
- read_kctxt_csr(dd, sc->hw_context, SC(CTRL));
- sc->flags |= SCF_ENABLED;
-
-unlock:
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
-
- return ret;
-}
-
-/* force a credit return on the context */
-void sc_return_credits(struct send_context *sc)
-{
- if (!sc)
- return;
-
- /* a 0->1 transition schedules a credit return */
- write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
- SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
- /*
- * Ensure that the write is flushed and the credit return is
- * scheduled. We care more about the 0 -> 1 transition.
- */
- read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE));
- /* set back to 0 for next time */
- write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0);
-}
-
-/* allow all in-flight packets to drain on the context */
-void sc_flush(struct send_context *sc)
-{
- if (!sc)
- return;
-
- sc_wait_for_packet_egress(sc, 1);
-}
-
-/* drop all packets on the context, no waiting until they are sent */
-void sc_drop(struct send_context *sc)
-{
- if (!sc)
- return;
-
- dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
-}
-
-/*
- * Start the software reaction to a context halt or SPC freeze:
- * - mark the context as halted or frozen
- * - stop buffer allocations
- *
- * Called from the error interrupt. Other work is deferred until
- * out of the interrupt.
- */
-void sc_stop(struct send_context *sc, int flag)
-{
- unsigned long flags;
-
- /* mark the context */
- sc->flags |= flag;
-
- /* stop buffer allocations */
- spin_lock_irqsave(&sc->alloc_lock, flags);
- sc->flags &= ~SCF_ENABLED;
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
- wake_up(&sc->halt_wait);
-}
-
-#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
-#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
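-
-/*
- * Illustrative example (a sketch, assuming PIO_BLOCK_SIZE is 64
- * bytes): BLOCK_DWORDS = 64 / sizeof(u32) = 16, so a 70-dword packet
- * (PBC included) needs dwords_to_blocks(70) = DIV_ROUND_UP(70, 16) =
- * 5 blocks.
- */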
-
-/*
- * The send context buffer "allocator".
- *
- * @sc: the PIO send context we are allocating from
- * @dw_len: length of the whole packet - including PBC - in dwords
- * @cb: optional callback to call when the buffer is finished sending
- * @arg: argument for cb
- *
- * Return a pointer to a PIO buffer if successful, NULL if not enough room.
- */
-struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
- pio_release_cb cb, void *arg)
-{
- struct pio_buf *pbuf = NULL;
- unsigned long flags;
- unsigned long avail;
- unsigned long blocks = dwords_to_blocks(dw_len);
- unsigned long start_fill;
- int trycount = 0;
- u32 head, next;
-
- spin_lock_irqsave(&sc->alloc_lock, flags);
- if (!(sc->flags & SCF_ENABLED)) {
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
- goto done;
- }
-
-retry:
- avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free);
- if (blocks > avail) {
- /* not enough room */
- if (unlikely(trycount)) { /* already tried to get more room */
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
- goto done;
- }
- /* copy from receiver cache line and recalculate */
- sc->alloc_free = ACCESS_ONCE(sc->free);
- avail =
- (unsigned long)sc->credits -
- (sc->fill - sc->alloc_free);
- if (blocks > avail) {
- /* still no room, actively update */
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
- sc_release_update(sc);
- spin_lock_irqsave(&sc->alloc_lock, flags);
- sc->alloc_free = ACCESS_ONCE(sc->free);
- trycount++;
- goto retry;
- }
- }
-
- /* there is enough room */
-
- preempt_disable();
- this_cpu_inc(*sc->buffers_allocated);
-
- /* read this once */
- head = sc->sr_head;
-
- /* "allocate" the buffer */
- start_fill = sc->fill;
- sc->fill += blocks;
-
- /*
- * Fill the parts that the releaser looks at before moving the head.
- * The only necessary piece is the sent_at field. The credits
- * we have just allocated cannot have been returned yet, so the
- * cb and arg will not be looked at for a "while". Put them
- * on this side of the memory barrier anyway.
- */
- pbuf = &sc->sr[head].pbuf;
- pbuf->sent_at = sc->fill;
- pbuf->cb = cb;
- pbuf->arg = arg;
- pbuf->sc = sc; /* could be filled in at sc->sr init time */
- /* make sure this is in memory before updating the head */
-
- /* calculate next head index, do not store */
- next = head + 1;
- if (next >= sc->sr_size)
- next = 0;
- /*
- * update the head - must be last! - the releaser can look at fields
- * in pbuf once we move the head
- */
- smp_wmb();
- sc->sr_head = next;
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
-
- /* finish filling in the buffer outside the lock */
- pbuf->start = sc->base_addr + ((start_fill % sc->credits)
- * PIO_BLOCK_SIZE);
- pbuf->size = sc->credits * PIO_BLOCK_SIZE;
- pbuf->end = sc->base_addr + pbuf->size;
- pbuf->block_count = blocks;
- pbuf->qw_written = 0;
- pbuf->carry_bytes = 0;
- pbuf->carry.val64 = 0;
-done:
- return pbuf;
-}
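-
-/*
- * Worked example of the availability check above (illustrative): with
- * sc->credits = 64, fill = 100 and alloc_free = 40, the context has
- * 100 - 40 = 60 blocks outstanding, so avail = 64 - 60 = 4 blocks.
- * Because fill and alloc_free are free-running unsigned counters, the
- * subtraction stays correct even after either counter wraps.
- */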
-
-/*
- * There are at least two entities that can turn on credit return
- * interrupts and they can overlap. Avoid problems by implementing
- * a count scheme that is enforced by a lock. The lock is needed because
- * the count and CSR write must be paired.
- */
-
-/*
- * Start credit return interrupts. This is managed by a count. If already
- * on, just increment the count.
- */
-void sc_add_credit_return_intr(struct send_context *sc)
-{
- unsigned long flags;
-
- /* lock must surround both the count change and the CSR update */
- spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
- if (sc->credit_intr_count == 0) {
- sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
- write_kctxt_csr(sc->dd, sc->hw_context,
- SC(CREDIT_CTRL), sc->credit_ctrl);
- }
- sc->credit_intr_count++;
- spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
-}
-
-/*
- * Stop credit return interrupts. This is managed by a count. Decrement the
- * count, if the last user, then turn the credit interrupts off.
- */
-void sc_del_credit_return_intr(struct send_context *sc)
-{
- unsigned long flags;
-
- WARN_ON(sc->credit_intr_count == 0);
-
- /* lock must surround both the count change and the CSR update */
- spin_lock_irqsave(&sc->credit_ctrl_lock, flags);
- sc->credit_intr_count--;
- if (sc->credit_intr_count == 0) {
- sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
- write_kctxt_csr(sc->dd, sc->hw_context,
- SC(CREDIT_CTRL), sc->credit_ctrl);
- }
- spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
-}
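-
-/*
- * Minimal usage sketch of the count scheme (illustrative, not part of
- * the original file) - two independent users overlapping:
- *
- *	sc_add_credit_return_intr(sc);	count 0 -> 1, CSR bit set
- *	sc_add_credit_return_intr(sc);	count 1 -> 2, no CSR write
- *	sc_del_credit_return_intr(sc);	count 2 -> 1, no CSR write
- *	sc_del_credit_return_intr(sc);	count 1 -> 0, CSR bit cleared
- */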
-
-/*
- * The caller must be careful when calling this: every call with
- * needint set must eventually be paired with a call with needint
- * clear.
- */
-void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint)
-{
- if (needint)
- sc_add_credit_return_intr(sc);
- else
- sc_del_credit_return_intr(sc);
- trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl);
- if (needint) {
- mmiowb();
- sc_return_credits(sc);
- }
-}
-
-/**
- * sc_piobufavail - callback when a PIO buffer is available
- * @sc: the send context
- *
- * This is called from the interrupt handler when a PIO buffer is
- * available after hfi1_verbs_send() returned an error that no buffers were
- * available. Disable the interrupt if there are no more QPs waiting.
- */
-static void sc_piobufavail(struct send_context *sc)
-{
- struct hfi1_devdata *dd = sc->dd;
- struct hfi1_ibdev *dev = &dd->verbs_dev;
- struct list_head *list;
- struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
- struct rvt_qp *qp;
- struct hfi1_qp_priv *priv;
- unsigned long flags;
- unsigned i, n = 0;
-
- if (dd->send_contexts[sc->sw_index].type != SC_KERNEL)
- return;
- list = &sc->piowait;
- /*
- * Note: checking that the piowait list is empty and clearing
- * the buffer available interrupt needs to be atomic or we
- * could end up with QPs on the wait list with the interrupt
- * disabled.
- */
- write_seqlock_irqsave(&dev->iowait_lock, flags);
- while (!list_empty(list)) {
- struct iowait *wait;
-
- if (n == ARRAY_SIZE(qps))
- break;
- wait = list_first_entry(list, struct iowait, list);
- qp = iowait_to_qp(wait);
- priv = qp->priv;
- list_del_init(&priv->s_iowait.list);
- /* refcount held until actual wake up */
- qps[n++] = qp;
- }
- /*
-	 * If there had been waiters and there are more,
-	 * ensure that we redo the force to avoid a potential hang.
- */
- if (n) {
- hfi1_sc_wantpiobuf_intr(sc, 0);
- if (!list_empty(list))
- hfi1_sc_wantpiobuf_intr(sc, 1);
- }
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
-
- for (i = 0; i < n; i++)
- hfi1_qp_wakeup(qps[i],
- RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
-}
-
-/* translate a send credit update to a bit code of reasons */
-static inline int fill_code(u64 hw_free)
-{
- int code = 0;
-
- if (hw_free & CR_STATUS_SMASK)
- code |= PRC_STATUS_ERR;
- if (hw_free & CR_CREDIT_RETURN_DUE_TO_PBC_SMASK)
- code |= PRC_PBC;
- if (hw_free & CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK)
- code |= PRC_THRESHOLD;
- if (hw_free & CR_CREDIT_RETURN_DUE_TO_ERR_SMASK)
- code |= PRC_FILL_ERR;
- if (hw_free & CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK)
- code |= PRC_SC_DISABLE;
- return code;
-}
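-
-/*
- * Example (illustrative): a credit return triggered by both the
- * threshold and the force written during a context disable carries
- * CR_CREDIT_RETURN_DUE_TO_THRESHOLD_SMASK and
- * CR_CREDIT_RETURN_DUE_TO_FORCE_SMASK in hw_free, so fill_code()
- * returns PRC_THRESHOLD | PRC_SC_DISABLE (0x24).
- */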
-
-/* use the jiffies compare to get the wrap right */
-#define sent_before(a, b) time_before(a, b) /* a < b */
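-
-/*
- * Wrap example (illustrative): with free = ULONG_MAX - 1 and a buffer
- * allocated at sent_at = 2 after the counter wrapped, sent_before(free,
- * sent_at) is true - the buffer is not sent yet - even though a plain
- * "<" comparison would say otherwise; time_before() uses the signed
- * difference to get this right.
- */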
-
-/*
- * The send context buffer "releaser".
- */
-void sc_release_update(struct send_context *sc)
-{
- struct pio_buf *pbuf;
- u64 hw_free;
- u32 head, tail;
- unsigned long old_free;
- unsigned long free;
- unsigned long extra;
- unsigned long flags;
- int code;
-
- if (!sc)
- return;
-
- spin_lock_irqsave(&sc->release_lock, flags);
- /* update free */
- hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */
- old_free = sc->free;
- extra = (((hw_free & CR_COUNTER_SMASK) >> CR_COUNTER_SHIFT)
- - (old_free & CR_COUNTER_MASK))
- & CR_COUNTER_MASK;
- free = old_free + extra;
- trace_hfi1_piofree(sc, extra);
-
- /* call sent buffer callbacks */
- code = -1; /* code not yet set */
- head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
- tail = sc->sr_tail;
- while (head != tail) {
- pbuf = &sc->sr[tail].pbuf;
-
- if (sent_before(free, pbuf->sent_at)) {
- /* not sent yet */
- break;
- }
- if (pbuf->cb) {
- if (code < 0) /* fill in code on first user */
- code = fill_code(hw_free);
- (*pbuf->cb)(pbuf->arg, code);
- }
-
- tail++;
- if (tail >= sc->sr_size)
- tail = 0;
- }
- sc->sr_tail = tail;
- /* make sure tail is updated before free */
- smp_wmb();
- sc->free = free;
- spin_unlock_irqrestore(&sc->release_lock, flags);
- sc_piobufavail(sc);
-}
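-
-/*
- * Worked example of the "extra" computation above (illustrative; a
- * 12-bit CR_COUNTER field is assumed purely for the arithmetic - the
- * real width comes from CR_COUNTER_MASK): with old_free = 4090 and a
- * HW counter that has wrapped to 10, extra =
- * (10 - (4090 & 0xfff)) & 0xfff = 16, so free becomes 4106. The
- * modular subtraction absorbs the counter wrap.
- */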
-
-/*
- * Send context group releaser. Argument is the send context that caused
- * the interrupt. Called from the send context interrupt handler.
- *
- * Call release on all contexts in the group.
- *
- * This routine takes the sc_lock without an irqsave because it is only
- * called from an interrupt handler. Adjust if that changes.
- */
-void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
-{
- struct send_context *sc;
- u32 sw_index;
- u32 gc, gc_end;
-
- spin_lock(&dd->sc_lock);
- sw_index = dd->hw_to_sw[hw_context];
- if (unlikely(sw_index >= dd->num_send_contexts)) {
- dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
- __func__, hw_context, sw_index);
- goto done;
- }
- sc = dd->send_contexts[sw_index].sc;
- if (unlikely(!sc))
- goto done;
-
- gc = group_context(hw_context, sc->group);
- gc_end = gc + group_size(sc->group);
- for (; gc < gc_end; gc++) {
- sw_index = dd->hw_to_sw[gc];
- if (unlikely(sw_index >= dd->num_send_contexts)) {
- dd_dev_err(dd,
- "%s: invalid hw (%u) to sw (%u) mapping\n",
- __func__, hw_context, sw_index);
- continue;
- }
- sc_release_update(dd->send_contexts[sw_index].sc);
- }
-done:
- spin_unlock(&dd->sc_lock);
-}
-
-/*
- * pio_select_send_context_vl() - select send context
- * @dd: devdata
- * @selector: a spreading factor
- * @vl: this vl
- *
- * This function returns a send context based on the selector and a vl.
- * The mapping fields are protected by RCU.
- */
-struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
- u32 selector, u8 vl)
-{
- struct pio_vl_map *m;
- struct pio_map_elem *e;
- struct send_context *rval;
-
- /*
-	 * NOTE: This should only happen if SC->VL changed after the initial
-	 * checks on the QP/AH.
-	 * The default below will return VL0's send context.
- */
- if (unlikely(vl >= num_vls)) {
- rval = NULL;
- goto done;
- }
-
- rcu_read_lock();
- m = rcu_dereference(dd->pio_map);
- if (unlikely(!m)) {
- rcu_read_unlock();
- return dd->vld[0].sc;
- }
- e = m->map[vl & m->mask];
- rval = e->ksc[selector & e->mask];
- rcu_read_unlock();
-
-done:
- rval = !rval ? dd->vld[0].sc : rval;
- return rval;
-}
-
-/*
- * pio_select_send_context_sc() - select send context
- * @dd: devdata
- * @selector: a spreading factor
- * @sc5: the 5-bit SC
- *
- * This function returns a send context based on the selector and an SC.
- */
-struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
- u32 selector, u8 sc5)
-{
- u8 vl = sc_to_vlt(dd, sc5);
-
- return pio_select_send_context_vl(dd, selector, vl);
-}
-
-/*
- * Free the indicated map struct
- */
-static void pio_map_free(struct pio_vl_map *m)
-{
- int i;
-
- for (i = 0; m && i < m->actual_vls; i++)
- kfree(m->map[i]);
- kfree(m);
-}
-
-/*
- * Handle RCU callback
- */
-static void pio_map_rcu_callback(struct rcu_head *list)
-{
- struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
-
- pio_map_free(m);
-}
-
-/*
- * pio_map_init - called when #vls change
- * @dd: hfi1_devdata
- * @port: port number
- * @num_vls: number of vls
- * @vl_scontexts: per vl send context mapping (optional)
- *
- * This routine changes the mapping based on the number of vls.
- *
- * vl_scontexts is used to specify a non-uniform vl/send context
- * loading. NULL implies auto computing the loading and giving each
- * VL a uniform distribution of send contexts per VL.
- *
- * The auto algorithm computes the sc_per_vl and the number of extra
- * send contexts. Any extra send contexts are added from the last VL
- * on down.
- *
- * rcu locking is used here to control access to the mapping fields.
- *
- * If either num_vls or num_send_contexts is not a power of 2, the
- * array sizes in the struct pio_vl_map and the struct pio_map_elem are
- * rounded up to the next highest power of 2 and the first entry is
- * reused in a round robin fashion.
- *
- * If an error occurs, the map change is not done and the existing
- * mapping is left unchanged.
- *
- */
-int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
-{
- int i, j;
- int extra, sc_per_vl;
- int scontext = 1;
- int num_kernel_send_contexts = 0;
- u8 lvl_scontexts[OPA_MAX_VLS];
- struct pio_vl_map *oldmap, *newmap;
-
- if (!vl_scontexts) {
- /* send context 0 reserved for VL15 */
- for (i = 1; i < dd->num_send_contexts; i++)
- if (dd->send_contexts[i].type == SC_KERNEL)
- num_kernel_send_contexts++;
- /* truncate divide */
- sc_per_vl = num_kernel_send_contexts / num_vls;
- /* extras */
- extra = num_kernel_send_contexts % num_vls;
- vl_scontexts = lvl_scontexts;
- /* add extras from last vl down */
- for (i = num_vls - 1; i >= 0; i--, extra--)
- vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
- }
- /* build new map */
- newmap = kzalloc(sizeof(*newmap) +
- roundup_pow_of_two(num_vls) *
- sizeof(struct pio_map_elem *),
- GFP_KERNEL);
- if (!newmap)
- goto bail;
- newmap->actual_vls = num_vls;
- newmap->vls = roundup_pow_of_two(num_vls);
- newmap->mask = (1 << ilog2(newmap->vls)) - 1;
- for (i = 0; i < newmap->vls; i++) {
- /* save for wrap around */
- int first_scontext = scontext;
-
- if (i < newmap->actual_vls) {
- int sz = roundup_pow_of_two(vl_scontexts[i]);
-
- /* only allocate once */
- newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
- sz * sizeof(struct
- send_context *),
- GFP_KERNEL);
- if (!newmap->map[i])
- goto bail;
- newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
- /* assign send contexts */
- for (j = 0; j < sz; j++) {
- if (dd->kernel_send_context[scontext])
- newmap->map[i]->ksc[j] =
- dd->kernel_send_context[scontext];
- if (++scontext >= first_scontext +
- vl_scontexts[i])
- /* wrap back to first send context */
- scontext = first_scontext;
- }
- } else {
- /* just re-use entry without allocating */
- newmap->map[i] = newmap->map[i % num_vls];
- }
- scontext = first_scontext + vl_scontexts[i];
- }
- /* newmap in hand, save old map */
- spin_lock_irq(&dd->pio_map_lock);
- oldmap = rcu_dereference_protected(dd->pio_map,
- lockdep_is_held(&dd->pio_map_lock));
-
- /* publish newmap */
- rcu_assign_pointer(dd->pio_map, newmap);
-
- spin_unlock_irq(&dd->pio_map_lock);
- /* success, free any old map after grace period */
- if (oldmap)
- call_rcu(&oldmap->list, pio_map_rcu_callback);
- return 0;
-bail:
- /* free any partial allocation */
- pio_map_free(newmap);
- return -ENOMEM;
-}
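-
-/*
- * Worked example of the auto-distribution above (illustrative): with
- * 9 kernel send contexts available for data VLs and num_vls = 4,
- * sc_per_vl = 2 and extra = 1, so vl_scontexts becomes {2, 2, 2, 3} -
- * the single extra context goes to the last VL. Each per-VL ksc[]
- * array is then rounded up to a power of 2 and filled round robin.
- */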
-
-void free_pio_map(struct hfi1_devdata *dd)
-{
- /* Free PIO map if allocated */
- if (rcu_access_pointer(dd->pio_map)) {
- spin_lock_irq(&dd->pio_map_lock);
- pio_map_free(rcu_access_pointer(dd->pio_map));
- RCU_INIT_POINTER(dd->pio_map, NULL);
- spin_unlock_irq(&dd->pio_map_lock);
- synchronize_rcu();
- }
- kfree(dd->kernel_send_context);
- dd->kernel_send_context = NULL;
-}
-
-int init_pervl_scs(struct hfi1_devdata *dd)
-{
- int i;
- u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
- u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
- u32 ctxt;
- struct hfi1_pportdata *ppd = dd->pport;
-
- dd->vld[15].sc = sc_alloc(dd, SC_KERNEL,
- dd->rcd[0]->rcvhdrqentsize, dd->node);
- if (!dd->vld[15].sc)
- goto nomem;
- hfi1_init_ctxt(dd->vld[15].sc);
- dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
-
-	/* zeroed so unset entries are NULL and safe to sc_free() on error */
-	dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
-					       sizeof(struct send_context *),
-					       GFP_KERNEL, dd->node);
-	if (!dd->kernel_send_context)
-		goto freesc15;
-	dd->kernel_send_context[0] = dd->vld[15].sc;
-
- for (i = 0; i < num_vls; i++) {
- /*
- * Since this function does not deal with a specific
- * receive context but we need the RcvHdrQ entry size,
- * use the size from rcd[0]. It is guaranteed to be
- * valid at this point and will remain the same for all
- * receive contexts.
- */
- dd->vld[i].sc = sc_alloc(dd, SC_KERNEL,
- dd->rcd[0]->rcvhdrqentsize, dd->node);
- if (!dd->vld[i].sc)
- goto nomem;
- dd->kernel_send_context[i + 1] = dd->vld[i].sc;
- hfi1_init_ctxt(dd->vld[i].sc);
- /* non VL15 start with the max MTU */
- dd->vld[i].mtu = hfi1_max_mtu;
- }
- for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
- dd->kernel_send_context[i + 1] =
- sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
- if (!dd->kernel_send_context[i + 1])
- goto nomem;
- hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
- }
-
- sc_enable(dd->vld[15].sc);
- ctxt = dd->vld[15].sc->hw_context;
- mask = all_vl_mask & ~(1LL << 15);
- write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
- dd_dev_info(dd,
- "Using send context %u(%u) for VL15\n",
- dd->vld[15].sc->sw_index, ctxt);
-
- for (i = 0; i < num_vls; i++) {
- sc_enable(dd->vld[i].sc);
- ctxt = dd->vld[i].sc->hw_context;
- mask = all_vl_mask & ~(data_vls_mask);
- write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
- }
- for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
- sc_enable(dd->kernel_send_context[i + 1]);
- ctxt = dd->kernel_send_context[i + 1]->hw_context;
- mask = all_vl_mask & ~(data_vls_mask);
- write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
- }
-
- if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
- goto nomem;
- return 0;
-nomem:
-	for (i = 0; i < num_vls; i++)
-		sc_free(dd->vld[i].sc);
-	if (dd->kernel_send_context) {
-		for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
-			sc_free(dd->kernel_send_context[i + 1]);
-		kfree(dd->kernel_send_context);
-		dd->kernel_send_context = NULL;
-	}
-freesc15:
-	sc_free(dd->vld[15].sc);
-	return -ENOMEM;
-}
-
-int init_credit_return(struct hfi1_devdata *dd)
-{
- int ret;
- int num_numa;
- int i;
-
- num_numa = num_online_nodes();
- /* enforce the expectation that the numas are compact */
- for (i = 0; i < num_numa; i++) {
- if (!node_online(i)) {
- dd_dev_err(dd, "NUMA nodes are not compact\n");
- ret = -EINVAL;
- goto done;
- }
- }
-
- dd->cr_base = kcalloc(
- num_numa,
- sizeof(struct credit_return_base),
- GFP_KERNEL);
- if (!dd->cr_base) {
- dd_dev_err(dd, "Unable to allocate credit return base\n");
- ret = -ENOMEM;
- goto done;
- }
- for (i = 0; i < num_numa; i++) {
- int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
-
- set_dev_node(&dd->pcidev->dev, i);
- dd->cr_base[i].va = dma_zalloc_coherent(
- &dd->pcidev->dev,
- bytes,
- &dd->cr_base[i].pa,
- GFP_KERNEL);
- if (!dd->cr_base[i].va) {
- set_dev_node(&dd->pcidev->dev, dd->node);
- dd_dev_err(dd,
- "Unable to allocate credit return DMA range for NUMA %d\n",
- i);
- ret = -ENOMEM;
- goto done;
- }
- }
- set_dev_node(&dd->pcidev->dev, dd->node);
-
- ret = 0;
-done:
- return ret;
-}
-
-void free_credit_return(struct hfi1_devdata *dd)
-{
- int num_numa;
- int i;
-
- if (!dd->cr_base)
- return;
-
- num_numa = num_online_nodes();
- for (i = 0; i < num_numa; i++) {
- if (dd->cr_base[i].va) {
- dma_free_coherent(&dd->pcidev->dev,
- TXE_NUM_CONTEXTS *
- sizeof(struct credit_return),
- dd->cr_base[i].va,
- dd->cr_base[i].pa);
- }
- }
- kfree(dd->cr_base);
- dd->cr_base = NULL;
-}
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h
deleted file mode 100644
index 0026976ce..000000000
--- a/drivers/staging/rdma/hfi1/pio.h
+++ /dev/null
@@ -1,326 +0,0 @@
-#ifndef _PIO_H
-#define _PIO_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/* send context types */
-#define SC_KERNEL 0
-#define SC_ACK 1
-#define SC_USER 2
-#define SC_MAX 3
-
-/* invalid send context index */
-#define INVALID_SCI 0xff
-
-/* PIO buffer release callback function */
-typedef void (*pio_release_cb)(void *arg, int code);
-
-/* PIO release codes - in bits, as more than one may apply */
-#define PRC_OK 0 /* no known error */
-#define PRC_STATUS_ERR 0x01 /* credit return due to status error */
-#define PRC_PBC 0x02 /* credit return due to PBC */
-#define PRC_THRESHOLD 0x04 /* credit return due to threshold */
-#define PRC_FILL_ERR 0x08 /* credit return due to fill error */
-#define PRC_FORCE 0x10 /* credit return due to credit force */
-#define PRC_SC_DISABLE 0x20 /* clean-up after a context disable */
-
-/* byte helper */
-union mix {
- u64 val64;
- u32 val32[2];
- u8 val8[8];
-};
-
-/* an allocated PIO buffer */
-struct pio_buf {
- struct send_context *sc;/* back pointer to owning send context */
- pio_release_cb cb; /* called when the buffer is released */
- void *arg; /* argument for cb */
- void __iomem *start; /* buffer start address */
- void __iomem *end; /* context end address */
- unsigned long size; /* context size, in bytes */
- unsigned long sent_at; /* buffer is sent when <= free */
- u32 block_count; /* size of buffer, in blocks */
- u32 qw_written; /* QW written so far */
- u32 carry_bytes; /* number of valid bytes in carry */
- union mix carry; /* pending unwritten bytes */
-};
-
-/* cache line aligned pio buffer array */
-union pio_shadow_ring {
- struct pio_buf pbuf;
- u64 unused[16]; /* cache line spacer */
-} ____cacheline_aligned;
-
-/* per-NUMA send context */
-struct send_context {
- /* read-only after init */
- struct hfi1_devdata *dd; /* device */
- void __iomem *base_addr; /* start of PIO memory */
- union pio_shadow_ring *sr; /* shadow ring */
-
- volatile __le64 *hw_free; /* HW free counter */
- struct work_struct halt_work; /* halted context work queue entry */
- unsigned long flags; /* flags */
- int node; /* context home node */
- int type; /* context type */
- u32 sw_index; /* software index number */
- u32 hw_context; /* hardware context number */
- u32 credits; /* number of blocks in context */
- u32 sr_size; /* size of the shadow ring */
- u32 group; /* credit return group */
- /* allocator fields */
- spinlock_t alloc_lock ____cacheline_aligned_in_smp;
- unsigned long fill; /* official alloc count */
- unsigned long alloc_free; /* copy of free (less cache thrash) */
- u32 sr_head; /* shadow ring head */
- /* releaser fields */
- spinlock_t release_lock ____cacheline_aligned_in_smp;
- unsigned long free; /* official free count */
- u32 sr_tail; /* shadow ring tail */
- /* list for PIO waiters */
- struct list_head piowait ____cacheline_aligned_in_smp;
- spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
- u64 credit_ctrl; /* cache for credit control */
- u32 credit_intr_count; /* count of credit intr users */
- u32 __percpu *buffers_allocated;/* count of buffers allocated */
- wait_queue_head_t halt_wait; /* wait until kernel sees interrupt */
-};
-
-/* send context flags */
-#define SCF_ENABLED 0x01
-#define SCF_IN_FREE 0x02
-#define SCF_HALTED 0x04
-#define SCF_FROZEN 0x08
-
-struct send_context_info {
- struct send_context *sc; /* allocated working context */
- u16 allocated; /* has this been allocated? */
- u16 type; /* context type */
- u16 base; /* base in PIO array */
- u16 credits; /* size in PIO array */
-};
-
-/* DMA credit return, index is always (context & 0x7) */
-struct credit_return {
- volatile __le64 cr[8];
-};
-
-/* NUMA indexed credit return array */
-struct credit_return_base {
- struct credit_return *va;
- dma_addr_t pa;
-};
-
-/* send context configuration sizes (one per type) */
-struct sc_config_sizes {
- short int size;
- short int count;
-};
-
-/*
- * The diagram below details the relationship of the mapping structures
- *
- * Since the mapping now allows for non-uniform send contexts per vl, the
- * number of send contexts for a vl is either vl_scontexts[vl] or
- * a computation based on num_kernel_send_contexts/num_vls:
- *
- * For example:
- * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls
- *
- * n = roundup to next highest power of 2 using nactual
- *
- * In the case where num_kernel_send_contexts/num_vls doesn't divide
- * evenly, the extras are added from the last vl downward.
- *
- * For the case where n > nactual, the send contexts are assigned
- * in a round robin fashion wrapping back to the first send context
- * for a particular vl.
- *
- * dd->pio_map
- * | pio_map_elem[0]
- * | +--------------------+
- * v | mask |
- * pio_vl_map |--------------------|
- * +--------------------------+ | ksc[0] -> sc 1 |
- * | list (RCU) | |--------------------|
- * |--------------------------| ->| ksc[1] -> sc 2 |
- * | mask | --/ |--------------------|
- * |--------------------------| -/ | * |
- * | actual_vls (max 8) | -/ |--------------------|
- * |--------------------------| --/ | ksc[n] -> sc n |
- * | vls (max 8) | -/ +--------------------+
- * |--------------------------| --/
- * | map[0] |-/
- * |--------------------------| +--------------------+
- * | map[1] |--- | mask |
- * |--------------------------| \---- |--------------------|
- * | * | \-- | ksc[0] -> sc 1+n |
- * | * | \---- |--------------------|
- * | * | \->| ksc[1] -> sc 2+n |
- * |--------------------------| |--------------------|
- * | map[vls - 1] |- | * |
- * +--------------------------+ \- |--------------------|
- * \- | ksc[m] -> sc m+n |
- * \ +--------------------+
- * \-
- * \
- * \- +--------------------+
- * \- | mask |
- * \ |--------------------|
- * \- | ksc[0] -> sc 1+m+n |
- * \- |--------------------|
- * >| ksc[1] -> sc 2+m+n |
- * |--------------------|
- * | * |
- * |--------------------|
- * | ksc[o] -> sc o+m+n |
- * +--------------------+
- *
- */
-
-/* Initial number of send contexts per VL */
-#define INIT_SC_PER_VL 2
-
-/*
- * struct pio_map_elem - mapping for a vl
- * @mask - selector mask
- * @ksc - array of kernel send contexts for this vl
- *
- * The mask is used to "mod" the selector to
- * produce an index into the trailing array of
- * kscs
- */
-struct pio_map_elem {
- u32 mask;
- struct send_context *ksc[0];
-};
-
-/*
- * struct pio_vl_map - parent vl-to-send-context mapping
- * @list - rcu head for free callback
- * @mask - vl mask to "mod" the vl to produce an index into the map array
- * @actual_vls - number of vls
- * @vls - number of vls rounded to the next power of 2
- * @map - array of pio_map_elem entries
- *
- * This is the parent mapping structure. The trailing members of the
- * struct point to pio_map_elem entries, which in turn point to an
- * array of kscs for that vl.
- */
-struct pio_vl_map {
- struct rcu_head list;
- u32 mask;
- u8 actual_vls;
- u8 vls;
- struct pio_map_elem *map[0];
-};
-
-int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
- u8 *vl_scontexts);
-void free_pio_map(struct hfi1_devdata *dd);
-struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
- u32 selector, u8 vl);
-struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
- u32 selector, u8 sc5);
-
-/* send context functions */
-int init_credit_return(struct hfi1_devdata *dd);
-void free_credit_return(struct hfi1_devdata *dd);
-int init_sc_pools_and_sizes(struct hfi1_devdata *dd);
-int init_send_contexts(struct hfi1_devdata *dd);
-int init_pervl_scs(struct hfi1_devdata *dd);
-struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
- uint hdrqentsize, int numa);
-void sc_free(struct send_context *sc);
-int sc_enable(struct send_context *sc);
-void sc_disable(struct send_context *sc);
-int sc_restart(struct send_context *sc);
-void sc_return_credits(struct send_context *sc);
-void sc_flush(struct send_context *sc);
-void sc_drop(struct send_context *sc);
-void sc_stop(struct send_context *sc, int bit);
-struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
- pio_release_cb cb, void *arg);
-void sc_release_update(struct send_context *sc);
-void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
-void sc_add_credit_return_intr(struct send_context *sc);
-void sc_del_credit_return_intr(struct send_context *sc);
-void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold);
-u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize);
-void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint);
-void sc_wait(struct hfi1_devdata *dd);
-void set_pio_integrity(struct send_context *sc);
-
-/* support functions */
-void pio_reset_all(struct hfi1_devdata *dd);
-void pio_freeze(struct hfi1_devdata *dd);
-void pio_kernel_unfreeze(struct hfi1_devdata *dd);
-
-/* global PIO send control operations */
-#define PSC_GLOBAL_ENABLE 0
-#define PSC_GLOBAL_DISABLE 1
-#define PSC_GLOBAL_VLARB_ENABLE 2
-#define PSC_GLOBAL_VLARB_DISABLE 3
-#define PSC_CM_RESET 4
-#define PSC_DATA_VL_ENABLE 5
-#define PSC_DATA_VL_DISABLE 6
-
-void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
-void pio_send_control(struct hfi1_devdata *dd, int op);
-
-/* PIO copy routines */
-void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
- const void *from, size_t count);
-void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
- const void *from, size_t nbytes);
-void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
-void seg_pio_copy_end(struct pio_buf *pbuf);
-
-#endif /* _PIO_H */
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c
deleted file mode 100644
index 8c25e1b58..000000000
--- a/drivers/staging/rdma/hfi1/pio_copy.c
+++ /dev/null
@@ -1,867 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "hfi.h"
-
-/* additive distance between non-SOP and SOP space */
-#define SOP_DISTANCE (TXE_PIO_SIZE / 2)
-#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1)
-/* number of QUADWORDs in a block */
-#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64))
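-
-/*
- * Addressing sketch (illustrative): the PIO send memory is mapped
- * twice, with the upper half carrying SOP=1 semantics. A buffer's
- * first block is therefore written at pbuf->start + SOP_DISTANCE so
- * the hardware sees the start-of-packet marker, after which the copy
- * drops back down by SOP_DISTANCE into plain (SOP=0) space.
- */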
-
-/**
- * pio_copy - copy data block to MMIO space
- * @pbuf: a number of blocks allocated within a PIO send context
- * @pbc: PBC to send
- * @from: source, must be 8 byte aligned
- * @count: number of DWORD (32-bit) quantities to copy from source
- *
- * Copy data from source to PIO Send Buffer memory, 8 bytes at a time.
- * Must always write full PIO_BLOCK_SIZE-byte blocks. The first block must
- * be written to the corresponding SOP=1 address.
- *
- * Known:
- * o pbuf->start always starts on a block boundary
- * o pbuf can wrap only at a block boundary
- */
-void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
- const void *from, size_t count)
-{
- void __iomem *dest = pbuf->start + SOP_DISTANCE;
- void __iomem *send = dest + PIO_BLOCK_SIZE;
- void __iomem *dend; /* 8-byte data end */
-
- /* write the PBC */
- writeq(pbc, dest);
- dest += sizeof(u64);
-
- /* calculate where the QWORD data ends - in SOP=1 space */
- dend = dest + ((count >> 1) * sizeof(u64));
-
- if (dend < send) {
- /*
- * all QWORD data is within the SOP block, does *not*
- * reach the end of the SOP block
- */
-
- while (dest < dend) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
- /*
- * No boundary checks are needed here:
- * 0. We're not on the SOP block boundary
- * 1. The possible DWORD dangle will still be within
- * the SOP block
- * 2. We cannot wrap except on a block boundary.
- */
- } else {
- /* QWORD data extends _to_ or beyond the SOP block */
-
- /* write 8-byte SOP chunk data */
- while (dest < send) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
- /* drop out of the SOP range */
- dest -= SOP_DISTANCE;
- dend -= SOP_DISTANCE;
-
- /*
- * If the wrap comes before or matches the data end,
-	 * copy until the wrap, then wrap.
- *
- * If the data ends at the end of the SOP above and
- * the buffer wraps, then pbuf->end == dend == dest
- * and nothing will get written, but we will wrap in
- * case there is a dangling DWORD.
- */
- if (pbuf->end <= dend) {
- while (dest < pbuf->end) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- dest -= pbuf->size;
- dend -= pbuf->size;
- }
-
- /* write 8-byte non-SOP, non-wrap chunk data */
- while (dest < dend) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
- }
- /* at this point we have wrapped if we are going to wrap */
-
- /* write dangling u32, if any */
- if (count & 1) {
- union mix val;
-
- val.val64 = 0;
- val.val32[0] = *(u32 *)from;
- writeq(val.val64, dest);
- dest += sizeof(u64);
- }
- /*
- * fill in rest of block, no need to check pbuf->end
- * as we only wrap on a block boundary
- */
- while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
- writeq(0, dest);
- dest += sizeof(u64);
- }
-
- /* finished with this buffer */
- this_cpu_dec(*pbuf->sc->buffers_allocated);
- preempt_enable();
-}
-
-/* USE_SHIFTS is faster in user-space tests on a Xeon X5570 @ 2.93GHz */
-#define USE_SHIFTS 1
-#ifdef USE_SHIFTS
-/*
- * Handle carry bytes using shifts and masks.
- *
- * NOTE: the unused portion of carry is expected to always be zero.
- */
-
-/*
- * "zero" shift - bit shift used to zero out upper bytes. Input is
- * the count of LSB bytes to preserve.
- */
-#define zshift(x) (8 * (8 - (x)))
-
-/*
- * "merge" shift - bit shift used to merge with carry bytes. Input is
- * the LSB byte count to move beyond.
- */
-#define mshift(x) (8 * (x))
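-
-/*
- * Worked example (illustrative): to keep the 3 LSB bytes of a value,
- * zshift(3) = 8 * (8 - 3) = 40 and (val << 40) >> 40 clears the upper
- * 5 bytes; to skip past 2 already-consumed bytes, mshift(2) = 16 and
- * val >> 16 drops them. The helpers below combine these shifts to
- * extract unaligned byte runs into carry.
- */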
-
-/*
- * Read nbytes bytes from "from" and return them in the LSB bytes
- * of pbuf->carry. Other bytes are zeroed. Any previous value
- * in pbuf->carry is lost.
- *
- * NOTES:
- * o do not read from "from" if nbytes is zero
- * o from may _not_ be u64 aligned
- * o nbytes must not span a QW boundary
- */
-static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
- unsigned int nbytes)
-{
- unsigned long off;
-
- if (nbytes == 0) {
- pbuf->carry.val64 = 0;
- } else {
- /* align our pointer */
- off = (unsigned long)from & 0x7;
- from = (void *)((unsigned long)from & ~0x7l);
- pbuf->carry.val64 = ((*(u64 *)from)
- << zshift(nbytes + off))/* zero upper bytes */
- >> zshift(nbytes); /* place at bottom */
- }
- pbuf->carry_bytes = nbytes;
-}
-
-/*
- * Read nbytes bytes from "from" and put them at the next significant bytes
- * of pbuf->carry. Unused bytes are zeroed. It is expected that the extra
- * read does not overfill carry.
- *
- * NOTES:
- * o from may _not_ be u64 aligned
- * o nbytes may span a QW boundary
- */
-static inline void read_extra_bytes(struct pio_buf *pbuf,
- const void *from, unsigned int nbytes)
-{
- unsigned long off = (unsigned long)from & 0x7;
- unsigned int room, xbytes;
-
- /* align our pointer */
- from = (void *)((unsigned long)from & ~0x7l);
-
- /* check count first - don't read anything if count is zero */
- while (nbytes) {
- /* find the number of bytes in this u64 */
- room = 8 - off; /* this u64 has room for this many bytes */
- xbytes = min(room, nbytes);
-
- /*
- * shift down to zero lower bytes, shift up to zero upper
- * bytes, shift back down to move into place
- */
- pbuf->carry.val64 |= (((*(u64 *)from)
- >> mshift(off))
- << zshift(xbytes))
- >> zshift(xbytes + pbuf->carry_bytes);
- off = 0;
- pbuf->carry_bytes += xbytes;
- nbytes -= xbytes;
- from += sizeof(u64);
- }
-}
-
-/*
- * Zero extra bytes from the end of pbuf->carry.
- *
- * NOTES:
- * o zbytes <= old_bytes
- */
-static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
-{
- unsigned int remaining;
-
- if (zbytes == 0) /* nothing to do */
- return;
-
- remaining = pbuf->carry_bytes - zbytes; /* remaining bytes */
-
-	/* NOTE: zshift is only guaranteed to work if remaining != 0 */
- if (remaining)
- pbuf->carry.val64 = (pbuf->carry.val64 << zshift(remaining))
- >> zshift(remaining);
- else
- pbuf->carry.val64 = 0;
- pbuf->carry_bytes = remaining;
-}
-
-/*
- * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
- * Put the unused part of the next 8 bytes of src into the LSB bytes of
- * pbuf->carry with the upper bytes zeroed.
- *
- * NOTES:
- * o result must keep unused bytes zeroed
- * o src must be u64 aligned
- */
-static inline void merge_write8(
- struct pio_buf *pbuf,
- void __iomem *dest,
- const void *src)
-{
- u64 new, temp;
-
- new = *(u64 *)src;
- temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
- writeq(temp, dest);
- pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
-}
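-
-/*
- * Worked example (illustrative): with carry_bytes = 3 and carry
- * holding bytes c0 c1 c2 in its LSBs, a new source QW n0..n7 yields
- * temp = c0 c1 c2 n0 n1 n2 n3 n4 (carry | new << 24), which is
- * written out, while carry becomes n5 n6 n7 (new >> 40) ready for
- * the next write.
- */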
-
-/*
- * Write a quad word using all bytes of carry.
- */
-static inline void carry8_write8(union mix carry, void __iomem *dest)
-{
- writeq(carry.val64, dest);
-}
-
-/*
- * Write a quad word using all the valid bytes of carry. If carry
- * has zero valid bytes, nothing is written.
- * Returns 0 on nothing written, non-zero on quad word written.
- */
-static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
-{
- if (pbuf->carry_bytes) {
- /* unused bytes are always kept zeroed, so just write */
- writeq(pbuf->carry.val64, dest);
- return 1;
- }
-
- return 0;
-}
-
-#else /* USE_SHIFTS */
-/*
- * Handle carry bytes using byte copies.
- *
- * NOTE: the unused portion of carry is left uninitialized.
- */
-
-/*
- * Jump copy - no-loop copy for < 8 bytes.
- */
-static inline void jcopy(u8 *dest, const u8 *src, u32 n)
-{
- switch (n) {
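-	/* every case below intentionally falls through, copying n bytes */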
- case 7:
- *dest++ = *src++;
- case 6:
- *dest++ = *src++;
- case 5:
- *dest++ = *src++;
- case 4:
- *dest++ = *src++;
- case 3:
- *dest++ = *src++;
- case 2:
- *dest++ = *src++;
- case 1:
- *dest++ = *src++;
- }
-}
-
-/*
- * Read nbytes bytes from "from" and place them in the low bytes
- * of pbuf->carry. Other bytes are left as-is. Any previous
- * value in pbuf->carry is lost.
- *
- * NOTES:
- * o do not read from "from" if nbytes is zero
- * o from may _not_ be u64 aligned.
- */
-static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
- unsigned int nbytes)
-{
- jcopy(&pbuf->carry.val8[0], from, nbytes);
- pbuf->carry_bytes = nbytes;
-}
-
-/*
- * Read nbytes bytes from "from" and put them at the end of pbuf->carry.
- * It is expected that the extra read does not overfill carry.
- *
- * NOTES:
- * o from may _not_ be u64 aligned
- * o nbytes may span a QW boundary
- */
-static inline void read_extra_bytes(struct pio_buf *pbuf,
- const void *from, unsigned int nbytes)
-{
- jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
- pbuf->carry_bytes += nbytes;
-}
-
-/*
- * Zero extra bytes from the end of pbuf->carry.
- *
- * We do not care about the value of unused bytes in carry, so just
- * reduce the byte count.
- *
- * NOTES:
- * o zbytes <= old_bytes
- */
-static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
-{
- pbuf->carry_bytes -= zbytes;
-}
-
-/*
- * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
- * Put the unused part of the next 8 bytes of src into the low bytes of
- * pbuf->carry.
- */
-static inline void merge_write8(
- struct pio_buf *pbuf,
- void *dest,
- const void *src)
-{
- u32 remainder = 8 - pbuf->carry_bytes;
-
- jcopy(&pbuf->carry.val8[pbuf->carry_bytes], src, remainder);
- writeq(pbuf->carry.val64, dest);
- jcopy(&pbuf->carry.val8[0], src + remainder, pbuf->carry_bytes);
-}
-
-/*
- * Write a quad word using all bytes of carry.
- */
-static inline void carry8_write8(union mix carry, void *dest)
-{
- writeq(carry.val64, dest);
-}
-
-/*
- * Write a quad word using all the valid bytes of carry. If carry
- * has zero valid bytes, nothing is written.
- * Returns 0 on nothing written, non-zero on quad word written.
- */
-static inline int carry_write8(struct pio_buf *pbuf, void *dest)
-{
- if (pbuf->carry_bytes) {
- u64 zero = 0;
-
- jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero,
- 8 - pbuf->carry_bytes);
- writeq(pbuf->carry.val64, dest);
- return 1;
- }
-
- return 0;
-}
-#endif /* USE_SHIFTS */
-
-/*
- * Segmented PIO Copy - start
- *
- * Start a PIO copy.
- *
- * @pbuf: destination buffer
- * @pbc: the PBC for the PIO buffer
- * @from: data source, QWORD aligned
- * @nbytes: bytes to copy
- */
-void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
- const void *from, size_t nbytes)
-{
- void __iomem *dest = pbuf->start + SOP_DISTANCE;
- void __iomem *send = dest + PIO_BLOCK_SIZE;
- void __iomem *dend; /* 8-byte data end */
-
- writeq(pbc, dest);
- dest += sizeof(u64);
-
- /* calculate where the QWORD data ends - in SOP=1 space */
- dend = dest + ((nbytes >> 3) * sizeof(u64));
-
- if (dend < send) {
- /*
- * all QWORD data is within the SOP block, does *not*
- * reach the end of the SOP block
- */
-
- while (dest < dend) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
- /*
- * No boundary checks are needed here:
- * 0. We're not on the SOP block boundary
- * 1. The possible DWORD dangle will still be within
- * the SOP block
- * 2. We cannot wrap except on a block boundary.
- */
- } else {
- /* QWORD data extends _to_ or beyond the SOP block */
-
- /* write 8-byte SOP chunk data */
- while (dest < send) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
- /* drop out of the SOP range */
- dest -= SOP_DISTANCE;
- dend -= SOP_DISTANCE;
-
- /*
- * If the wrap comes before or matches the data end,
-	 * copy until the wrap, then wrap.
- *
- * If the data ends at the end of the SOP above and
- * the buffer wraps, then pbuf->end == dend == dest
- * and nothing will get written, but we will wrap in
- * case there is a dangling DWORD.
- */
- if (pbuf->end <= dend) {
- while (dest < pbuf->end) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- dest -= pbuf->size;
- dend -= pbuf->size;
- }
-
- /* write 8-byte non-SOP, non-wrap chunk data */
- while (dest < dend) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
- }
- /* at this point we have wrapped if we are going to wrap */
-
- /* ...but it doesn't matter as we're done writing */
-
- /* save dangling bytes, if any */
- read_low_bytes(pbuf, from, nbytes & 0x7);
-
- pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3);
-}
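-
-/*
- * Accounting example (illustrative): a start call with nbytes = 20
- * writes the PBC plus two full QWs (16 bytes) and leaves 4 bytes in
- * carry: qw_written = 1 + (20 >> 3) = 3 and carry_bytes = 20 & 0x7 =
- * 4. The dangling bytes are merged into the next seg_pio_copy_mid()
- * call or flushed by seg_pio_copy_end().
- */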
-
-/*
- * Mid copy helper, "mixed case" - source is 64-bit aligned but carry
- * bytes are non-zero.
- *
- * Whole u64s must be written to the chip, so bytes must be manually merged.
- *
- * @pbuf: destination buffer
- * @from: data source, is QWORD aligned.
- * @nbytes: bytes to copy
- *
- * Must handle nbytes < 8.
- */
-static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
-{
- void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
- void __iomem *dend; /* 8-byte data end */
- unsigned long qw_to_write = (pbuf->carry_bytes + nbytes) >> 3;
- unsigned long bytes_left = (pbuf->carry_bytes + nbytes) & 0x7;
-
- /* calculate 8-byte data end */
- dend = dest + (qw_to_write * sizeof(u64));
-
- if (pbuf->qw_written < PIO_BLOCK_QWS) {
- /*
- * Still within SOP block. We don't need to check for
- * wrap because we are still in the first block and
- * can only wrap on block boundaries.
- */
- void __iomem *send; /* SOP end */
- void __iomem *xend;
-
- /*
- * calculate the end of data or end of block, whichever
- * comes first
- */
- send = pbuf->start + PIO_BLOCK_SIZE;
- xend = min(send, dend);
-
- /* shift up to SOP=1 space */
- dest += SOP_DISTANCE;
- xend += SOP_DISTANCE;
-
- /* write 8-byte chunk data */
- while (dest < xend) {
- merge_write8(pbuf, dest, from);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- /* shift down to SOP=0 space */
- dest -= SOP_DISTANCE;
- }
- /*
- * At this point dest could be (either, both, or neither):
- * - at dend
- * - at the wrap
- */
-
- /*
- * If the wrap comes before or matches the data end,
-	 * copy until the wrap, then wrap.
- *
- * If dest is at the wrap, we will fall into the if,
-	 * skip the loop body, and then wrap.
- *
- * If the data ends at the end of the SOP above and
- * the buffer wraps, then pbuf->end == dend == dest
- * and nothing will get written.
- */
- if (pbuf->end <= dend) {
- while (dest < pbuf->end) {
- merge_write8(pbuf, dest, from);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- dest -= pbuf->size;
- dend -= pbuf->size;
- }
-
- /* write 8-byte non-SOP, non-wrap chunk data */
- while (dest < dend) {
- merge_write8(pbuf, dest, from);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- /* adjust carry */
- if (pbuf->carry_bytes < bytes_left) {
- /* need to read more */
- read_extra_bytes(pbuf, from, bytes_left - pbuf->carry_bytes);
- } else {
- /* remove invalid bytes */
- zero_extra_bytes(pbuf, pbuf->carry_bytes - bytes_left);
- }
-
- pbuf->qw_written += qw_to_write;
-}
-
-/*
- * Mid copy helper, "straight case" - source pointer is 64-bit aligned
- * with no carry bytes.
- *
- * @pbuf: destination buffer
- * @from: data source, is QWORD aligned
- * @nbytes: bytes to copy
- *
- * Must handle nbytes < 8.
- */
-static void mid_copy_straight(struct pio_buf *pbuf,
- const void *from, size_t nbytes)
-{
- void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
- void __iomem *dend; /* 8-byte data end */
-
- /* calculate 8-byte data end */
- dend = dest + ((nbytes >> 3) * sizeof(u64));
-
- if (pbuf->qw_written < PIO_BLOCK_QWS) {
- /*
- * Still within SOP block. We don't need to check for
- * wrap because we are still in the first block and
- * can only wrap on block boundaries.
- */
- void __iomem *send; /* SOP end */
- void __iomem *xend;
-
- /*
- * calculate the end of data or end of block, whichever
- * comes first
- */
- send = pbuf->start + PIO_BLOCK_SIZE;
- xend = min(send, dend);
-
- /* shift up to SOP=1 space */
- dest += SOP_DISTANCE;
- xend += SOP_DISTANCE;
-
- /* write 8-byte chunk data */
- while (dest < xend) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- /* shift down to SOP=0 space */
- dest -= SOP_DISTANCE;
- }
- /*
- * At this point dest could be (either, both, or neither):
- * - at dend
- * - at the wrap
- */
-
- /*
- * If the wrap comes before or matches the data end,
- * copy up to the wrap, then wrap.
- *
- * If dest is exactly at the wrap, we fall into the if
- * but the loop body does not run; we simply wrap.
- *
- * If the data ends at the end of the SOP block above and
- * the buffer wraps, then pbuf->end == dend == dest
- * and nothing will get written.
- */
- if (pbuf->end <= dend) {
- while (dest < pbuf->end) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- dest -= pbuf->size;
- dend -= pbuf->size;
- }
-
- /* write 8-byte non-SOP, non-wrap chunk data */
- while (dest < dend) {
- writeq(*(u64 *)from, dest);
- from += sizeof(u64);
- dest += sizeof(u64);
- }
-
- /* we know carry_bytes was zero on entry to this routine */
- read_low_bytes(pbuf, from, nbytes & 0x7);
-
- pbuf->qw_written += nbytes >> 3;
-}
-
-/*
- * Segmented PIO Copy - middle
- *
- * Must handle any aligned tail and any aligned source with any byte count.
- *
- * @pbuf: a number of blocks allocated within a PIO send context
- * @from: data source
- * @nbytes: number of bytes to copy
- */
-void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
-{
- unsigned long from_align = (unsigned long)from & 0x7;
-
- if (pbuf->carry_bytes + nbytes < 8) {
- /* not enough bytes to fill a QW */
- read_extra_bytes(pbuf, from, nbytes);
- return;
- }
-
- if (from_align) {
- /* misaligned source pointer - align it */
- unsigned long to_align;
-
- /* bytes to read to align "from" */
- to_align = 8 - from_align;
-
- /*
- * In the advance-to-alignment logic below, we do not need
- * to check if we are using more than nbytes. This is because
- * if we are here, we already know that carry+nbytes will
- * fill at least one QW.
- */
- if (pbuf->carry_bytes + to_align < 8) {
- /* not enough align bytes to fill a QW */
- read_extra_bytes(pbuf, from, to_align);
- from += to_align;
- nbytes -= to_align;
- } else {
- /* bytes to fill carry */
- unsigned long to_fill = 8 - pbuf->carry_bytes;
- /* bytes left over to be read */
- unsigned long extra = to_align - to_fill;
- void __iomem *dest;
-
- /* fill carry... */
- read_extra_bytes(pbuf, from, to_fill);
- from += to_fill;
- nbytes -= to_fill;
-
- /* ...now write carry */
- dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
-
- /*
- * The two checks immediately below cannot both be
- * true, hence the else. If we have wrapped, we
- * cannot still be within the first block.
- * Conversely, if we are still in the first block, we
- * cannot have wrapped. We do the wrap check first
- * as that is more likely.
- */
- /* adjust if we've wrapped */
- if (dest >= pbuf->end)
- dest -= pbuf->size;
- /* jump to SOP range if within the first block */
- else if (pbuf->qw_written < PIO_BLOCK_QWS)
- dest += SOP_DISTANCE;
-
- carry8_write8(pbuf->carry, dest);
- pbuf->qw_written++;
-
- /* read any extra bytes to do final alignment */
- /* this will overwrite anything in pbuf->carry */
- read_low_bytes(pbuf, from, extra);
- from += extra;
- nbytes -= extra;
- }
-
- /* at this point, from is QW aligned */
- }
-
- if (pbuf->carry_bytes)
- mid_copy_mix(pbuf, from, nbytes);
- else
- mid_copy_straight(pbuf, from, nbytes);
-}
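-
-/*
- * A minimal sketch of the advance-to-alignment arithmetic in
- * seg_pio_copy_mid() above, valid for the branch where
- * carry_bytes + to_align >= 8.  demo_align_split() is illustrative
- * and not part of this driver.  For example, carry_bytes = 6 and
- * from_align = 5 (so to_align = 3) gives to_fill = 2 bytes to
- * complete the carry QW and extra = 1 byte to start the new carry.
- */
-static inline void demo_align_split(unsigned long carry_bytes,
-				    unsigned long from_align,
-				    unsigned long *to_fill,
-				    unsigned long *extra)
-{
-	unsigned long to_align = 8 - from_align; /* bytes to align src */
-
-	*to_fill = 8 - carry_bytes;	/* completes the carry QW */
-	*extra = to_align - *to_fill;	/* left over for the new carry */
-}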
-
-/*
- * Segmented PIO Copy - end
- *
- * Write any remainder (in pbuf->carry) and finish writing the whole block.
- *
- * @pbuf: a number of blocks allocated within a PIO send context
- */
-void seg_pio_copy_end(struct pio_buf *pbuf)
-{
- void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
-
- /*
- * The two checks immediately below cannot both be true, hence the
- * else. If we have wrapped, we cannot still be within the first
- * block. Conversely, if we are still in the first block, we
- * cannot have wrapped. We do the wrap check first as that is
- * more likely.
- */
- /* adjust if we have wrapped */
- if (dest >= pbuf->end)
- dest -= pbuf->size;
- /* jump to the SOP range if within the first block */
- else if (pbuf->qw_written < PIO_BLOCK_QWS)
- dest += SOP_DISTANCE;
-
- /* write final bytes, if any */
- if (carry_write8(pbuf, dest)) {
- dest += sizeof(u64);
- /*
- * NOTE: We do not need to recalculate whether dest needs
- * SOP_DISTANCE or not.
- *
- * If we are in the first block and the dangle write
- * keeps us in the same block, dest will need
- * to retain SOP_DISTANCE in the loop below.
- *
- * If we are in the first block and the dangle write pushes
- * us to the next block, then loop below will not run
- * and dest is not used. Hence we do not need to update
- * it.
- *
- * If we are past the first block, then SOP_DISTANCE
- * was never added, so there is nothing to do.
- */
- }
-
- /* fill in rest of block */
- while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
- writeq(0, dest);
- dest += sizeof(u64);
- }
-
- /* finished with this buffer */
- this_cpu_dec(*pbuf->sc->buffers_allocated);
- preempt_enable();
-}
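-
-/*
- * A minimal sketch of the zero-fill at the end of seg_pio_copy_end()
- * above, assuming the block size is a power of two (PIO_BLOCK_MASK ==
- * PIO_BLOCK_SIZE - 1).  demo_pad_len() is illustrative and not part
- * of this driver: it returns how many bytes of zero padding the while
- * loop will write to reach the next block boundary.
- */
-static inline unsigned long demo_pad_len(unsigned long dest,
-					 unsigned long block_size)
-{
-	return (block_size - (dest & (block_size - 1))) & (block_size - 1);
-}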
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c
deleted file mode 100644
index 0a1d07458..000000000
--- a/drivers/staging/rdma/hfi1/platform.c
+++ /dev/null
@@ -1,893 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "hfi.h"
-#include "efivar.h"
-
-void get_platform_config(struct hfi1_devdata *dd)
-{
- int ret = 0;
- unsigned long size = 0;
- u8 *temp_platform_config = NULL;
-
- ret = read_hfi1_efi_var(dd, "configuration", &size,
- (void **)&temp_platform_config);
- if (ret) {
- dd_dev_info(dd,
- "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
- __func__);
- /* fall back to request firmware */
- platform_config_load = 1;
- goto bail;
- }
-
- dd->platform_config.data = temp_platform_config;
- dd->platform_config.size = size;
-
-bail:
- /* exit */;
-}
-
-void free_platform_config(struct hfi1_devdata *dd)
-{
- if (!platform_config_load) {
- /*
- * was loaded from EFI, release memory
- * allocated by read_efi_var
- */
- kfree(dd->platform_config.data);
- }
- /*
- * else do nothing, dispose_firmware will release
- * struct firmware platform_config on driver exit
- */
-}
-
-int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
-{
- u8 tx_ctrl_byte = on ? 0x0 : 0xF;
- int ret = 0;
-
- ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
- &tx_ctrl_byte, 1);
- /* we expected 1, so consider 0 an error */
- if (ret == 0)
- ret = -EIO;
- else if (ret == 1)
- ret = 0;
- return ret;
-}
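-
-/*
- * A minimal sketch of the count-to-errno normalization used by
- * set_qsfp_tx() above, assuming a write helper that returns the
- * number of bytes written or a negative errno.  demo_one_byte_ret()
- * is illustrative and not part of this driver.
- */
-static inline int demo_one_byte_ret(int nwritten)
-{
-	if (nwritten == 1)
-		return 0;	/* wrote the one byte we asked for */
-	return nwritten < 0 ? nwritten : -EIO;	/* short write -> -EIO */
-}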
-
-static int qual_power(struct hfi1_pportdata *ppd)
-{
- u32 cable_power_class = 0, power_class_max = 0;
- u8 *cache = ppd->qsfp_info.cache;
- int ret = 0;
-
- ret = get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
- SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
- if (ret)
- return ret;
-
- if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4)
- cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]);
- else
- cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]);
-
- if (cable_power_class <= 3 && cable_power_class > (power_class_max - 1))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
- else if (cable_power_class > 4 && cable_power_class > (power_class_max))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
- /*
- * cable_power_class will never have value 4 as this simply
- * means the high power settings are unused
- */
-
- if (ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
- dd_dev_info(
- ppd->dd,
- "%s: Port disabled due to system power restrictions\n",
- __func__);
- ret = -EPERM;
- }
- return ret;
-}
-
-static int qual_bitrate(struct hfi1_pportdata *ppd)
-{
- u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
- u8 *cache = ppd->qsfp_info.cache;
-
- if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
- cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
-
- if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
- cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
-
- if (ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
- dd_dev_info(
- ppd->dd,
- "%s: Cable failed bitrate check, disabling port\n",
- __func__);
- return -EPERM;
- }
- return 0;
-}
-
-static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
-{
- u8 cable_power_class = 0, power_ctrl_byte = 0;
- u8 *cache = ppd->qsfp_info.cache;
- int ret;
-
- if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4)
- cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]);
- else
- cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]);
-
- if (cable_power_class) {
- power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
-
- power_ctrl_byte |= 1;
- power_ctrl_byte &= ~(0x2);
-
- ret = qsfp_write(ppd, ppd->dd->hfi1_id,
- QSFP_PWR_CTRL_BYTE_OFFS,
- &power_ctrl_byte, 1);
- if (ret != 1)
- return -EIO;
-
- if (cable_power_class > 3) {
- /* > power class 4 */
- power_ctrl_byte |= (1 << 2);
- ret = qsfp_write(ppd, ppd->dd->hfi1_id,
- QSFP_PWR_CTRL_BYTE_OFFS,
- &power_ctrl_byte, 1);
- if (ret != 1)
- return -EIO;
- }
-
- /* SFF 8679 rev 1.7 LPMode Deassert time */
- msleep(300);
- }
- return 0;
-}
-
-static void apply_rx_cdr(struct hfi1_pportdata *ppd,
- u32 rx_preset_index,
- u8 *cdr_ctrl_byte)
-{
- u32 rx_preset = 0;
- u8 *cache = ppd->qsfp_info.cache;
-
- if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
- (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
- return;
-
- /* rx_preset is preset to zero to catch a failed config read */
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
- rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
- &rx_preset, 4);
-
- if (!rx_preset) {
- dd_dev_info(
- ppd->dd,
- "%s: RX_CDR_APPLY is set to disabled\n",
- __func__);
- return;
- }
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
- rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
- &rx_preset, 4);
-
- /* Expand cdr setting to all 4 lanes */
- rx_preset = (rx_preset | (rx_preset << 1) |
- (rx_preset << 2) | (rx_preset << 3));
-
- if (rx_preset) {
- *cdr_ctrl_byte |= rx_preset;
- } else {
- *cdr_ctrl_byte &= rx_preset;
- /* Preserve current TX CDR status */
- *cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
- }
-}
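-
-/*
- * A minimal sketch of the lane-expansion trick used above, assuming
- * the preset read from the config is a single 0/1 enable bit:
- * OR-ing in three shifted copies replicates bit 0 into bits 0-3,
- * one enable bit per lane.  demo_expand_lanes() is illustrative and
- * not part of this driver.
- */
-static inline u8 demo_expand_lanes(u8 enable)
-{
-	u8 v = enable & 0x1;	/* keep only the enable bit */
-
-	return v | (v << 1) | (v << 2) | (v << 3);	/* 0x0 or 0xF */
-}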
-
-static void apply_tx_cdr(struct hfi1_pportdata *ppd,
- u32 tx_preset_index,
- u8 *ctr_ctrl_byte)
-{
- u32 tx_preset;
- u8 *cache = ppd->qsfp_info.cache;
-
- if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
- (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
- return;
-
- get_platform_config_field(
- ppd->dd,
- PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
- TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
-
- if (!tx_preset) {
- dd_dev_info(
- ppd->dd,
- "%s: TX_CDR_APPLY is set to disabled\n",
- __func__);
- return;
- }
- get_platform_config_field(
- ppd->dd,
- PLATFORM_CONFIG_TX_PRESET_TABLE,
- tx_preset_index,
- TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
-
- /* Expand cdr setting to all 4 lanes */
- tx_preset = (tx_preset | (tx_preset << 1) |
- (tx_preset << 2) | (tx_preset << 3));
-
- if (tx_preset)
- *ctr_ctrl_byte |= (tx_preset << 4);
- else
- /* Preserve current/determined RX CDR status */
- *ctr_ctrl_byte &= ((tx_preset << 4) | 0xF);
-}
-
-static void apply_cdr_settings(
- struct hfi1_pportdata *ppd, u32 rx_preset_index,
- u32 tx_preset_index)
-{
- u8 *cache = ppd->qsfp_info.cache;
- u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
-
- apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
-
- apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
-
- qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
- &cdr_ctrl_byte, 1);
-}
-
-static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
-{
- u8 *cache = ppd->qsfp_info.cache;
- u8 tx_eq;
-
- if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
- return;
- /* Disable adaptive TX EQ if present */
- tx_eq = cache[(128 * 3) + 241];
- tx_eq &= 0xF0;
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
-}
-
-static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
-{
- u8 *cache = ppd->qsfp_info.cache;
- u32 tx_preset;
- u8 tx_eq;
-
- if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
- return;
-
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
- tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
- &tx_preset, 4);
- if (!tx_preset) {
- dd_dev_info(
- ppd->dd,
- "%s: TX_EQ_APPLY is set to disabled\n",
- __func__);
- return;
- }
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
- tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
- &tx_preset, 4);
-
- if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
- dd_dev_info(
- ppd->dd,
- "%s: TX EQ %x unsupported\n",
- __func__, tx_preset);
-
- dd_dev_info(
- ppd->dd,
- "%s: Applying EQ %x\n",
- __func__, cache[608] & 0xF0);
-
- tx_preset = (cache[608] & 0xF0) >> 4;
- }
-
- tx_eq = tx_preset | (tx_preset << 4);
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
-}
-
-static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
-{
- u32 rx_preset;
- u8 rx_eq, *cache = ppd->qsfp_info.cache;
-
- if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
- return;
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
- rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
- &rx_preset, 4);
-
- if (!rx_preset) {
- dd_dev_info(
- ppd->dd,
- "%s: RX_EMP_APPLY is set to disabled\n",
- __func__);
- return;
- }
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
- rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
- &rx_preset, 4);
-
- if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
- dd_dev_info(
- ppd->dd,
- "%s: Requested RX EMP %x\n",
- __func__, rx_preset);
-
- dd_dev_info(
- ppd->dd,
- "%s: Applying supported EMP %x\n",
- __func__, cache[608] & 0xF);
-
- rx_preset = cache[608] & 0xF;
- }
-
- rx_eq = rx_preset | (rx_preset << 4);
-
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
-}
-
-static void apply_eq_settings(struct hfi1_pportdata *ppd,
- u32 rx_preset_index, u32 tx_preset_index)
-{
- u8 *cache = ppd->qsfp_info.cache;
-
- /* no point going on w/o a page 3 */
- if (cache[2] & 4) {
- dd_dev_info(ppd->dd,
- "%s: Upper page 03 not present\n",
- __func__);
- return;
- }
-
- apply_tx_eq_auto(ppd);
-
- apply_tx_eq_prog(ppd, tx_preset_index);
-
- apply_rx_eq_emp(ppd, rx_preset_index);
-}
-
-static void apply_rx_amplitude_settings(
- struct hfi1_pportdata *ppd, u32 rx_preset_index,
- u32 tx_preset_index)
-{
- u32 rx_preset;
- u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
-
- /* no point going on w/o a page 3 */
- if (cache[2] & 4) {
- dd_dev_info(ppd->dd,
- "%s: Upper page 03 not present\n",
- __func__);
- return;
- }
- if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
- dd_dev_info(ppd->dd,
- "%s: RX_AMP_APPLY is set to disabled\n",
- __func__);
- return;
- }
-
- get_platform_config_field(ppd->dd,
- PLATFORM_CONFIG_RX_PRESET_TABLE,
- rx_preset_index,
- RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
- &rx_preset, 4);
-
- if (!rx_preset) {
- dd_dev_info(ppd->dd,
- "%s: RX_AMP_APPLY is set to disabled\n",
- __func__);
- return;
- }
- get_platform_config_field(ppd->dd,
- PLATFORM_CONFIG_RX_PRESET_TABLE,
- rx_preset_index,
- RX_PRESET_TABLE_QSFP_RX_AMP,
- &rx_preset, 4);
-
- dd_dev_info(ppd->dd,
- "%s: Requested RX AMP %x\n",
- __func__,
- rx_preset);
-
- for (i = 0; i < 4; i++) {
- if (cache[(128 * 3) + 225] & (1 << i)) {
- preferred = i;
- if (preferred == rx_preset)
- break;
- }
- }
-
- /*
- * Verify that preferred RX amplitude is not just a
- * fall through of the default
- */
- if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
- dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
- return;
- }
-
- dd_dev_info(ppd->dd,
- "%s: Applying RX AMP %x\n", __func__, preferred);
-
- rx_amp = preferred | (preferred << 4);
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
- qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
-}
-
-#define OPA_INVALID_INDEX 0xFFF
-
-static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
- u32 config_data, const char *message)
-{
- u8 i;
- int ret = HCMD_SUCCESS;
-
- for (i = 0; i < 4; i++) {
- ret = load_8051_config(ppd->dd, field_id, i, config_data);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(
- ppd->dd,
- "%s: %s for lane %u failed\n",
- message, __func__, i);
- }
- }
-}
-
-static void apply_tunings(
- struct hfi1_pportdata *ppd, u32 tx_preset_index,
- u8 tuning_method, u32 total_atten, u8 limiting_active)
-{
- int ret = 0;
- u32 config_data = 0, tx_preset = 0;
- u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
- u8 *cache = ppd->qsfp_info.cache;
-
- /* Enable external device config if channel is limiting active */
- read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
- GENERAL_CONFIG, &config_data);
- config_data |= limiting_active;
- ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
- GENERAL_CONFIG, config_data);
- if (ret != HCMD_SUCCESS)
- dd_dev_err(
- ppd->dd,
- "%s: Failed to set enable external device config\n",
- __func__);
-
- config_data = 0; /* re-init */
- /* Pass tuning method to 8051 */
- read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
- &config_data);
- config_data |= tuning_method;
- ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
- config_data);
- if (ret != HCMD_SUCCESS)
- dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
- __func__);
-
- /* Set same channel loss for both TX and RX */
- config_data = 0 | (total_atten << 16) | (total_atten << 24);
- apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
- "Setting channel loss");
-
- /* Inform 8051 of cable capabilities */
- if (ppd->qsfp_info.cache_valid) {
- external_device_config =
- ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
- ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
- ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
- (cache[QSFP_EQ_INFO_OFFS] & 0x4);
- ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
- GENERAL_CONFIG, &config_data);
- /* Clear, then set the external device config field */
- config_data &= ~(0xFF << 24);
- config_data |= (external_device_config << 24);
- ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
- GENERAL_CONFIG, config_data);
- if (ret != HCMD_SUCCESS)
- dd_dev_info(ppd->dd,
- "%s: Failed set ext device config params\n",
- __func__);
- }
-
- if (tx_preset_index == OPA_INVALID_INDEX) {
- if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
- dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
- __func__);
- return;
- }
-
- /* Following for limiting active channels only */
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
- TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
- precur = tx_preset;
-
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
- tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
- attn = tx_preset;
-
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
- tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
- postcur = tx_preset;
-
- config_data = precur | (attn << 8) | (postcur << 16);
-
- apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
- "Applying TX settings");
-}
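-
-/*
- * A minimal sketch of the per-lane TX packing done at the end of
- * apply_tunings() above, assuming precursor, attenuation and
- * postcursor each occupy one byte of the 8051 config word.
- * demo_pack_tx() is illustrative and not part of this driver.
- */
-static inline u32 demo_pack_tx(u8 precur, u8 attn, u8 postcur)
-{
-	return precur | ((u32)attn << 8) | ((u32)postcur << 16);
-}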
-
-static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
- u32 *ptr_rx_preset, u32 *ptr_total_atten)
-{
- int ret;
- u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
- u8 *cache = ppd->qsfp_info.cache;
-
- ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT);
- if (ret) {
- dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
- __func__, (int)ppd->dd->hfi1_id);
- return ret;
- }
-
- ppd->qsfp_info.limiting_active = 1;
-
- ret = set_qsfp_tx(ppd, 0);
- if (ret)
- goto bail_unlock;
-
- ret = qual_power(ppd);
- if (ret)
- goto bail_unlock;
-
- ret = qual_bitrate(ppd);
- if (ret)
- goto bail_unlock;
-
- if (ppd->qsfp_info.reset_needed) {
- reset_qsfp(ppd);
- ppd->qsfp_info.reset_needed = 0;
- refresh_qsfp_cache(ppd, &ppd->qsfp_info);
- } else {
- ppd->qsfp_info.reset_needed = 1;
- }
-
- ret = set_qsfp_high_power(ppd);
- if (ret)
- goto bail_unlock;
-
- if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
- ret = get_platform_config_field(
- ppd->dd,
- PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
- ptr_tx_preset, 4);
- if (ret) {
- *ptr_tx_preset = OPA_INVALID_INDEX;
- goto bail_unlock;
- }
- } else {
- ret = get_platform_config_field(
- ppd->dd,
- PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
- ptr_tx_preset, 4);
- if (ret) {
- *ptr_tx_preset = OPA_INVALID_INDEX;
- goto bail_unlock;
- }
- }
-
- ret = get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
- if (ret) {
- *ptr_rx_preset = OPA_INVALID_INDEX;
- goto bail_unlock;
- }
-
- if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
- else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
-
- apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
-
- apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
-
- apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
-
- ret = set_qsfp_tx(ppd, 1);
-
-bail_unlock:
- release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
- return ret;
-}
-
-static int tune_qsfp(struct hfi1_pportdata *ppd,
- u32 *ptr_tx_preset, u32 *ptr_rx_preset,
- u8 *ptr_tuning_method, u32 *ptr_total_atten)
-{
- u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
- u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
- int ret = 0;
- u8 *cache = ppd->qsfp_info.cache;
-
- switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
- case 0xA ... 0xB:
- ret = get_platform_config_field(
- ppd->dd,
- PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_LOCAL_ATTEN_25G,
- &platform_atten, 4);
- if (ret)
- return ret;
-
- if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
- cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
- else if ((lss & OPA_LINK_SPEED_12_5G) &&
- (lse & OPA_LINK_SPEED_12_5G))
- cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
-
- /* Fallback to configured attenuation if cable memory is bad */
- if (cable_atten == 0 || cable_atten > 36) {
- ret = get_platform_config_field(
- ppd->dd,
- PLATFORM_CONFIG_SYSTEM_TABLE, 0,
- SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
- &cable_atten, 4);
- if (ret)
- return ret;
- }
-
- ret = get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
- if (ret)
- return ret;
-
- *ptr_total_atten = platform_atten + cable_atten + remote_atten;
-
- *ptr_tuning_method = OPA_PASSIVE_TUNING;
- break;
- case 0x0 ... 0x9: /* fallthrough */
- case 0xC: /* fallthrough */
- case 0xE:
- ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
- ptr_total_atten);
- if (ret)
- return ret;
-
- *ptr_tuning_method = OPA_ACTIVE_TUNING;
- break;
- case 0xD: /* fallthrough */
- case 0xF:
- default:
- dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
- __func__);
- break;
- }
- return ret;
-}
-
-/*
- * This function communicates its success or failure via ppd->driver_link_ready.
- * Thus, it depends on its association with start_link(...) which checks
- * driver_link_ready before proceeding with the link negotiation and
- * initialization process.
- */
-void tune_serdes(struct hfi1_pportdata *ppd)
-{
- int ret = 0;
- u32 total_atten = 0;
- u32 remote_atten = 0, platform_atten = 0;
- u32 rx_preset_index, tx_preset_index;
- u8 tuning_method = 0, limiting_active = 0;
- struct hfi1_devdata *dd = ppd->dd;
-
- rx_preset_index = OPA_INVALID_INDEX;
- tx_preset_index = OPA_INVALID_INDEX;
-
- /* the link defaults to enabled */
- ppd->link_enabled = 1;
- /* the driver link ready state defaults to not ready */
- ppd->driver_link_ready = 0;
- ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
-
- /* Skip the tuning for testing (loopback != none) and simulations */
- if (loopback != LOOPBACK_NONE ||
- ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- ppd->driver_link_ready = 1;
- return;
- }
-
- ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_PORT_TYPE, &ppd->port_type,
- 4);
- if (ret)
- ppd->port_type = PORT_TYPE_UNKNOWN;
-
- switch (ppd->port_type) {
- case PORT_TYPE_DISCONNECTED:
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
- dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
- __func__);
- goto bail;
- case PORT_TYPE_FIXED:
- /* platform_atten, remote_atten pre-zeroed to catch error */
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
-
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
-
- total_atten = platform_atten + remote_atten;
-
- tuning_method = OPA_PASSIVE_TUNING;
- break;
- case PORT_TYPE_VARIABLE:
- if (qsfp_mod_present(ppd)) {
- /*
- * platform_atten, remote_atten pre-zeroed to
- * catch error
- */
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_LOCAL_ATTEN_25G,
- &platform_atten, 4);
-
- get_platform_config_field(
- ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_REMOTE_ATTEN_25G,
- &remote_atten, 4);
-
- total_atten = platform_atten + remote_atten;
-
- tuning_method = OPA_PASSIVE_TUNING;
- } else
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
- break;
- case PORT_TYPE_QSFP:
- if (qsfp_mod_present(ppd)) {
- refresh_qsfp_cache(ppd, &ppd->qsfp_info);
-
- if (ppd->qsfp_info.cache_valid) {
- ret = tune_qsfp(ppd,
- &tx_preset_index,
- &rx_preset_index,
- &tuning_method,
- &total_atten);
-
- /*
- * We may have modified the QSFP memory, so
- * update the cache to reflect the changes
- */
- refresh_qsfp_cache(ppd, &ppd->qsfp_info);
- if (ret)
- goto bail;
-
- limiting_active =
- ppd->qsfp_info.limiting_active;
- } else {
- dd_dev_err(dd,
- "%s: Reading QSFP memory failed\n",
- __func__);
- goto bail;
- }
- } else
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(
- OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
- break;
- default:
- dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
- ppd->port_type = PORT_TYPE_UNKNOWN;
- tuning_method = OPA_UNKNOWN_TUNING;
- total_atten = 0;
- limiting_active = 0;
- tx_preset_index = OPA_INVALID_INDEX;
- break;
- }
-
- if (ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
- apply_tunings(ppd, tx_preset_index, tuning_method,
- total_atten, limiting_active);
-
- if (!ret)
- ppd->driver_link_ready = 1;
-
- return;
-bail:
- ppd->driver_link_ready = 0;
-}
diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/staging/rdma/hfi1/platform.h
deleted file mode 100644
index 19620cf54..000000000
--- a/drivers/staging/rdma/hfi1/platform.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#ifndef __PLATFORM_H
-#define __PLATFORM_H
-
-#define METADATA_TABLE_FIELD_START_SHIFT 0
-#define METADATA_TABLE_FIELD_START_LEN_BITS 15
-#define METADATA_TABLE_FIELD_LEN_SHIFT 16
-#define METADATA_TABLE_FIELD_LEN_LEN_BITS 16
-
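-/*
- * A minimal sketch of how the SHIFT/LEN_BITS pairs above are meant to
- * be consumed, assuming each metadata word packs a field's start
- * offset and bit length.  demo_get_field() is illustrative and not
- * part of this driver, e.g.:
- *   start = demo_get_field(meta, METADATA_TABLE_FIELD_START_SHIFT,
- *			    METADATA_TABLE_FIELD_START_LEN_BITS);
- */
-static inline u32 demo_get_field(u32 word, u32 shift, u32 len_bits)
-{
-	return (word >> shift) & ((1U << len_bits) - 1);
-}
-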
-/* Header structure */
-#define PLATFORM_CONFIG_HEADER_RECORD_IDX_SHIFT 0
-#define PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS 6
-#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT 16
-#define PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS 12
-#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT 28
-#define PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS 4
-
-enum platform_config_table_type_encoding {
- PLATFORM_CONFIG_TABLE_RESERVED,
- PLATFORM_CONFIG_SYSTEM_TABLE,
- PLATFORM_CONFIG_PORT_TABLE,
- PLATFORM_CONFIG_RX_PRESET_TABLE,
- PLATFORM_CONFIG_TX_PRESET_TABLE,
- PLATFORM_CONFIG_QSFP_ATTEN_TABLE,
- PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE,
- PLATFORM_CONFIG_TABLE_MAX
-};
-
-enum platform_config_system_table_fields {
- SYSTEM_TABLE_RESERVED,
- SYSTEM_TABLE_NODE_STRING,
- SYSTEM_TABLE_SYSTEM_IMAGE_GUID,
- SYSTEM_TABLE_NODE_GUID,
- SYSTEM_TABLE_REVISION,
- SYSTEM_TABLE_VENDOR_OUI,
- SYSTEM_TABLE_META_VERSION,
- SYSTEM_TABLE_DEVICE_ID,
- SYSTEM_TABLE_PARTITION_ENFORCEMENT_CAP,
- SYSTEM_TABLE_QSFP_POWER_CLASS_MAX,
- SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_12G,
- SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
- SYSTEM_TABLE_VARIABLE_TABLE_ENTRIES_PER_PORT,
- SYSTEM_TABLE_MAX
-};
-
-enum platform_config_port_table_fields {
- PORT_TABLE_RESERVED,
- PORT_TABLE_PORT_TYPE,
- PORT_TABLE_LOCAL_ATTEN_12G,
- PORT_TABLE_LOCAL_ATTEN_25G,
- PORT_TABLE_LINK_SPEED_SUPPORTED,
- PORT_TABLE_LINK_WIDTH_SUPPORTED,
- PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED,
- PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED,
- PORT_TABLE_VL_CAP,
- PORT_TABLE_MTU_CAP,
- PORT_TABLE_TX_LANE_ENABLE_MASK,
- PORT_TABLE_LOCAL_MAX_TIMEOUT,
- PORT_TABLE_REMOTE_ATTEN_12G,
- PORT_TABLE_REMOTE_ATTEN_25G,
- PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
- PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
- PORT_TABLE_RX_PRESET_IDX,
- PORT_TABLE_CABLE_REACH_CLASS,
- PORT_TABLE_MAX
-};
-
-enum platform_config_rx_preset_table_fields {
- RX_PRESET_TABLE_RESERVED,
- RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
- RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
- RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
- RX_PRESET_TABLE_QSFP_RX_CDR,
- RX_PRESET_TABLE_QSFP_RX_EMP,
- RX_PRESET_TABLE_QSFP_RX_AMP,
- RX_PRESET_TABLE_MAX
-};
-
-enum platform_config_tx_preset_table_fields {
- TX_PRESET_TABLE_RESERVED,
- TX_PRESET_TABLE_PRECUR,
- TX_PRESET_TABLE_ATTN,
- TX_PRESET_TABLE_POSTCUR,
- TX_PRESET_TABLE_QSFP_TX_CDR_APPLY,
- TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
- TX_PRESET_TABLE_QSFP_TX_CDR,
- TX_PRESET_TABLE_QSFP_TX_EQ,
- TX_PRESET_TABLE_MAX
-};
-
-enum platform_config_qsfp_attn_table_fields {
- QSFP_ATTEN_TABLE_RESERVED,
- QSFP_ATTEN_TABLE_TX_PRESET_IDX,
- QSFP_ATTEN_TABLE_RX_PRESET_IDX,
- QSFP_ATTEN_TABLE_MAX
-};
-
-enum platform_config_variable_settings_table_fields {
- VARIABLE_SETTINGS_TABLE_RESERVED,
- VARIABLE_SETTINGS_TABLE_TX_PRESET_IDX,
- VARIABLE_SETTINGS_TABLE_RX_PRESET_IDX,
- VARIABLE_SETTINGS_TABLE_MAX
-};
-
-struct platform_config {
- size_t size;
- const u8 *data;
-};
-
-struct platform_config_data {
- u32 *table;
- u32 *table_metadata;
- u32 num_table;
-};
-
-/*
- * This struct acts as a quick reference into the platform_data binary image
- * and is populated by parse_platform_config(...) depending on the specific
- * META_VERSION
- */
-struct platform_config_cache {
- u8 cache_valid;
- struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX];
-};
-
-static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
- 0,
- SYSTEM_TABLE_MAX,
- PORT_TABLE_MAX,
- RX_PRESET_TABLE_MAX,
- TX_PRESET_TABLE_MAX,
- QSFP_ATTEN_TABLE_MAX,
- VARIABLE_SETTINGS_TABLE_MAX
-};
-
-/* This section defines default values and encodings for the
- * fields defined for each table above
- */
-
-/*
- * =====================================================
- * System table encodings
- * =====================================================
- */
-#define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041
-#define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4
-
-/*
- * These power classes are the same as defined in SFF 8636 spec rev 2.4
- * describing byte 129 in table 6-16, except enumerated in a different order
- */
-enum platform_config_qsfp_power_class_encoding {
- QSFP_POWER_CLASS_1 = 1,
- QSFP_POWER_CLASS_2,
- QSFP_POWER_CLASS_3,
- QSFP_POWER_CLASS_4,
- QSFP_POWER_CLASS_5,
- QSFP_POWER_CLASS_6,
- QSFP_POWER_CLASS_7
-};
-
-/*
- * ====================================================
- * Port table encodings
- * ====================================================
- */
-enum platform_config_port_type_encoding {
- PORT_TYPE_UNKNOWN,
- PORT_TYPE_DISCONNECTED,
- PORT_TYPE_FIXED,
- PORT_TYPE_VARIABLE,
- PORT_TYPE_QSFP,
- PORT_TYPE_MAX
-};
-
-enum platform_config_link_speed_supported_encoding {
- LINK_SPEED_SUPP_12G = 1,
- LINK_SPEED_SUPP_25G,
- LINK_SPEED_SUPP_12G_25G,
- LINK_SPEED_SUPP_MAX
-};
-
-/*
- * This is a subset (not strict) of the link downgrades
- * supported. The link downgrades supported are expected
- * to be supplied to the driver by another entity such as
- * the fabric manager
- */
-enum platform_config_link_width_supported_encoding {
- LINK_WIDTH_SUPP_1X = 1,
- LINK_WIDTH_SUPP_2X,
- LINK_WIDTH_SUPP_2X_1X,
- LINK_WIDTH_SUPP_3X,
- LINK_WIDTH_SUPP_3X_1X,
- LINK_WIDTH_SUPP_3X_2X,
- LINK_WIDTH_SUPP_3X_2X_1X,
- LINK_WIDTH_SUPP_4X,
- LINK_WIDTH_SUPP_4X_1X,
- LINK_WIDTH_SUPP_4X_2X,
- LINK_WIDTH_SUPP_4X_2X_1X,
- LINK_WIDTH_SUPP_4X_3X,
- LINK_WIDTH_SUPP_4X_3X_1X,
- LINK_WIDTH_SUPP_4X_3X_2X,
- LINK_WIDTH_SUPP_4X_3X_2X_1X,
- LINK_WIDTH_SUPP_MAX
-};
-
-enum platform_config_virtual_lane_capability_encoding {
- VL_CAP_VL0 = 1,
- VL_CAP_VL0_1,
- VL_CAP_VL0_2,
- VL_CAP_VL0_3,
- VL_CAP_VL0_4,
- VL_CAP_VL0_5,
- VL_CAP_VL0_6,
- VL_CAP_VL0_7,
- VL_CAP_VL0_8,
- VL_CAP_VL0_9,
- VL_CAP_VL0_10,
- VL_CAP_VL0_11,
- VL_CAP_VL0_12,
- VL_CAP_VL0_13,
- VL_CAP_VL0_14,
- VL_CAP_MAX
-};
-
-/* Max MTU */
-enum platform_config_mtu_capability_encoding {
- MTU_CAP_256 = 1,
- MTU_CAP_512 = 2,
- MTU_CAP_1024 = 3,
- MTU_CAP_2048 = 4,
- MTU_CAP_4096 = 5,
- MTU_CAP_8192 = 6,
- MTU_CAP_10240 = 7
-};
-
-enum platform_config_local_max_timeout_encoding {
- LOCAL_MAX_TIMEOUT_10_MS = 1,
- LOCAL_MAX_TIMEOUT_100_MS,
- LOCAL_MAX_TIMEOUT_1_S,
- LOCAL_MAX_TIMEOUT_10_S,
- LOCAL_MAX_TIMEOUT_100_S,
- LOCAL_MAX_TIMEOUT_1000_S
-};
-
-enum link_tuning_encoding {
- OPA_PASSIVE_TUNING,
- OPA_ACTIVE_TUNING,
- OPA_UNKNOWN_TUNING
-};
-
-/* platform.c */
-void get_platform_config(struct hfi1_devdata *dd);
-void free_platform_config(struct hfi1_devdata *dd);
-int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
-void tune_serdes(struct hfi1_pportdata *ppd);
-
-#endif /*__PLATFORM_H*/
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
deleted file mode 100644
index 0401955b3..000000000
--- a/drivers/staging/rdma/hfi1/qp.c
+++ /dev/null
@@ -1,974 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <linux/hash.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/seq_file.h>
-#include <rdma/rdma_vt.h>
-#include <rdma/rdmavt_qp.h>
-
-#include "hfi.h"
-#include "qp.h"
-#include "trace.h"
-#include "verbs_txreq.h"
-
-unsigned int hfi1_qp_table_size = 256;
-module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-static void flush_tx_list(struct rvt_qp *qp);
-static int iowait_sleep(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *stx,
- unsigned seq);
-static void iowait_wakeup(struct iowait *wait, int reason);
-static void iowait_sdma_drained(struct iowait *wait);
-static void qp_pio_drain(struct rvt_qp *qp);
-
-static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map, unsigned off)
-{
- return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
-}
-
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static const u16 credit_table[31] = {
- 0, /* 0 */
- 1, /* 1 */
- 2, /* 2 */
- 3, /* 3 */
- 4, /* 4 */
- 6, /* 5 */
- 8, /* 6 */
- 12, /* 7 */
- 16, /* 8 */
- 24, /* 9 */
- 32, /* A */
- 48, /* B */
- 64, /* C */
- 96, /* D */
- 128, /* E */
- 192, /* F */
- 256, /* 10 */
- 384, /* 11 */
- 512, /* 12 */
- 768, /* 13 */
- 1024, /* 14 */
- 1536, /* 15 */
- 2048, /* 16 */
- 3072, /* 17 */
- 4096, /* 18 */
- 6144, /* 19 */
- 8192, /* 1A */
- 12288, /* 1B */
- 16384, /* 1C */
- 24576, /* 1D */
- 32768 /* 1E */
-};
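-
-/*
- * A minimal sketch of decoding with the table above, assuming the
- * same HFI1_AETH_CREDIT_SHIFT/MASK used by hfi1_get_credit() below
- * and that code 0x1F (HFI1_AETH_CREDIT_INVAL) means "unlimited".
- * demo_aeth_to_credits() is illustrative and not part of this driver.
- */
-static inline u16 demo_aeth_to_credits(u32 aeth)
-{
-	u32 code = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
-
-	return code < 31 ? credit_table[code] : 0;	/* 0x1F: no limit */
-}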
-
-static void flush_tx_list(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- while (!list_empty(&priv->s_iowait.tx_head)) {
- struct sdma_txreq *tx;
-
- tx = list_first_entry(
- &priv->s_iowait.tx_head,
- struct sdma_txreq,
- list);
- list_del_init(&tx->list);
- hfi1_put_txreq(
- container_of(tx, struct verbs_txreq, txreq));
- }
-}
-
-static void flush_iowait(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
- unsigned long flags;
-
- write_seqlock_irqsave(&dev->iowait_lock, flags);
- if (!list_empty(&priv->s_iowait.list)) {
- list_del_init(&priv->s_iowait.list);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
-}
-
-static inline int opa_mtu_enum_to_int(int mtu)
-{
- switch (mtu) {
- case OPA_MTU_8192: return 8192;
- case OPA_MTU_10240: return 10240;
- default: return -1;
- }
-}
-
-/**
- * verbs_mtu_enum_to_int - convert an MTU enum, including the OPA
- * extensions, to a byte count
- *
- * This function is what we would push to the core layer if we wanted to be a
- * "first class citizen". Instead we hide this here and rely on Verbs ULPs
- * to blindly pass the MTU enum value from the PathRecord to us.
- *
- * The actual flag used to determine "8k MTU" will change and is currently
- * unknown.
- */
-static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
-{
- int val = opa_mtu_enum_to_int((int)mtu);
-
- if (val > 0)
- return val;
- return ib_mtu_enum_to_int(mtu);
-}
-
-int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct ib_qp *ibqp = &qp->ibqp;
- struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_devdata *dd = dd_from_dev(dev);
- u8 sc;
-
- if (attr_mask & IB_QP_AV) {
- sc = ah_to_sc(ibqp->device, &attr->ah_attr);
- if (sc == 0xf)
- return -EINVAL;
-
- if (!qp_to_sdma_engine(qp, sc) &&
- dd->flags & HFI1_HAS_SEND_DMA)
- return -EINVAL;
-
- if (!qp_to_send_context(qp, sc))
- return -EINVAL;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
- if (sc == 0xf)
- return -EINVAL;
-
- if (!qp_to_sdma_engine(qp, sc) &&
- dd->flags & HFI1_HAS_SEND_DMA)
- return -EINVAL;
-
- if (!qp_to_send_context(qp, sc))
- return -EINVAL;
- }
-
- return 0;
-}
-
-void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct ib_qp *ibqp = &qp->ibqp;
- struct hfi1_qp_priv *priv = qp->priv;
-
- if (attr_mask & IB_QP_AV) {
- priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
- priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
- priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE &&
- attr->path_mig_state == IB_MIG_MIGRATED &&
- qp->s_mig_state == IB_MIG_ARMED) {
- qp->s_flags |= RVT_S_AHG_CLEAR;
- priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
- priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
- priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
- }
-}
-
-/**
- * hfi1_check_send_wqe - validate wqe
- * @qp: The qp
- * @wqe: The built wqe
- *
- * Validate the wqe.  This is called after the wqe
- * has been set up but prior to inserting it into
- * the ring.
- *
- * Returns a negative errno on failure, otherwise 0 or 1
- * (1 when the wqe is small enough to be sent by PIO)
- *
- */
-int hfi1_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
-{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct rvt_ah *ah;
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- case IB_QPT_UC:
- if (wqe->length > 0x80000000U)
- return -EINVAL;
- break;
- case IB_QPT_SMI:
- ah = ibah_to_rvtah(wqe->ud_wr.ah);
- if (wqe->length > (1 << ah->log_pmtu))
- return -EINVAL;
- break;
- case IB_QPT_GSI:
- case IB_QPT_UD:
- ah = ibah_to_rvtah(wqe->ud_wr.ah);
- if (wqe->length > (1 << ah->log_pmtu))
- return -EINVAL;
- if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
- return -EINVAL;
- break;
- default:
- break;
- }
- return wqe->length <= piothreshold;
-}
-
-/**
- * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 hfi1_compute_aeth(struct rvt_qp *qp)
-{
- u32 aeth = qp->r_msn & HFI1_MSN_MASK;
-
- if (qp->ibqp.srq) {
- /*
- * Shared receive queues don't generate credits.
- * Set the credit field to the invalid value.
- */
- aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
- } else {
- u32 min, max, x;
- u32 credits;
- struct rvt_rwq *wq = qp->r_rq.wq;
- u32 head;
- u32 tail;
-
- /* sanity check pointers before trusting them */
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- /*
- * Compute the number of credits available (RWQEs).
- * There is a small chance that the pair of reads are
- * not atomic, which is OK, since the fuzziness is
- * resolved as further ACKs go out.
- */
- credits = head - tail;
- if ((int)credits < 0)
- credits += qp->r_rq.size;
- /*
- * Binary search the credit table to find the code to
- * use.
- */
- min = 0;
- max = 31;
- for (;;) {
- x = (min + max) / 2;
- if (credit_table[x] == credits)
- break;
- if (credit_table[x] > credits) {
- max = x;
- } else {
- if (min == x)
- break;
- min = x;
- }
- }
- aeth |= x << HFI1_AETH_CREDIT_SHIFT;
- }
- return cpu_to_be32(aeth);
-}
-
-/**
- * _hfi1_schedule_send - schedule progress
- * @qp: the QP
- *
- * This schedules qp progress w/o regard to the s_flags.
- *
- * It is only used in the post send, which doesn't hold
- * the s_lock.
- */
-void _hfi1_schedule_send(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
-
- iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
- priv->s_sde ?
- priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(dd->node)));
-}
-
-static void qp_pio_drain(struct rvt_qp *qp)
-{
- struct hfi1_ibdev *dev;
- struct hfi1_qp_priv *priv = qp->priv;
-
- if (!priv->s_sendcontext)
- return;
- dev = to_idev(qp->ibqp.device);
- while (iowait_pio_pending(&priv->s_iowait)) {
- write_seqlock_irq(&dev->iowait_lock);
- hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
- write_sequnlock_irq(&dev->iowait_lock);
- iowait_pio_drain(&priv->s_iowait);
- write_seqlock_irq(&dev->iowait_lock);
- hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
- write_sequnlock_irq(&dev->iowait_lock);
- }
-}
-
-/**
- * hfi1_schedule_send - schedule progress
- * @qp: the QP
- *
- * This schedules qp progress and caller should hold
- * the s_lock.
- */
-void hfi1_schedule_send(struct rvt_qp *qp)
-{
- if (hfi1_send_ok(qp))
- _hfi1_schedule_send(qp);
-}
-
-/**
- * hfi1_get_credit - process the credit field of an incoming AETH
- * @qp: the qp whose send work queue may be restarted
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
-{
- u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
-
- /*
- * If the credit is invalid, we can send
- * as many packets as we like. Otherwise, we have to
- * honor the credit field.
- */
- if (credit == HFI1_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
- if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
- hfi1_schedule_send(qp);
- }
- }
- } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
- /* Compute new LSN (i.e., MSN + credit) */
- credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
- if (cmp_msn(credit, qp->s_lsn) > 0) {
- qp->s_lsn = credit;
- if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
- hfi1_schedule_send(qp);
- }
- }
- }
-}
-
-void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & flag) {
- qp->s_flags &= ~flag;
- trace_hfi1_qpwakeup(qp, flag);
- hfi1_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- /* Notify hfi1_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-static int iowait_sleep(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *stx,
- unsigned seq)
-{
- struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
- struct rvt_qp *qp;
- struct hfi1_qp_priv *priv;
- unsigned long flags;
- int ret = 0;
- struct hfi1_ibdev *dev;
-
- qp = tx->qp;
- priv = qp->priv;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- /*
- * If we couldn't queue the DMA request, save the info
- * and try again later rather than destroying the
- * buffer and undoing the side effects of the copy.
- */
- /* Make a common routine? */
- dev = &sde->dd->verbs_dev;
- list_add_tail(&stx->list, &wait->tx_head);
- write_seqlock(&dev->iowait_lock);
- if (sdma_progress(sde, seq, stx))
- goto eagain;
- if (list_empty(&priv->s_iowait.list)) {
- struct hfi1_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
-
- ibp->rvp.n_dmawait++;
- qp->s_flags |= RVT_S_WAIT_DMA_DESC;
- list_add_tail(&priv->s_iowait.list, &sde->dmawait);
- trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
- atomic_inc(&qp->refcount);
- }
- write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&qp->s_lock, flags);
- ret = -EBUSY;
- } else {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- hfi1_put_txreq(tx);
- }
- return ret;
-eagain:
- write_sequnlock(&dev->iowait_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- list_del_init(&stx->list);
- return -EAGAIN;
-}
-
-static void iowait_wakeup(struct iowait *wait, int reason)
-{
- struct rvt_qp *qp = iowait_to_qp(wait);
-
- WARN_ON(reason != SDMA_AVAIL_REASON);
- hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
-}
-
-static void iowait_sdma_drained(struct iowait *wait)
-{
- struct rvt_qp *qp = iowait_to_qp(wait);
- unsigned long flags;
-
- /*
- * This happens when the send engine notes
- * a QP in the error state and cannot
- * do the flush work until that QP's
- * sdma work has finished.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_DMA) {
- qp->s_flags &= ~RVT_S_WAIT_DMA;
- hfi1_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/**
- * qp_to_sdma_engine - map a qp to a send engine
- * @qp: the QP
- * @sc5: the 5 bit sc
- *
- * Return:
- * A send engine for the qp or NULL for SMI type qp.
- */
-struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct sdma_engine *sde;
-
- if (!(dd->flags & HFI1_HAS_SEND_DMA))
- return NULL;
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- return NULL;
- default:
- break;
- }
- sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
- return sde;
-}
-
-/*
- * qp_to_send_context - map a qp to a send context
- * @qp: the QP
- * @sc5: the 5 bit sc
- *
- * Return:
- * A send context for the qp
- */
-struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- /* SMA packets to VL15 */
- return dd->vld[15].sc;
- default:
- break;
- }
-
- return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
- sc5);
-}
-
-struct qp_iter {
- struct hfi1_ibdev *dev;
- struct rvt_qp *qp;
- int specials;
- int n;
-};
-
-struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
-{
- struct qp_iter *iter;
-
- iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return NULL;
-
- iter->dev = dev;
- iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
- if (qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
-
- return iter;
-}
-
-int qp_iter_next(struct qp_iter *iter)
-{
- struct hfi1_ibdev *dev = iter->dev;
- int n = iter->n;
- int ret = 1;
- struct rvt_qp *pqp = iter->qp;
- struct rvt_qp *qp;
-
- /*
- * The approach is to consider the special qps
- * as an additional table entries before the
- * real hash table. Since the qp code sets
- * the qp->next hash link to NULL, this works just fine.
- *
- * iter->specials is 2 * # ports
- *
- * n = 0..iter->specials are the special qp indices
- *
- * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
- * the potential hash bucket entries
- *
- */
- for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
- if (pqp) {
- qp = rcu_dereference(pqp->next);
- } else {
- if (n < iter->specials) {
- struct hfi1_pportdata *ppd;
- struct hfi1_ibport *ibp;
- int pidx;
-
- pidx = n % dev->rdi.ibdev.phys_port_cnt;
- ppd = &dd_from_dev(dev)->pport[pidx];
- ibp = &ppd->ibport_data;
-
- if (!(n & 1))
- qp = rcu_dereference(ibp->rvp.qp[0]);
- else
- qp = rcu_dereference(ibp->rvp.qp[1]);
- } else {
- qp = rcu_dereference(
- dev->rdi.qp_dev->qp_table[
- (n - iter->specials)]);
- }
- }
- pqp = qp;
- if (qp) {
- iter->qp = qp;
- iter->n = n;
- return 0;
- }
- }
- return ret;
-}
-
-static const char * const qp_type_str[] = {
- "SMI", "GSI", "RC", "UC", "UD",
-};
-
-static int qp_idle(struct rvt_qp *qp)
-{
- return
- qp->s_last == qp->s_acked &&
- qp->s_acked == qp->s_cur &&
- qp->s_cur == qp->s_tail &&
- qp->s_tail == qp->s_head;
-}
-
-void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
-{
- struct rvt_swqe *wqe;
- struct rvt_qp *qp = iter->qp;
- struct hfi1_qp_priv *priv = qp->priv;
- struct sdma_engine *sde;
- struct send_context *send_context;
-
- sde = qp_to_sdma_engine(qp, priv->s_sc);
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- send_context = qp_to_send_context(qp, priv->s_sc);
- seq_printf(s,
- "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
- iter->n,
- qp_idle(qp) ? "I" : "B",
- qp->ibqp.qp_num,
- atomic_read(&qp->refcount),
- qp_type_str[qp->ibqp.qp_type],
- qp->state,
- wqe ? wqe->wr.opcode : 0,
- qp->s_hdrwords,
- qp->s_flags,
- iowait_sdma_pending(&priv->s_iowait),
- iowait_pio_pending(&priv->s_iowait),
- !list_empty(&priv->s_iowait.list),
- qp->timeout,
- wqe ? wqe->ssn : 0,
- qp->s_lsn,
- qp->s_last_psn,
- qp->s_psn, qp->s_next_psn,
- qp->s_sending_psn, qp->s_sending_hpsn,
- qp->s_last, qp->s_acked, qp->s_cur,
- qp->s_tail, qp->s_head, qp->s_size,
- qp->s_avail,
- qp->remote_qpn,
- qp->remote_ah_attr.dlid,
- qp->remote_ah_attr.sl,
- qp->pmtu,
- qp->s_retry,
- qp->s_retry_cnt,
- qp->s_rnr_retry_cnt,
- sde,
- sde ? sde->this_idx : 0,
- send_context,
- send_context ? send_context->sw_index : 0,
- ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
- ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
- qp->pid);
-}
-
-void qp_comm_est(struct rvt_qp *qp)
-{
- qp->r_flags |= RVT_R_COMM_EST;
- if (qp->ibqp.event_handler) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_COMM_EST;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
-}
-
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp)
-{
- struct hfi1_qp_priv *priv;
-
- priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
- if (!priv)
- return ERR_PTR(-ENOMEM);
-
- priv->owner = qp;
-
- priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
- if (!priv->s_hdr) {
- kfree(priv);
- return ERR_PTR(-ENOMEM);
- }
- setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
- qp->s_timer.function = hfi1_rc_timeout;
- return priv;
-}
-
-void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- kfree(priv->s_hdr);
- kfree(priv);
-}
-
-unsigned free_all_qps(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *verbs_dev = container_of(rdi,
- struct hfi1_ibdev,
- rdi);
- struct hfi1_devdata *dd = container_of(verbs_dev,
- struct hfi1_devdata,
- verbs_dev);
- int n;
- unsigned qp_inuse = 0;
-
- for (n = 0; n < dd->num_pports; n++) {
- struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
-
- rcu_read_lock();
- if (rcu_dereference(ibp->rvp.qp[0]))
- qp_inuse++;
- if (rcu_dereference(ibp->rvp.qp[1]))
- qp_inuse++;
- rcu_read_unlock();
- }
-
- return qp_inuse;
-}
-
-void flush_qp_waiters(struct rvt_qp *qp)
-{
- flush_iowait(qp);
- hfi1_stop_rc_timers(qp);
-}
-
-void stop_send_queue(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- cancel_work_sync(&priv->s_iowait.iowork);
- hfi1_del_timers_sync(qp);
-}
-
-void quiesce_qp(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- iowait_sdma_drain(&priv->s_iowait);
- qp_pio_drain(qp);
- flush_tx_list(qp);
-}
-
-void notify_qp_reset(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- iowait_init(
- &priv->s_iowait,
- 1,
- _hfi1_do_send,
- iowait_sleep,
- iowait_wakeup,
- iowait_sdma_drained);
- priv->r_adefered = 0;
- clear_ahg(qp);
-}
-
-/*
- * Switch to alternate path.
- * The QP s_lock should be held and interrupts disabled.
- */
-void hfi1_migrate_qp(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct ib_event ev;
-
- qp->s_mig_state = IB_MIG_MIGRATED;
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= RVT_S_AHG_CLEAR;
- priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
- priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-}
-
-int mtu_to_path_mtu(u32 mtu)
-{
- return mtu_to_enum(mtu, OPA_MTU_8192);
-}
-
-u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
-{
- u32 mtu;
- struct hfi1_ibdev *verbs_dev = container_of(rdi,
- struct hfi1_ibdev,
- rdi);
- struct hfi1_devdata *dd = container_of(verbs_dev,
- struct hfi1_devdata,
- verbs_dev);
- struct hfi1_ibport *ibp;
- u8 sc, vl;
-
- ibp = &dd->pport[qp->port_num - 1].ibport_data;
- sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- vl = sc_to_vlt(dd, sc);
-
- mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
- if (vl < PER_VL_SEND_CONTEXTS)
- mtu = min_t(u32, mtu, dd->vld[vl].mtu);
- return mtu;
-}
-
-int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- struct ib_qp_attr *attr)
-{
- int mtu, pidx = qp->port_num - 1;
- struct hfi1_ibdev *verbs_dev = container_of(rdi,
- struct hfi1_ibdev,
- rdi);
- struct hfi1_devdata *dd = container_of(verbs_dev,
- struct hfi1_devdata,
- verbs_dev);
- mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
- if (mtu == -1)
- return -1; /* values less than 0 are error */
-
- if (mtu > dd->pport[pidx].ibmtu)
- return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
- else
- return attr->path_mtu;
-}
-
-void notify_error_qp(struct rvt_qp *qp)
-{
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
- struct hfi1_qp_priv *priv = qp->priv;
-
- write_seqlock(&dev->iowait_lock);
- if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
- list_del_init(&priv->s_iowait.list);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
- write_sequnlock(&dev->iowait_lock);
-
- if (!(qp->s_flags & RVT_S_BUSY)) {
- qp->s_hdrwords = 0;
- if (qp->s_rdma_mr) {
- rvt_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- flush_tx_list(qp);
- }
-}
-
-/**
- * hfi1_error_port_qps - put a port's RC/UC qps into error state
- * @ibp: the ibport.
- * @sl: the service level.
- *
- * This function places all RC/UC qps with a given service level into error
- * state. It is generally called to force upper layer apps to abandon stale qps
- * after an sl->sc mapping change.
- */
-void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
-{
- struct rvt_qp *qp = NULL;
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
- int n;
- int lastwqe;
- struct ib_event ev;
-
- rcu_read_lock();
-
- /* Deal only with RC/UC qps that use the given SL. */
- for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
- for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next)) {
- if (qp->port_num == ppd->port &&
- (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) &&
- qp->remote_ah_attr.sl == sl &&
- (ib_rvt_state_ops[qp->state] &
- RVT_POST_SEND_OK)) {
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_hlock);
- spin_lock(&qp->s_lock);
- lastwqe = rvt_error_qp(qp,
- IB_WC_WR_FLUSH_ERR);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->s_hlock);
- spin_unlock_irq(&qp->r_lock);
- if (lastwqe) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event =
- IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev,
- qp->ibqp.qp_context);
- }
- }
- }
- }
-
- rcu_read_unlock();
-}
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h
deleted file mode 100644
index e7bc8d6cf..000000000
--- a/drivers/staging/rdma/hfi1/qp.h
+++ /dev/null
@@ -1,160 +0,0 @@
-#ifndef _QP_H
-#define _QP_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/hash.h>
-#include <rdma/rdmavt_qp.h>
-#include "verbs.h"
-#include "sdma.h"
-
-extern unsigned int hfi1_qp_table_size;
-
-/*
- * clear_ahg - clear ahg from QP
- */
-static inline void clear_ahg(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- priv->s_hdr->ahgcount = 0;
- qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
- if (priv->s_sde && qp->s_ahgidx >= 0)
- sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
- qp->s_ahgidx = -1;
-}
-
-/**
- * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 hfi1_compute_aeth(struct rvt_qp *qp);
-
-/**
- * hfi1_create_qp - create a queue pair for a device
- * @ibpd: the protection domain whose device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: user data for libibverbs.so
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-/**
- * hfi1_get_credit - flush the send work queue of a QP
- * @qp: the qp whose send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void hfi1_get_credit(struct rvt_qp *qp, u32 aeth);
-
-/**
- * hfi1_qp_wakeup - wake up on the indicated event
- * @qp: the QP
- * @flag: the flag on which the qp is stalled
- */
-void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag);
-
-struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5);
-struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
-
-struct qp_iter;
-
-/**
- * qp_iter_init - initialize the iterator for the qp hash list
- * @dev: the hfi1_ibdev
- */
-struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev);
-
-/**
- * qp_iter_next - Find the next qp in the hash list
- * @iter: the iterator for the qp hash list
- */
-int qp_iter_next(struct qp_iter *iter);
-
-/**
- * qp_iter_print - print the qp information to seq_file
- * @s: the seq_file to emit the qp information on
- * @iter: the iterator for the qp hash list
- */
-void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
-
-/**
- * qp_comm_est - handle trap with QP established
- * @qp: the QP
- */
-void qp_comm_est(struct rvt_qp *qp);
-
-void _hfi1_schedule_send(struct rvt_qp *qp);
-void hfi1_schedule_send(struct rvt_qp *qp);
-
-void hfi1_migrate_qp(struct rvt_qp *qp);
-
-/*
- * Functions provided by hfi1 driver for rdmavt to use
- */
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
-void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-unsigned free_all_qps(struct rvt_dev_info *rdi);
-void notify_qp_reset(struct rvt_qp *qp);
-int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- struct ib_qp_attr *attr);
-void flush_qp_waiters(struct rvt_qp *qp);
-void notify_error_qp(struct rvt_qp *qp);
-void stop_send_queue(struct rvt_qp *qp);
-void quiesce_qp(struct rvt_qp *qp);
-u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
-int mtu_to_path_mtu(u32 mtu);
-void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl);
-#endif /* _QP_H */
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c
deleted file mode 100644
index 9ed196301..000000000
--- a/drivers/staging/rdma/hfi1/qsfp.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "hfi.h"
-#include "twsi.h"
-
-/*
- * QSFP support for hfi driver, using "Two Wire Serial Interface" driver
- * in twsi.c
- */
-#define I2C_MAX_RETRY 4
-
-/*
- * Raw i2c write. No set-up or lock checking.
- */
-static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
- int offset, void *bp, int len)
-{
- struct hfi1_devdata *dd = ppd->dd;
- int ret, cnt;
- u8 *buff = bp;
-
- cnt = 0;
- while (cnt < len) {
- int wlen = len - cnt;
-
- ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset,
- buff + cnt, wlen);
- if (ret) {
-			/* hfi1_twsi_blk_wr() returns 1 on error, else 0 */
- return -EIO;
- }
- offset += wlen;
- cnt += wlen;
- }
-
- /* Must wait min 20us between qsfp i2c transactions */
- udelay(20);
-
- return cnt;
-}
-
-/*
- * Caller must hold the i2c chain resource.
- */
-int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
- void *bp, int len)
-{
- int ret;
-
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
- return -EACCES;
-
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "I2C chain %d write interface reset failed\n",
- target);
- return ret;
- }
-
- return __i2c_write(ppd, target, i2c_addr, offset, bp, len);
-}
-
-/*
- * Raw i2c read. No set-up or lock checking.
- */
-static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
- int offset, void *bp, int len)
-{
- struct hfi1_devdata *dd = ppd->dd;
- int ret, cnt, pass = 0;
- int orig_offset = offset;
-
- cnt = 0;
- while (cnt < len) {
- int rlen = len - cnt;
-
- ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
- bp + cnt, rlen);
-		/* Some QSFPs fail on the first try; retry as a workaround */
- if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
- continue;
- if (ret) {
-			/* hfi1_twsi_blk_rd() returns 1 on error, else 0 */
- ret = -EIO;
- goto exit;
- }
- offset += rlen;
- cnt += rlen;
- }
-
- ret = cnt;
-
-exit:
- if (ret < 0) {
- hfi1_dev_porterr(dd, ppd->port,
- "I2C chain %d read failed, addr 0x%x, offset 0x%x, len %d\n",
- target, i2c_addr, orig_offset, len);
- }
-
- /* Must wait min 20us between qsfp i2c transactions */
- udelay(20);
-
- return ret;
-}
-
-/*
- * Caller must hold the i2c chain resource.
- */
-int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
- void *bp, int len)
-{
- int ret;
-
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
- return -EACCES;
-
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "I2C chain %d read interface reset failed\n",
- target);
- return ret;
- }
-
- return __i2c_read(ppd, target, i2c_addr, offset, bp, len);
-}
-
-/*
- * Write page n, offset m of QSFP memory as defined by SFF 8636
- * by writing @addr = ((256 * n) + m)
- *
- * Caller must hold the i2c chain resource.
- */
-int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len)
-{
- int count = 0;
- int offset;
- int nwrite;
- int ret;
- u8 page;
-
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
- return -EACCES;
-
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d write interface reset failed\n",
- target);
- return ret;
- }
-
- while (count < len) {
- /*
- * Set the qsfp page based on a zero-based address
- * and a page size of QSFP_PAGESIZE bytes.
- */
- page = (u8)(addr / QSFP_PAGESIZE);
-
- ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
- QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
- if (ret != 1) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
- target, ret);
- ret = -EIO;
- break;
- }
-
- offset = addr % QSFP_PAGESIZE;
- nwrite = len - count;
- /* truncate write to boundary if crossing boundary */
- if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY)
- nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
-
- ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
- offset, bp + count, nwrite);
- if (ret <= 0) /* stop on error or nothing written */
- break;
-
- count += ret;
- addr += ret;
- }
-
- if (ret < 0)
- return ret;
- return count;
-}
-
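As a worked example of the flat addressing used by qsfp_write() above and qsfp_read() below: addr encodes (page, offset) as 256 * page + offset, and each transfer is clipped at QSFP_RW_BOUNDARY. The helper below is hypothetical, shown only to make the arithmetic concrete:

/* hypothetical helper: byte m of page n -> flat address */
static inline int qsfp_flat_addr(int page, int offset)
{
	return QSFP_PAGESIZE * page + offset;	/* e.g. page 3, byte 2 -> 770 */
}

/*
 * Clipping example: a 100 byte write starting at flat addr 100 is split
 * into 28 bytes (addr 100..127) and 72 bytes (addr 128..199), since a
 * transfer may not cross a 128 byte boundary.
 */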
-/*
- * Perform a stand-alone single QSFP write. Acquire the resource, do the
- * write, then release the resource.
- */
-int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u32 resource = qsfp_resource(dd);
- int ret;
-
- ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
- if (ret)
- return ret;
- ret = qsfp_write(ppd, target, addr, bp, len);
- release_chip_resource(dd, resource);
-
- return ret;
-}
-
-/*
- * Access page n, offset m of QSFP memory as defined by SFF 8636
- * by reading @addr = ((256 * n) + m)
- *
- * Caller must hold the i2c chain resource.
- */
-int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len)
-{
- int count = 0;
- int offset;
- int nread;
- int ret;
- u8 page;
-
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
- return -EACCES;
-
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d read interface reset failed\n",
- target);
- return ret;
- }
-
- while (count < len) {
- /*
- * Set the qsfp page based on a zero-based address
- * and a page size of QSFP_PAGESIZE bytes.
- */
- page = (u8)(addr / QSFP_PAGESIZE);
- ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
- QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
- if (ret != 1) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
- target, ret);
- ret = -EIO;
- break;
- }
-
- offset = addr % QSFP_PAGESIZE;
- nread = len - count;
- /* truncate read to boundary if crossing boundary */
- if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
- nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
-
- /* QSFPs require a 5-10msec delay after write operations */
- mdelay(5);
- ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
- offset, bp + count, nread);
- if (ret <= 0) /* stop on error or nothing read */
- break;
-
- count += ret;
- addr += ret;
- }
-
- if (ret < 0)
- return ret;
- return count;
-}
-
-/*
- * Perform a stand-alone single QSFP read. Acquire the resource, do the
- * read, then release the resource.
- */
-int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u32 resource = qsfp_resource(dd);
- int ret;
-
- ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
- if (ret)
- return ret;
- ret = qsfp_read(ppd, target, addr, bp, len);
- release_chip_resource(dd, resource);
-
- return ret;
-}
-
-/*
- * This function caches the QSFP memory range in 128 byte chunks.
- * As an example, the next byte after address 255 is byte 128 from
- * upper page 01H (if it exists) rather than byte 0 from lower page 00H.
- * Access page n, offset m of QSFP memory as defined by SFF 8636
- * in the cache by reading byte ((128 * n) + m).
- * The calls to qsfp_{read,write} in this function correctly handle the
- * address map difference between this mapping and the mapping implemented
- * by those functions.
- */
-int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
-{
- u32 target = ppd->dd->hfi1_id;
- int ret;
- unsigned long flags;
- u8 *cache = &cp->cache[0];
-
- /* ensure sane contents on invalid reads, for cable swaps */
- memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
- spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
- ppd->qsfp_info.cache_valid = 0;
- spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
-
- if (!qsfp_mod_present(ppd)) {
- ret = -ENODEV;
- goto bail_no_release;
- }
-
- ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT);
- if (ret)
- goto bail_no_release;
-
- ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE);
- if (ret != QSFP_PAGESIZE) {
- dd_dev_info(ppd->dd,
- "%s: Page 0 read failed, expected %d, got %d\n",
- __func__, QSFP_PAGESIZE, ret);
- goto bail;
- }
-
-	/* Is paging enabled? (byte 2 bit 2 set means flat memory only) */
-	if (!(cache[2] & 4)) {
-		/* Paging enabled, page 03 required */
- if ((cache[195] & 0xC0) == 0xC0) {
- /* all */
- ret = qsfp_read(ppd, target, 384, cache + 256, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- ret = qsfp_read(ppd, target, 640, cache + 384, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- ret = qsfp_read(ppd, target, 896, cache + 512, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- } else if ((cache[195] & 0x80) == 0x80) {
- /* only page 2 and 3 */
- ret = qsfp_read(ppd, target, 640, cache + 384, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- ret = qsfp_read(ppd, target, 896, cache + 512, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- } else if ((cache[195] & 0x40) == 0x40) {
- /* only page 1 and 3 */
- ret = qsfp_read(ppd, target, 384, cache + 256, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- ret = qsfp_read(ppd, target, 896, cache + 512, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- } else {
- /* only page 3 */
- ret = qsfp_read(ppd, target, 896, cache + 512, 128);
- if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s failed\n", __func__);
- goto bail;
- }
- }
- }
-
- release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
-
- spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
- ppd->qsfp_info.cache_valid = 1;
- ppd->qsfp_info.cache_refresh_required = 0;
- spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
-
- return 0;
-
-bail:
- release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
-bail_no_release:
- memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
- return ret;
-}
-
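The cache filled by refresh_qsfp_cache() above packs the pages contiguously, so a cache index differs from the flat qsfp_read() address; the mapping, read off the calls above, is:

/*
 * cache[  0..127]  lower page 00h  read from flat addr   0
 * cache[128..255]  upper page 00h  read from flat addr 128
 * cache[256..383]  upper page 01h  read from flat addr 384 (256*1 + 128)
 * cache[384..511]  upper page 02h  read from flat addr 640 (256*2 + 128)
 * cache[512..639]  upper page 03h  read from flat addr 896 (256*3 + 128)
 */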
-const char * const hfi1_qsfp_devtech[16] = {
- "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
- "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
- "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
- "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
-};
-
-#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
-#define QSFP_DEFAULT_HDR_CNT 224
-
-static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
-
-int qsfp_mod_present(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u64 reg;
-
- reg = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
- return !(reg & QSFP_HFI0_MODPRST_N);
-}
-
-/*
- * This function maps QSFP memory addresses in 128 byte chunks in the following
- * fashion per the CableInfo SMA query definition in the IBA 1.3 spec/OPA Gen 1
- * spec
- * For addr 000-127, lower page 00h
- * For addr 128-255, upper page 00h
- * For addr 256-383, upper page 01h
- * For addr 384-511, upper page 02h
- * For addr 512-639, upper page 03h
- *
- * For addresses beyond this range, the out-of-range portion of the data
- * buffer is returned set to 0.
- * For optional upper pages that are not valid, the corresponding range of
- * bytes in the data buffer is likewise returned set to 0.
- */
-int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
- u8 *data)
-{
- struct hfi1_pportdata *ppd;
- u32 excess_len = 0;
- int ret = 0;
-
- if (port_num > dd->num_pports || port_num < 1) {
- dd_dev_info(dd, "%s: Invalid port number %d\n",
- __func__, port_num);
- ret = -EINVAL;
- goto set_zeroes;
- }
-
- ppd = dd->pport + (port_num - 1);
- if (!qsfp_mod_present(ppd)) {
- ret = -ENODEV;
- goto set_zeroes;
- }
-
- if (!ppd->qsfp_info.cache_valid) {
- ret = -EINVAL;
- goto set_zeroes;
- }
-
- if (addr >= (QSFP_MAX_NUM_PAGES * 128)) {
- ret = -ERANGE;
- goto set_zeroes;
- }
-
- if ((addr + len) > (QSFP_MAX_NUM_PAGES * 128)) {
- excess_len = (addr + len) - (QSFP_MAX_NUM_PAGES * 128);
- memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len));
- data += (len - excess_len);
- goto set_zeroes;
- }
-
- memcpy(data, &ppd->qsfp_info.cache[addr], len);
- return 0;
-
-set_zeroes:
- memset(data, 0, excess_len);
- return ret;
-}
-
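Two worked examples of the clamping in get_cable_info() above; note the CableInfo addr is the cache index (128 byte chunks), not the flat qsfp_read() address:

/*
 * addr 300          -> byte 44 of upper page 01h (300 - 256).
 * addr 600, len 100 -> copies cache[600..639] (40 bytes) and zero-fills
 *                      the remaining 60 bytes, since only
 *                      QSFP_MAX_NUM_PAGES * 128 = 640 bytes are cached.
 */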
-int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
-{
- u8 *cache = &ppd->qsfp_info.cache[0];
- u8 bin_buff[QSFP_DUMP_CHUNK];
- char lenstr[6];
- int sofar;
- int bidx = 0;
- u8 *atten = &cache[QSFP_ATTEN_OFFS];
- u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
-
- sofar = 0;
- lenstr[0] = ' ';
- lenstr[1] = '\0';
-
- if (ppd->qsfp_info.cache_valid) {
- if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
- sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
- pwr_codes +
- (QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]) * 4));
-
- sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
- lenstr,
- hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
- QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
- QSFP_OUI(vendor_oui));
-
- sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
- QSFP_PN_LEN, &cache[QSFP_PN_OFFS]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
- QSFP_REV_LEN, &cache[QSFP_REV_OFFS]);
-
- if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
- sofar += scnprintf(buf + sofar, len - sofar,
- "Atten:%d, %d\n",
- QSFP_ATTEN_SDR(atten),
- QSFP_ATTEN_DDR(atten));
-
- sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
- QSFP_SN_LEN, &cache[QSFP_SN_OFFS]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
- QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
- QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]);
-
- while (bidx < QSFP_DEFAULT_HDR_CNT) {
- int iidx;
-
- memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
- for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
- sofar += scnprintf(buf + sofar, len - sofar,
- " %02X", bin_buff[iidx]);
- }
- sofar += scnprintf(buf + sofar, len - sofar, "\n");
- bidx += QSFP_DUMP_CHUNK;
- }
- }
- return sofar;
-}
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h
deleted file mode 100644
index 831fe4cf1..000000000
--- a/drivers/staging/rdma/hfi1/qsfp.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-/* QSFP support common definitions, for hfi driver */
-
-#define QSFP_DEV 0xA0
-#define QSFP_PWR_LAG_MSEC 2000
-#define QSFP_MODPRS_LAG_MSEC 20
-/* 128 byte pages, per SFF 8636 rev 2.4 */
-#define QSFP_MAX_NUM_PAGES 5
-
-/*
- * Below are masks for QSFP pins. Pins are the same for HFI0 and HFI1.
- * _N means asserted low
- */
-#define QSFP_HFI0_I2CCLK BIT(0)
-#define QSFP_HFI0_I2CDAT BIT(1)
-#define QSFP_HFI0_RESET_N BIT(2)
-#define QSFP_HFI0_INT_N BIT(3)
-#define QSFP_HFI0_MODPRST_N BIT(4)
-
-/* QSFP is paged at 256 bytes */
-#define QSFP_PAGESIZE 256
-/* Reads/writes cannot cross 128 byte boundaries */
-#define QSFP_RW_BOUNDARY 128
-
-/* number of bytes in i2c offset for QSFP devices */
-#define __QSFP_OFFSET_SIZE 1 /* num address bytes */
-#define QSFP_OFFSET_SIZE (__QSFP_OFFSET_SIZE << 8) /* shifted value */
-
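Callers in qsfp.c pass QSFP_DEV | QSFP_OFFSET_SIZE as the i2c_addr argument; on the evidence of the defines above, the high byte is presumed to tell the TWSI helpers how many offset/address bytes to emit:

/*
 * Example: QSFP_DEV | QSFP_OFFSET_SIZE == 0xA0 | 0x100 == 0x1A0, i.e.
 * device address 0xA0 in the low byte and one address byte encoded in
 * the high byte.
 */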
-/* Defined fields that Intel requires of qualified cables */
-/* Byte 0 is Identifier, not checked */
-/* Byte 1 is reserved "status MSB" */
-#define QSFP_TX_CTRL_BYTE_OFFS 86
-#define QSFP_PWR_CTRL_BYTE_OFFS 93
-#define QSFP_CDR_CTRL_BYTE_OFFS 98
-
-#define QSFP_PAGE_SELECT_BYTE_OFFS 127
-/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
-#define QSFP_MOD_ID_OFFS 128
-/*
- * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
- * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
- */
-#define QSFP_MOD_PWR_OFFS 129
-/* Byte 130 is Connector type. Not Intel req'd */
-/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
-/* Byte 139 is encoding. code 0x01 is 8b10b. Not Intel req'd */
-/* byte 140 is nominal bit-rate, in units of 100Mbits/sec */
-#define QSFP_NOM_BIT_RATE_100_OFFS 140
-/* Byte 141 is Extended Rate Select. Not Intel req'd */
-/* Bytes 142..145 are lengths for various fiber types. Not Intel req'd */
-/* Byte 146 is length for Copper. Units of 1 meter */
-#define QSFP_MOD_LEN_OFFS 146
-/*
- * Byte 147 is Device technology. D0..3 not Intel req'd
- * D4..7 select from 15 choices, translated by table:
- */
-#define QSFP_MOD_TECH_OFFS 147
-extern const char *const hfi1_qsfp_devtech[16];
-/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
-#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
-/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
-#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
-/* Attenuation should be valid for copper other than full/near Eq */
-#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
-/* Length is only valid if technology is "copper" */
-#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
-#define QSFP_TECH_1490 9
-
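Each mask above is a 16-entry boolean table indexed by the upper nibble of the device-technology byte; checking QSFP_IS_CU against hfi1_qsfp_devtech[] makes this concrete:

/*
 * 0xED00 has bits {8, 10, 11, 13, 14, 15} set -- exactly the "Cu ..."
 * entries of hfi1_qsfp_devtech[].  E.g. tech byte 0xA0 ("Cu NoEq",
 * index 10): (0xED00 >> 10) & 1 == 1, so QSFP_IS_CU(0xA0) is true.
 */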
-#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
- oui[2])
-#define QSFP_OUI_AMPHENOL 0x415048
-#define QSFP_OUI_FINISAR 0x009065
-#define QSFP_OUI_GORE 0x002177
-
-/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
-#define QSFP_VEND_OFFS 148
-#define QSFP_VEND_LEN 16
-/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
-#define QSFP_IBXCV_OFFS 164
-/* Bytes 165..167 are Vendor OUI number */
-#define QSFP_VOUI_OFFS 165
-#define QSFP_VOUI_LEN 3
-/* Bytes 168..183 are Vendor Part Number, string */
-#define QSFP_PN_OFFS 168
-#define QSFP_PN_LEN 16
-/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
-#define QSFP_REV_OFFS 184
-#define QSFP_REV_LEN 2
-/*
- * Bytes 186,187 are Wavelength, if Optical. Not Intel req'd
- * If copper, they are attenuation in dB:
- * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
- */
-#define QSFP_ATTEN_OFFS 186
-#define QSFP_ATTEN_LEN 2
-/*
- * Bytes 188,189 are Wavelength tolerance, if optical
- * If copper, they are attenuation in dB:
- * Byte 188 is at 12.5 Gb/s, Byte 189 at 25 Gb/s
- */
-#define QSFP_CU_ATTEN_7G_OFFS 188
-#define QSFP_CU_ATTEN_12G_OFFS 189
-/* Byte 190 is Max Case Temp. Not Intel req'd */
-/* Byte 191 is LSB of sum of bytes 128..190. Not Intel req'd */
-#define QSFP_CC_OFFS 191
-#define QSFP_EQ_INFO_OFFS 193
-#define QSFP_CDR_INFO_OFFS 194
-/* Bytes 196..211 are Serial Number, String */
-#define QSFP_SN_OFFS 196
-#define QSFP_SN_LEN 16
-/* Bytes 212..217 are date-code YYMMDD (MM==1 for Jan) */
-#define QSFP_DATE_OFFS 212
-#define QSFP_DATE_LEN 6
-/* Bytes 218,219 are optional lot-code, string */
-#define QSFP_LOT_OFFS 218
-#define QSFP_LOT_LEN 2
-/* Bytes 220, 221 indicate monitoring options, Not Intel req'd */
-/* Byte 222 indicates nominal bitrate in units of 250Mbits/sec */
-#define QSFP_NOM_BIT_RATE_250_OFFS 222
-/* Byte 223 is LSB of sum of bytes 192..222 */
-#define QSFP_CC_EXT_OFFS 223
-
-/*
- * Interrupt flag masks
- */
-#define QSFP_DATA_NOT_READY 0x01
-
-#define QSFP_HIGH_TEMP_ALARM 0x80
-#define QSFP_LOW_TEMP_ALARM 0x40
-#define QSFP_HIGH_TEMP_WARNING 0x20
-#define QSFP_LOW_TEMP_WARNING 0x10
-
-#define QSFP_HIGH_VCC_ALARM 0x80
-#define QSFP_LOW_VCC_ALARM 0x40
-#define QSFP_HIGH_VCC_WARNING 0x20
-#define QSFP_LOW_VCC_WARNING 0x10
-
-#define QSFP_HIGH_POWER_ALARM 0x88
-#define QSFP_LOW_POWER_ALARM 0x44
-#define QSFP_HIGH_POWER_WARNING 0x22
-#define QSFP_LOW_POWER_WARNING 0x11
-
-#define QSFP_HIGH_BIAS_ALARM 0x88
-#define QSFP_LOW_BIAS_ALARM 0x44
-#define QSFP_HIGH_BIAS_WARNING 0x22
-#define QSFP_LOW_BIAS_WARNING 0x11
-
-/*
- * struct qsfp_data encapsulates state of QSFP device for one port.
- * It will be part of port-specific data if a board supports QSFP.
- *
- * Since multiple board-types use QSFP, and their pport_data structs
- * differ (in the chip-specific section), we need a pointer to its head.
- *
- * Avoiding premature optimization, we will have one work_struct per port,
- * and let the qsfp_lock arbitrate access to common resources.
- *
- */
-
-#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
-#define QSFP_HIGH_PWR(pbyte) (((pbyte) & 3) | 4)
-#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
-#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
-
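Tying these to qsfp_dump() in qsfp.c above: QSFP_PWR() extracts the 2-bit power class from byte 129 (D7..D6), which indexes the packed pwr_codes string in 4 byte steps:

/* e.g. power class 2: pwr_codes + 2 * 4 -> "2.5W...", printed as "PWR:2.5W" */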
-struct qsfp_data {
- /* Helps to find our way */
- struct hfi1_pportdata *ppd;
- struct work_struct qsfp_work;
- u8 cache[QSFP_MAX_NUM_PAGES * 128];
- /* protect qsfp data */
- spinlock_t qsfp_lock;
- u8 check_interrupt_flags;
- u8 reset_needed;
- u8 limiting_active;
- u8 cache_valid;
- u8 cache_refresh_required;
-};
-
-int refresh_qsfp_cache(struct hfi1_pportdata *ppd,
- struct qsfp_data *cp);
-int qsfp_mod_present(struct hfi1_pportdata *ppd);
-int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr,
- u32 len, u8 *data);
-
-int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
- int offset, void *bp, int len);
-int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
- int offset, void *bp, int len);
-int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len);
-int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len);
-int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len);
-int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
- int len);
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
deleted file mode 100644
index 0d7e1017f..000000000
--- a/drivers/staging/rdma/hfi1/rc.c
+++ /dev/null
@@ -1,2581 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/io.h>
-#include <rdma/rdma_vt.h>
-#include <rdma/rdmavt_qp.h>
-
-#include "hfi.h"
-#include "qp.h"
-#include "verbs_txreq.h"
-#include "trace.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-/**
- * hfi1_add_retry_timer - add/start a retry timer
- * @qp: the QP
- *
- * add a retry timer on the QP
- */
-static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
-{
- struct ib_qp *ibqp = &qp->ibqp;
- struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
-
- qp->s_flags |= RVT_S_TIMER;
- /* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies + qp->timeout_jiffies +
- rdi->busy_jiffies;
- add_timer(&qp->s_timer);
-}
-
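The expiry above is the standard IB timeout formula; a worked instance, assuming qp->timeout_jiffies already holds the jiffies form of 4.096 usec * (1 << qp->timeout):

/*
 * qp->timeout == 14: 4.096 usec * 2^14 ~= 67 msec, i.e. roughly 17
 * jiffies at HZ=250, plus the rdi->busy_jiffies back-off added above.
 */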
-/**
- * hfi1_add_rnr_timer - add/start an rnr timer
- * @qp - the QP
- * @to - timeout in usecs
- *
- * add an rnr timer on the QP
- */
-void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- qp->s_flags |= RVT_S_WAIT_RNR;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
- add_timer(&priv->s_rnr_timer);
-}
-
-/**
- * hfi1_mod_retry_timer - mod a retry timer
- * @qp: the QP
- *
- * Modify a potentially already running retry
- * timer
- */
-static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
-{
- struct ib_qp *ibqp = &qp->ibqp;
- struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
-
- qp->s_flags |= RVT_S_TIMER;
- /* 4.096 usec. * (1 << qp->timeout) */
- mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
- rdi->busy_jiffies);
-}
-
-/**
- * hfi1_stop_retry_timer - stop a retry timer
- * @qp: the QP
- *
- * stop a retry timer and return whether the
- * timer had been pending.
- */
-static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
-{
- int rval = 0;
-
- /* Remove QP from retry */
- if (qp->s_flags & RVT_S_TIMER) {
- qp->s_flags &= ~RVT_S_TIMER;
- rval = del_timer(&qp->s_timer);
- }
- return rval;
-}
-
-/**
- * hfi1_stop_rc_timers - stop all timers
- * @qp: the QP
- *
- * stop any pending timers
- */
-void hfi1_stop_rc_timers(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- /* Remove QP from all timers */
- if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- del_timer(&priv->s_rnr_timer);
- }
-}
-
-/**
- * hfi1_stop_rnr_timer - stop an rnr timer
- * @qp: the QP
- *
- * stop an rnr timer and return whether the
- * timer had been pending.
- */
-static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
-{
- int rval = 0;
- struct hfi1_qp_priv *priv = qp->priv;
-
- /* Remove QP from rnr timer */
- if (qp->s_flags & RVT_S_WAIT_RNR) {
- qp->s_flags &= ~RVT_S_WAIT_RNR;
- rval = del_timer(&priv->s_rnr_timer);
- }
- return rval;
-}
-
-/**
- * hfi1_del_timers_sync - wait for any timeout routines to exit
- * @qp: the QP
- */
-void hfi1_del_timers_sync(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- del_timer_sync(&qp->s_timer);
- del_timer_sync(&priv->s_rnr_timer);
-}
-
-/* only opcode mask for adaptive pio */
-const u32 rc_only_opcode =
- BIT(OP(SEND_ONLY) & 0x1f) |
-	BIT(OP(SEND_ONLY_WITH_IMMEDIATE) & 0x1f) |
-	BIT(OP(RDMA_WRITE_ONLY) & 0x1f) |
-	BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & 0x1f) |
-	BIT(OP(RDMA_READ_REQUEST) & 0x1f) |
-	BIT(OP(ACKNOWLEDGE) & 0x1f) |
-	BIT(OP(ATOMIC_ACKNOWLEDGE) & 0x1f) |
-	BIT(OP(COMPARE_SWAP) & 0x1f) |
-	BIT(OP(FETCH_ADD) & 0x1f);
-
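RC opcodes fit in 0x00..0x1f, so (opcode & 0x1f) selects one bit of the mask above; IB_OPCODE_RC_SEND_ONLY is 0x04, contributing BIT(4). A caller can test an opcode as sketched below (rc_opcode_is_only is a hypothetical name):

static inline bool rc_opcode_is_only(u8 opcode)
{
	return rc_only_opcode & BIT(opcode & 0x1f);
}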
-static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
- u32 psn, u32 pmtu)
-{
- u32 len;
-
- len = delta_psn(psn, wqe->psn) * pmtu;
- ss->sge = wqe->sg_list[0];
- ss->sg_list = wqe->sg_list + 1;
- ss->num_sge = wqe->wr.num_sge;
- ss->total_len = wqe->length;
- hfi1_skip_sge(ss, len, 0);
- return wqe->length - len;
-}
-
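restart_sge() above rewinds the send SGE state to the packet holding the given PSN; a worked example with pmtu = 4096:

/*
 * delta_psn(psn, wqe->psn) == 3: hfi1_skip_sge() skips 3 * 4096 = 12288
 * bytes and the remaining length returned is wqe->length - 12288.
 */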
-/**
- * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @dev: the device for this QP
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @ps: the xmit packet state
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are in the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
- struct hfi1_other_headers *ohdr,
- struct hfi1_pkt_state *ps)
-{
- struct rvt_ack_entry *e;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
- int middle = 0;
- u32 pmtu = qp->pmtu;
- struct hfi1_qp_priv *priv = qp->priv;
-
- /* Don't send an ACK if we aren't supposed to. */
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto bail;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
-
- switch (qp->s_ack_state) {
- case OP(RDMA_READ_RESPONSE_LAST):
- case OP(RDMA_READ_RESPONSE_ONLY):
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- /* FALLTHROUGH */
- case OP(ATOMIC_ACKNOWLEDGE):
- /*
- * We can increment the tail pointer now that the last
- * response has been sent instead of only being
- * constructed.
- */
- if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
- qp->s_tail_ack_queue = 0;
- /* FALLTHROUGH */
- case OP(SEND_ONLY):
- case OP(ACKNOWLEDGE):
- /* Check for no next entry in the queue. */
- if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & RVT_S_ACK_PENDING)
- goto normal;
- goto bail;
- }
-
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST)) {
- /*
-			 * If an RDMA read response is being resent and
- * we haven't seen the duplicate request yet,
- * then stop sending the remaining responses the
- * responder has seen until the requester re-sends it.
- */
- len = e->rdma_sge.sge_length;
- if (len && !e->rdma_sge.mr) {
- qp->s_tail_ack_queue = qp->r_head_ack_queue;
- goto bail;
- }
- /* Copy SGE state in case we need to resend */
- ps->s_txreq->mr = e->rdma_sge.mr;
- if (ps->s_txreq->mr)
- rvt_get_mr(ps->s_txreq->mr);
- qp->s_ack_rdma_sge.sge = e->rdma_sge;
- qp->s_ack_rdma_sge.num_sge = 1;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- if (len > pmtu) {
- len = pmtu;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- } else {
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
- e->sent = 1;
- }
- ohdr->u.aeth = hfi1_compute_aeth(qp);
- hwords++;
- qp->s_ack_rdma_psn = e->psn;
- bth2 = mask_psn(qp->s_ack_rdma_psn++);
- } else {
- /* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
- len = 0;
- qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
- ohdr->u.at.aeth = hfi1_compute_aeth(qp);
- ohdr->u.at.atomic_ack_eth[0] =
- cpu_to_be32(e->atomic_data >> 32);
- ohdr->u.at.atomic_ack_eth[1] =
- cpu_to_be32(e->atomic_data);
- hwords += sizeof(ohdr->u.at) / sizeof(u32);
- bth2 = mask_psn(e->psn);
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
- if (ps->s_txreq->mr)
- rvt_get_mr(ps->s_txreq->mr);
- len = qp->s_ack_rdma_sge.sge.sge_length;
- if (len > pmtu) {
- len = pmtu;
- middle = HFI1_CAP_IS_KSET(SDMA_AHG);
- } else {
- ohdr->u.aeth = hfi1_compute_aeth(qp);
- hwords++;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- bth2 = mask_psn(qp->s_ack_rdma_psn++);
- break;
-
- default:
-normal:
- /*
- * Send a regular ACK.
- * Set the s_ack_state so we wait until after sending
- * the ACK before setting s_ack_state to ACKNOWLEDGE
- * (see above).
- */
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~RVT_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
- if (qp->s_nak_state)
- ohdr->u.aeth =
- cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
- (qp->s_nak_state <<
- HFI1_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = hfi1_compute_aeth(qp);
- hwords++;
- len = 0;
- bth0 = OP(ACKNOWLEDGE) << 24;
- bth2 = mask_psn(qp->s_ack_psn);
- }
- qp->s_rdma_ack_cnt++;
- qp->s_hdrwords = hwords;
- ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_size = len;
- hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
- /* pbc */
- ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
- return 1;
-
-bail:
- qp->s_ack_state = OP(ACKNOWLEDGE);
- /*
- * Ensure s_rdma_ack_cnt changes are committed prior to resetting
- * RVT_S_RESP_PENDING
- */
- smp_wmb();
- qp->s_flags &= ~(RVT_S_RESP_PENDING
- | RVT_S_ACK_PENDING
- | RVT_S_AHG_VALID);
- return 0;
-}
-
-/**
- * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- *
- * Assumes s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
- struct hfi1_other_headers *ohdr;
- struct rvt_sge_state *ss;
- struct rvt_swqe *wqe;
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- u32 hwords = 5;
- u32 len;
- u32 bth0 = 0;
- u32 bth2;
- u32 pmtu = qp->pmtu;
- char newreq;
- int middle = 0;
- int delta;
-
- ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
- goto bail_no_tx;
-
- ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
-
-	/* Sending responses takes priority over sending requests. */
- if ((qp->s_flags & RVT_S_RESP_PENDING) &&
- make_rc_ack(dev, qp, ohdr, ps))
- return 1;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- clear_ahg(qp);
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done_free_tx;
- }
-
- if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
- goto bail;
-
- if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
- if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= RVT_S_WAIT_PSN;
- goto bail;
- }
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- }
-
- /* Send a request. */
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- switch (qp->s_state) {
- default:
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
- goto bail;
- /*
- * Resend an old request or start a new one.
- *
- * We keep track of the current SWQE so that
- * we don't reset the "furthest progress" state
- * if we need to back up.
- */
- newreq = 0;
- if (qp->s_cur == qp->s_tail) {
- /* Check if send work queue is empty. */
- if (qp->s_tail == qp->s_head) {
- clear_ahg(qp);
- goto bail;
- }
- /*
- * If a fence is requested, wait for previous
- * RDMA read and atomic operations to finish.
- */
- if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
- qp->s_num_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_FENCE;
- goto bail;
- }
- newreq = 1;
- qp->s_psn = wqe->psn;
- }
- /*
- * Note that we have to be careful not to modify the
- * original work request since we may need to resend
- * it.
- */
- len = wqe->length;
- ss = &qp->s_sge;
- bth2 = mask_psn(qp->s_psn);
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- /* If no credit, return. */
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
- cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
- goto bail;
- }
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND) {
- qp->s_state = OP(SEND_ONLY);
- } else {
- qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE_WITH_IMM:
- /* If no credit, return. */
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
- cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
- goto bail;
- }
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / sizeof(u32);
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
- qp->s_state = OP(RDMA_WRITE_ONLY);
- } else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_READ:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- }
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- qp->s_state = OP(COMPARE_SWAP);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.swap);
- ohdr->u.atomic_eth.compare_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
- } else {
- qp->s_state = OP(FETCH_ADD);
- ohdr->u.atomic_eth.swap_data = cpu_to_be64(
- wqe->atomic_wr.compare_add);
- ohdr->u.atomic_eth.compare_data = 0;
- }
- ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
- wqe->atomic_wr.remote_addr >> 32);
- ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
- wqe->atomic_wr.remote_addr);
- ohdr->u.atomic_eth.rkey = cpu_to_be32(
- wqe->atomic_wr.rkey);
- hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- qp->s_len = wqe->length;
- if (newreq) {
- qp->s_tail++;
- if (qp->s_tail >= qp->s_size)
- qp->s_tail = 0;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_psn = wqe->lpsn + 1;
- else
- qp->s_psn++;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
- * thread to indicate a SEND needs to be restarted from an
- * earlier PSN without interfering with the sending thread.
- * See restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- bth2 = mask_psn(qp->s_psn++);
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- middle = HFI1_CAP_IS_KSET(SDMA_AHG);
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND) {
- qp->s_state = OP(SEND_LAST);
- } else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_LAST is used by the ACK processing
- * thread to indicate an RDMA write needs to be restarted from
- * an earlier PSN without interfering with the sending thread.
- * See restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- bth2 = mask_psn(qp->s_psn++);
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- middle = HFI1_CAP_IS_KSET(SDMA_AHG);
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
- qp->s_state = OP(RDMA_WRITE_LAST);
- } else {
- qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
- * thread to indicate an RDMA read needs to be restarted from
- * an earlier PSN without interfering with the sending thread.
- * See restart_rc().
- */
- len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr + len);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
- qp->s_psn = wqe->lpsn + 1;
- ss = NULL;
- len = 0;
- qp->s_cur++;
- if (qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_sending_hpsn = bth2;
- delta = delta_psn(bth2, wqe->psn);
- if (delta && delta % HFI1_PSN_CREDIT == 0)
- bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & RVT_S_SEND_ONE) {
- qp->s_flags &= ~RVT_S_SEND_ONE;
- qp->s_flags |= RVT_S_WAIT_ACK;
- bth2 |= IB_BTH_REQ_ACK;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
- hfi1_make_ruc_header(
- qp,
- ohdr,
- bth0 | (qp->s_state << 24),
- bth2,
- middle,
- ps);
- /* pbc */
- ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
- return 1;
-
-done_free_tx:
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- return 1;
-
-bail:
- hfi1_put_txreq(ps->s_txreq);
-
-bail_no_tx:
- ps->s_txreq = NULL;
- qp->s_flags &= ~RVT_S_BUSY;
- qp->s_hdrwords = 0;
- return 0;
-}
-
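-/*
- * For reference, the header word arithmetic above follows from the
- * standard IBTA extended-header sizes (a sketch, assuming the usual
- * struct layouts rather than anything hfi1-specific):
- *
- *   struct ib_reth:       8 (vaddr) + 4 (rkey) + 4 (length) = 16 bytes -> 4 words
- *   struct ib_atomic_eth: 8 (vaddr) + 4 (rkey) + 8 (swap) + 8 (compare)
- *                                                          = 28 bytes -> 7 words
- *
- * so "hwords += sizeof(struct ib_reth) / sizeof(u32)" adds 4 words, and
- * the atomic case adds 7, on top of the LRH+BTH base.
- */
-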
-/**
- * hfi1_send_rc_ack - Construct an ACK packet and send it
- * @rcd: the receive context
- * @qp: a pointer to the QP
- * @is_fecn: nonzero if a FECN was received; sets the BECN bit in the ACK
- *
- * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
- int is_fecn)
-{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u64 pbc, pbc_flags = 0;
- u16 lrh0;
- u16 sc5;
- u32 bth0;
- u32 hwords;
- u32 vl, plen;
- struct send_context *sc;
- struct pio_buf *pbuf;
- struct hfi1_ib_header hdr;
- struct hfi1_other_headers *ohdr;
- unsigned long flags;
-
-	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
- if (qp->s_flags & RVT_S_RESP_PENDING)
- goto queue_ack;
-
- /* Ensure s_rdma_ack_cnt changes are committed */
- smp_read_barrier_depends();
- if (qp->s_rdma_ack_cnt)
- goto queue_ack;
-
- /* Construct the header */
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
- hwords = 6;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
- &qp->remote_ah_attr.grh, hwords, 0);
- ohdr = &hdr.u.l.oth;
- lrh0 = HFI1_LRH_GRH;
- } else {
- ohdr = &hdr.u.oth;
- lrh0 = HFI1_LRH_BTH;
- }
-	/* read pkey_index w/o lock (it's atomic) */
- bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
- (qp->r_nak_state <<
- HFI1_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = hfi1_compute_aeth(qp);
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
- lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
- ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
-
- /* Don't try to send ACKs if the link isn't ACTIVE */
- if (driver_lstate(ppd) != IB_PORT_ACTIVE)
- return;
-
- sc = rcd->sc;
- plen = 2 /* PBC */ + hwords;
- vl = sc_to_vlt(ppd->dd, sc5);
- pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
-
- pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
- if (!pbuf) {
- /*
- * We have no room to send at the moment. Pass
- * responsibility for sending the ACK to the send tasklet
- * so that when enough buffer space becomes available,
- * the ACK is sent ahead of other outgoing packets.
- */
- goto queue_ack;
- }
-
- trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
-
- /* write the pbc and data */
- ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);
-
- return;
-
-queue_ack:
- this_cpu_inc(*ibp->rvp.rc_qacks);
- spin_lock_irqsave(&qp->s_lock, flags);
- qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
- if (is_fecn)
- qp->s_flags |= RVT_S_ECN;
-
- /* Schedule the send tasklet. */
- hfi1_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
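-/*
- * The AETH built above packs two fields into one 32-bit word. As a
- * sketch of the layout the shifts here assume (HFI1_AETH_CREDIT_SHIFT
- * taken to be 24, with a 24-bit MSN mask):
- *
- *   bits 31..29: syndrome (0 = ACK, 1 = RNR NAK, 3 = NAK; see do_rc_ack())
- *   bits 28..24: credit count or NAK/RNR code
- *   bits 23..0:  MSN
- *
- * e.g. an RNR NAK with timer code 0x07 and MSN 5 encodes as
- * ((0x20 | 0x07) << 24) | 5 = 0x27000005.
- */
-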
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from hfi1_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct rvt_qp *qp, u32 psn)
-{
- u32 n = qp->s_acked;
- struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
- u32 opcode;
-
- qp->s_cur = n;
-
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (cmp_psn(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
-
- /* Find the work request opcode corresponding to the given PSN. */
- opcode = wqe->wr.opcode;
- for (;;) {
- int diff;
-
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- wqe = rvt_get_swqe_ptr(qp, n);
- diff = cmp_psn(psn, wqe->psn);
- if (diff < 0)
- break;
- qp->s_cur = n;
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (diff == 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
- opcode = wqe->wr.opcode;
- }
-
- /*
- * Set the state to restart in the middle of a request.
- * Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See hfi1_make_rc_req().
- */
- switch (opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
-		 * This case shouldn't happen since there is only
-		 * one PSN per request.
- */
- qp->s_state = OP(SEND_LAST);
- }
-done:
- qp->s_psn = psn;
- /*
- * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
- * asynchronously before the send tasklet can get scheduled.
- * Doing it in hfi1_make_rc_req() is too late.
- */
- if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
- (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= RVT_S_WAIT_PSN;
- qp->s_flags &= ~RVT_S_AHG_VALID;
-}
-
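-/*
- * The cmp_psn() calls above compare 24-bit PSNs with wraparound. A
- * minimal stand-alone model of the comparison being assumed here
- * (sign-extend the 24-bit difference through bit 23; the "example_"
- * prefix marks it as illustrative, not part of this file):
- */
-static inline int example_cmp_psn(u32 a, u32 b)
-{
-	/* < 0 if a precedes b, 0 if equal, > 0 if a follows b (mod 2^24) */
-	return (((int)a - (int)b) << 8) >> 8;
-}
-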
-/*
- * Back up requester to resend the last un-ACKed request.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- */
-static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
-{
- struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- struct hfi1_ibport *ibp;
-
- if (qp->s_retry == 0) {
- if (qp->s_mig_state == IB_MIG_ARMED) {
- hfi1_migrate_qp(qp);
- qp->s_retry = qp->s_retry_cnt;
- } else if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- return;
- } else { /* need to handle delayed completion */
- return;
- }
- } else {
- qp->s_retry--;
- }
-
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->rvp.n_rc_resends++;
- else
- ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
-
- qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
- RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
- RVT_S_WAIT_ACK);
- if (wait)
- qp->s_flags |= RVT_S_SEND_ONE;
- reset_psn(qp, psn);
-}
-
-/*
- * This is called from s_timer for missing responses.
- */
-void hfi1_rc_timeout(unsigned long arg)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- struct hfi1_ibport *ibp;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->r_lock, flags);
- spin_lock(&qp->s_lock);
- if (qp->s_flags & RVT_S_TIMER) {
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->rvp.n_rc_timeouts++;
- qp->s_flags &= ~RVT_S_TIMER;
- del_timer(&qp->s_timer);
- trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
- restart_rc(qp, qp->s_last_psn + 1, 1);
- hfi1_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_lock, flags);
-}
-
-/*
- * This is called from s_timer for RNR timeouts.
- */
-void hfi1_rc_rnr_retry(unsigned long arg)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_stop_rnr_timer(qp);
- hfi1_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
- * Set qp->s_sending_psn to the next PSN after the given one.
- * This would be psn+1 except when RDMA reads are present.
- */
-static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
-{
- struct rvt_swqe *wqe;
- u32 n = qp->s_last;
-
- /* Find the work request corresponding to the given PSN. */
- for (;;) {
- wqe = rvt_get_swqe_ptr(qp, n);
- if (cmp_psn(psn, wqe->lpsn) <= 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_sending_psn = wqe->lpsn + 1;
- else
- qp->s_sending_psn = psn + 1;
- break;
- }
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- }
-}
-
-/*
- * This should be called with the QP s_lock held and interrupts disabled.
- */
-void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
-{
- struct hfi1_other_headers *ohdr;
- struct rvt_swqe *wqe;
- struct ib_wc wc;
- unsigned i;
- u32 opcode;
- u32 psn;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- /* Find out where the BTH is */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- WARN_ON(!qp->s_rdma_ack_cnt);
- qp->s_rdma_ack_cnt--;
- return;
- }
-
- psn = be32_to_cpu(ohdr->bth[2]);
- reset_sending_psn(qp, psn);
-
- /*
- * Start timer after a packet requesting an ACK has been sent and
- * there are still requests that haven't been acked.
- */
- if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags &
- (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
- (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- hfi1_add_retry_timer(qp);
-
- while (qp->s_last != qp->s_acked) {
- u32 s_last;
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
- cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
- break;
- s_last = qp->s_last;
- if (++s_last >= qp->s_size)
- s_last = 0;
- qp->s_last = s_last;
- /* see post_send() */
- barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
- }
- /*
- * If we were waiting for sends to complete before re-sending,
- * and they are now complete, restart sending.
- */
- trace_hfi1_rc_sendcomplete(qp, psn);
- if (qp->s_flags & RVT_S_WAIT_PSN &&
- cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~RVT_S_WAIT_PSN;
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- hfi1_schedule_send(qp);
- }
-}
-
-static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
-{
- qp->s_last_psn = psn;
-}
-
-/*
- * Generate a SWQE completion.
- * This is similar to hfi1_send_complete but has to check to be sure
- * that the SGEs are not being referenced if the SWQE is being resent.
- */
-static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
- struct rvt_swqe *wqe,
- struct hfi1_ibport *ibp)
-{
- struct ib_wc wc;
- unsigned i;
-
- /*
- * Don't decrement refcount and don't generate a
- * completion if the SWQE is being resent until the send
- * is finished.
- */
- if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
- cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- u32 s_last;
-
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- s_last = qp->s_last;
- if (++s_last >= qp->s_size)
- s_last = 0;
- qp->s_last = s_last;
- /* see post_send() */
- barrier();
- /* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
- }
- } else {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- this_cpu_inc(*ibp->rvp.rc_delayed_comp);
- /*
-		 * If send progress is not running, attempt to progress
-		 * the SDMA queue.
- */
- if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
- struct sdma_engine *engine;
- u8 sc5;
-
- /* For now use sc to find engine */
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- engine = qp_to_sdma_engine(qp, sc5);
- sdma_engine_progress_schedule(engine);
- }
- }
-
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, wqe->lpsn);
-
- /*
- * If we are completing a request which is in the process of
- * being resent, we can stop re-sending it since we know the
- * responder has already seen it.
- */
- if (qp->s_acked == qp->s_cur) {
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- qp->s_acked = qp->s_cur;
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- if (qp->s_acked != qp->s_tail) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- }
- } else {
- if (++qp->s_acked >= qp->s_size)
- qp->s_acked = 0;
- if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
- qp->s_draining = 0;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- }
- return wqe;
-}
-
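-/*
- * An aid to reading the index updates above: the send queue indices
- * are assumed to sit in circular order around the s_size ring as
- *
- *   s_last <= s_acked <= s_cur <= s_tail
- *
- * roughly: s_last..s_acked are ACKed but not yet completed to the CQ,
- * s_acked..s_cur have been sent and await ACKs, and s_cur..s_tail are
- * queued for transmission.
- */
-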
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
- *
- * This is called from rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * May be called at interrupt level, with the QP s_lock held.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
- u64 val, struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_ibport *ibp;
- enum ib_wc_status status;
- struct rvt_swqe *wqe;
- int ret = 0;
- u32 ack_psn;
- int diff;
- unsigned long to;
-
- /*
- * Note that NAKs implicitly ACK outstanding SEND and RDMA write
- * requests and implicitly NAK RDMA read and atomic requests issued
- * before the NAK'ed request. The MSN won't include the NAK'ed
-	 * request but will include the ACK'ed request(s).
- */
- ack_psn = psn;
- if (aeth >> 29)
- ack_psn--;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- ibp = to_iport(qp->ibqp.device, qp->port_num);
-
- /*
- * The MSN might be for a later WQE than the PSN indicates so
- * only complete WQEs that the PSN finishes.
- */
- while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
- /*
- * RDMA_READ_RESPONSE_ONLY is a special case since
- * we want to generate completion events for everything
- * before the RDMA read, copy the data, then generate
- * the completion for the read.
- */
- if (wqe->wr.opcode == IB_WR_RDMA_READ &&
- opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
- diff == 0) {
- ret = 1;
- goto bail_stop;
- }
- /*
-		 * If this request is an RDMA read or atomic, and the ACK is
-		 * for a later operation, this ACK NAKs the RDMA read or
-		 * atomic. In other words, only an RDMA_READ_LAST or ONLY
-		 * can ACK an RDMA read, and likewise for atomic ops. Note
- * that the NAK case can only happen if relaxed ordering is
- * used and requests are sent after an RDMA read or atomic
- * is sent but before the response is received.
- */
- if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
- (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
- ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
- /* Retry this request. */
- if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait,
- &rcd->qp_wait_list);
- }
- }
- /*
- * No need to process the ACK/NAK since we are
- * restarting an earlier request.
- */
- goto bail_stop;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
- u64 *vaddr = wqe->sg_list[0].vaddr;
- *vaddr = val;
- }
- if (qp->s_num_rd_atomic &&
- (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
- qp->s_num_rd_atomic--;
- /* Restart sending task if fence is complete */
- if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
- !qp->s_num_rd_atomic) {
- qp->s_flags &= ~(RVT_S_WAIT_FENCE |
- RVT_S_WAIT_ACK);
- hfi1_schedule_send(qp);
- } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
- qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
- RVT_S_WAIT_ACK);
- hfi1_schedule_send(qp);
- }
- }
- wqe = do_rc_completion(qp, wqe, ibp);
- if (qp->s_acked == qp->s_tail)
- break;
- }
-
- switch (aeth >> 29) {
- case 0: /* ACK */
- this_cpu_inc(*ibp->rvp.rc_acks);
- if (qp->s_acked != qp->s_tail) {
- /*
- * We are expecting more ACKs so
- * mod the retry timer.
- */
- hfi1_mod_retry_timer(qp);
- /*
- * We can stop re-sending the earlier packets and
- * continue with the next packet the receiver wants.
- */
- if (cmp_psn(qp->s_psn, psn) <= 0)
- reset_psn(qp, psn + 1);
- } else {
- /* No more acks - kill all timers */
- hfi1_stop_rc_timers(qp);
- if (cmp_psn(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
- }
- }
- if (qp->s_flags & RVT_S_WAIT_ACK) {
- qp->s_flags &= ~RVT_S_WAIT_ACK;
- hfi1_schedule_send(qp);
- }
- hfi1_get_credit(qp, aeth);
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, psn);
- return 1;
-
- case 1: /* RNR NAK */
- ibp->rvp.n_rnr_naks++;
- if (qp->s_acked == qp->s_tail)
- goto bail_stop;
- if (qp->s_flags & RVT_S_WAIT_RNR)
- goto bail_stop;
- if (qp->s_rnr_retry == 0) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
- }
- if (qp->s_rnr_retry_cnt < 7)
- qp->s_rnr_retry--;
-
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
-
- ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
-
- reset_psn(qp, psn);
-
- qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
- hfi1_stop_rc_timers(qp);
- to =
- ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
- HFI1_AETH_CREDIT_MASK];
- hfi1_add_rnr_timer(qp, to);
- return 0;
-
- case 3: /* NAK */
- if (qp->s_acked == qp->s_tail)
- goto bail_stop;
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
- switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
- HFI1_AETH_CREDIT_MASK) {
- case 0: /* PSN sequence error */
- ibp->rvp.n_seq_naks++;
- /*
- * Back up to the responder's expected PSN.
- * Note that we might get a NAK in the middle of an
- * RDMA READ response which terminates the RDMA
- * READ.
- */
- restart_rc(qp, psn, 0);
- hfi1_schedule_send(qp);
- break;
-
- case 1: /* Invalid Request */
- status = IB_WC_REM_INV_REQ_ERR;
- ibp->rvp.n_other_naks++;
- goto class_b;
-
- case 2: /* Remote Access Error */
- status = IB_WC_REM_ACCESS_ERR;
- ibp->rvp.n_other_naks++;
- goto class_b;
-
- case 3: /* Remote Operation Error */
- status = IB_WC_REM_OP_ERR;
- ibp->rvp.n_other_naks++;
-class_b:
- if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
- break;
-
- default:
- /* Ignore other reserved NAK error codes */
- goto reserved;
- }
- qp->s_retry = qp->s_retry_cnt;
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- goto bail_stop;
-
- default: /* 2: reserved */
-reserved:
- /* Ignore reserved NAK codes. */
- goto bail_stop;
- }
- return ret;
-bail_stop:
- hfi1_stop_rc_timers(qp);
- return ret;
-}
-
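-/*
- * Worked example of the implicit-ACK rule at the top of do_rc_ack():
- * with requests whose last PSNs are 10, 11 and 12 outstanding, a NAK
- * arriving with PSN 12 yields ack_psn = 11, so the while loop completes
- * the WQEs ending at 10 and 11 before the NAK handling examines the
- * request containing PSN 12.
- */
-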
-/*
- * We have seen an out of sequence RDMA read middle or last packet.
- * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
- */
-static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
- struct hfi1_ctxtdata *rcd)
-{
- struct rvt_swqe *wqe;
-
- /* Remove QP from retry timer */
- hfi1_stop_rc_timers(qp);
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
-
- while (cmp_psn(psn, wqe->lpsn) > 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- break;
- wqe = do_rc_completion(qp, wqe, ibp);
- }
-
- ibp->rvp.n_rdma_seq++;
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
-}
-
-/**
- * rc_rcv_resp - process an incoming RC response packet
- * @ibp: the port this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @rcd: the receive context
- *
- * This is called from hfi1_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static void rc_rcv_resp(struct hfi1_ibport *ibp,
- struct hfi1_other_headers *ohdr,
- void *data, u32 tlen, struct rvt_qp *qp,
- u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
- struct hfi1_ctxtdata *rcd)
-{
- struct rvt_swqe *wqe;
- enum ib_wc_status status;
- unsigned long flags;
- int diff;
- u32 pad;
- u32 aeth;
- u64 val;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- trace_hfi1_rc_ack(qp, psn);
-
- /* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
- if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
- goto ack_done;
-
- /* Ignore duplicate responses. */
- diff = cmp_psn(psn, qp->s_last_psn);
- if (unlikely(diff <= 0)) {
- /* Update credits for "ghost" ACKs */
- if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
- aeth = be32_to_cpu(ohdr->u.aeth);
- if ((aeth >> 29) == 0)
- hfi1_get_credit(qp, aeth);
- }
- goto ack_done;
- }
-
- /*
- * Skip everything other than the PSN we expect, if we are waiting
- * for a reply to a restarted RDMA read or atomic op.
- */
- if (qp->r_flags & RVT_R_RDMAR_SEQ) {
- if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
- goto ack_done;
- qp->r_flags &= ~RVT_R_RDMAR_SEQ;
- }
-
- if (unlikely(qp->s_acked == qp->s_tail))
- goto ack_done;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- status = IB_WC_SUCCESS;
-
- switch (opcode) {
- case OP(ACKNOWLEDGE):
- case OP(ATOMIC_ACKNOWLEDGE):
- case OP(RDMA_READ_RESPONSE_FIRST):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
- __be32 *p = ohdr->u.at.atomic_ack_eth;
-
- val = ((u64)be32_to_cpu(p[0]) << 32) |
- be32_to_cpu(p[1]);
- } else {
- val = 0;
- }
- if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
- opcode != OP(RDMA_READ_RESPONSE_FIRST))
- goto ack_done;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_middle;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /* no AETH, no ACK */
- if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
-read_middle:
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto ack_len_err;
- if (unlikely(pmtu >= qp->s_rdma_read_len))
- goto ack_len_err;
-
- /*
- * We got a response so update the timeout.
- * 4.096 usec. * (1 << qp->timeout)
- */
- qp->s_flags |= RVT_S_TIMER;
- mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
- if (qp->s_flags & RVT_S_WAIT_ACK) {
- qp->s_flags &= ~RVT_S_WAIT_ACK;
- hfi1_schedule_send(qp);
- }
-
- if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
- qp->s_retry = qp->s_retry_cnt;
-
- /*
- * Update the RDMA receive state but do the copy w/o
- * holding the locks and blocking interrupts.
- */
- qp->s_rdma_read_len -= pmtu;
- update_last_psn(qp, psn);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
- goto bail;
-
- case OP(RDMA_READ_RESPONSE_ONLY):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
- goto ack_done;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 0 && <= pmtu.
- * Remember to account for ICRC (4).
- */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto ack_len_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_last;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /* ACKs READ req. */
- if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 1 && <= pmtu.
- * Remember to account for ICRC (4).
- */
- if (unlikely(tlen <= (hdrsize + pad + 4)))
- goto ack_len_err;
-read_last:
- tlen -= hdrsize + pad + 4;
- if (unlikely(tlen != qp->s_rdma_read_len))
- goto ack_len_err;
- aeth = be32_to_cpu(ohdr->u.aeth);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
- WARN_ON(qp->s_rdma_read_sge.num_sge);
- (void)do_rc_ack(qp, aeth, psn,
- OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
- goto ack_done;
- }
-
-ack_op_err:
- status = IB_WC_LOC_QP_OP_ERR;
- goto ack_err;
-
-ack_seq_err:
- rdma_seq_err(qp, ibp, psn, rcd);
- goto ack_done;
-
-ack_len_err:
- status = IB_WC_LOC_LEN_ERR;
-ack_err:
- if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
-ack_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
- return;
-}
-
-static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
- struct rvt_qp *qp)
-{
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- atomic_inc(&qp->refcount);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
-}
-
-static inline void rc_cancel_ack(struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- priv->r_adefered = 0;
- if (list_empty(&qp->rspwait))
- return;
- list_del_init(&qp->rspwait);
- qp->r_flags &= ~RVT_R_RSP_NAK;
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-}
-
-/**
- * rc_rcv_error - process an incoming duplicate or error RC packet
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @rcd: the receive context
- *
- * This is called from hfi1_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
- struct rvt_qp *qp, u32 opcode, u32 psn,
- int diff, struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct rvt_ack_entry *e;
- unsigned long flags;
- u8 i, prev;
- int old_req;
-
- trace_hfi1_rc_rcv_error(qp, psn);
- if (diff > 0) {
- /*
- * Packet sequence error.
- * A NAK will ACK earlier sends and RDMA writes.
- * Don't queue the NAK if we already sent one.
- */
- if (!qp->r_nak_state) {
- ibp->rvp.n_rc_seqnak++;
- qp->r_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence NAK until all packets
- * in the receive queue have been processed.
- * Otherwise, we end up propagating congestion.
- */
- rc_defered_ack(rcd, qp);
- }
- goto done;
- }
-
- /*
- * Handle a duplicate request. Don't re-execute SEND, RDMA
- * write or atomic op. Don't NAK errors, just silently drop
- * the duplicate request. Note that r_sge, r_len, and
- * r_rcv_len may be in use so don't modify them.
- *
- * We are supposed to ACK the earliest duplicate PSN but we
- * can coalesce an outstanding duplicate ACK. We have to
- * send the earliest so that RDMA reads can be restarted at
- * the requester's expected PSN.
- *
- * First, find where this duplicate PSN falls within the
- * ACKs previously sent.
- * old_req is true if there is an older response that is scheduled
- * to be sent before sending this one.
- */
- e = NULL;
- old_req = 1;
- ibp->rvp.n_rc_dupreq++;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- for (i = qp->r_head_ack_queue; ; i = prev) {
- if (i == qp->s_tail_ack_queue)
- old_req = 0;
- if (i)
- prev = i - 1;
- else
- prev = HFI1_MAX_RDMA_ATOMIC;
- if (prev == qp->r_head_ack_queue) {
- e = NULL;
- break;
- }
- e = &qp->s_ack_queue[prev];
- if (!e->opcode) {
- e = NULL;
- break;
- }
- if (cmp_psn(psn, e->psn) >= 0) {
- if (prev == qp->s_tail_ack_queue &&
- cmp_psn(psn, e->lpsn) <= 0)
- old_req = 0;
- break;
- }
- }
- switch (opcode) {
- case OP(RDMA_READ_REQUEST): {
- struct ib_reth *reth;
- u32 offset;
- u32 len;
-
- /*
- * If we didn't find the RDMA read request in the ack queue,
- * we can ignore this request.
- */
- if (!e || e->opcode != OP(RDMA_READ_REQUEST))
- goto unlock_done;
- /* RETH comes after BTH */
- reth = &ohdr->u.rc.reth;
- /*
- * Address range must be a subset of the original
- * request and start on pmtu boundaries.
- * We reuse the old ack_queue slot since the requester
- * should not back up and request an earlier PSN for the
- * same request.
- */
- offset = delta_psn(psn, e->psn) * qp->pmtu;
- len = be32_to_cpu(reth->length);
- if (unlikely(offset + len != e->rdma_sge.sge_length))
- goto unlock_done;
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- if (len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
- IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto unlock_done;
- } else {
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->psn = psn;
- if (old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- /*
- * If we didn't find the atomic request in the ack queue
- * or the send tasklet is already backed up to send an
- * earlier entry, we can ignore this request.
- */
- if (!e || e->opcode != (u8)opcode || old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- default:
- /*
- * Ignore this operation if it doesn't request an ACK
-		 * or if an earlier RDMA read or atomic is going to be resent.
- */
- if (!(psn & IB_BTH_REQ_ACK) || old_req)
- goto unlock_done;
- /*
- * Resend the most recent ACK if this request is
- * after all the previous RDMA reads and atomics.
- */
- if (i == qp->r_head_ack_queue) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->r_psn - 1;
- goto send_ack;
- }
-
- /*
- * Resend the RDMA read or atomic op which
- * ACKs this duplicate request.
- */
- qp->s_tail_ack_queue = i;
- break;
- }
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= RVT_S_RESP_PENDING;
- qp->r_nak_state = 0;
- hfi1_schedule_send(qp);
-
-unlock_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return 1;
-
-send_ack:
- return 0;
-}
-
-void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
-{
- unsigned long flags;
- int lastwqe;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = rvt_error_qp(qp, err);
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
-}
-
-static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
-{
- unsigned next;
-
- next = n + 1;
- if (next > HFI1_MAX_RDMA_ATOMIC)
- next = 0;
- qp->s_tail_ack_queue = next;
- qp->s_ack_state = OP(ACKNOWLEDGE);
-}
-
-static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
- u32 lqpn, u32 rqpn, u8 svc_type)
-{
- struct opa_hfi1_cong_log_event_internal *cc_event;
- unsigned long flags;
-
- if (sl >= OPA_MAX_SLS)
- return;
-
- spin_lock_irqsave(&ppd->cc_log_lock, flags);
-
- ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
- ppd->threshold_event_counter++;
-
- cc_event = &ppd->cc_events[ppd->cc_log_idx++];
- if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
- ppd->cc_log_idx = 0;
- cc_event->lqpn = lqpn & RVT_QPN_MASK;
- cc_event->rqpn = rqpn & RVT_QPN_MASK;
- cc_event->sl = sl;
- cc_event->svc_type = svc_type;
- cc_event->rlid = rlid;
- /* keep timestamp in units of 1.024 usec */
- cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
-
- spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
-}
-
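-/*
- * The division above converts nanoseconds into the 1.024 usec units of
- * the congestion log: ns / 1024 = t / 1.024us. For example, a raw
- * timestamp of 2,048,000 ns becomes 2000 units, i.e. 2.048 ms.
- */
-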
-void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
- u32 rqpn, u8 svc_type)
-{
- struct cca_timer *cca_timer;
- u16 ccti, ccti_incr, ccti_timer, ccti_limit;
- u8 trigger_threshold;
- struct cc_state *cc_state;
- unsigned long flags;
-
- if (sl >= OPA_MAX_SLS)
- return;
-
- cca_timer = &ppd->cca_timer[sl];
-
- cc_state = get_cc_state(ppd);
-
- if (!cc_state)
- return;
-
- /*
- * 1) increase CCTI (for this SL)
- * 2) select IPG (i.e., call set_link_ipg())
- * 3) start timer
- */
- ccti_limit = cc_state->cct.ccti_limit;
- ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
- ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
- trigger_threshold =
- cc_state->cong_setting.entries[sl].trigger_threshold;
-
- spin_lock_irqsave(&ppd->cca_timer_lock, flags);
-
- if (cca_timer->ccti < ccti_limit) {
- if (cca_timer->ccti + ccti_incr <= ccti_limit)
- cca_timer->ccti += ccti_incr;
- else
- cca_timer->ccti = ccti_limit;
- set_link_ipg(ppd);
- }
-
- spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
-
- ccti = cca_timer->ccti;
-
- if (!hrtimer_active(&cca_timer->hrtimer)) {
- /* ccti_timer is in units of 1.024 usec */
- unsigned long nsec = 1024 * ccti_timer;
-
- hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
- HRTIMER_MODE_REL);
- }
-
- if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
- log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
-}
-
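-/*
- * Likewise for the timer arming above: ccti_timer is kept in 1.024 usec
- * units, so nsec = 1024 * ccti_timer. A ccti_timer of 100, for example,
- * arms the hrtimer for 102,400 ns (102.4 usec).
- */
-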
-/**
- * hfi1_rc_rcv - process an incoming RC packet
- * @packet: data packet information
- *
- * This is called from qp_rcv() to process an incoming RC packet
- * for the given QP.
- * May be called at interrupt level.
- */
-void hfi1_rc_rcv(struct hfi1_packet *packet)
-{
- struct hfi1_ctxtdata *rcd = packet->rcd;
- struct hfi1_ib_header *hdr = packet->hdr;
- u32 rcv_flags = packet->rcv_flags;
- void *data = packet->ebuf;
- u32 tlen = packet->tlen;
- struct rvt_qp *qp = packet->qp;
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_other_headers *ohdr = packet->ohdr;
- u32 bth0, opcode;
- u32 hdrsize = packet->hlen;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- int diff;
- struct ib_reth *reth;
- unsigned long flags;
- u32 bth1;
- int ret, is_fecn = 0;
- int copy_last = 0;
-
- bth0 = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
- return;
-
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
- if (bth1 & HFI1_BECN_SMASK) {
- u16 rlid = qp->remote_ah_attr.dlid;
- u32 lqpn, rqpn;
-
- lqpn = qp->ibqp.qp_num;
- rqpn = qp->remote_qpn;
- process_becn(
- ppd,
- qp->remote_ah_attr.sl,
- rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_RC);
- }
- is_fecn = bth1 & HFI1_FECN_SMASK;
- }
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode = (bth0 >> 24) & 0xff;
-
- /*
- * Process responses (ACKs) before anything else. Note that the
- * packet sequence number will be for something in the send work
- * queue rather than the expected receive packet sequence number.
- * In other words, this QP is the requester.
- */
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
- hdrsize, pmtu, rcd);
- if (is_fecn)
- goto send_ack;
- return;
- }
-
- /* Compute 24 bits worth of difference. */
- diff = delta_psn(psn, qp->r_psn);
- if (unlikely(diff)) {
- if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
- return;
- goto send_ack;
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- default:
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- goto nack_inv;
- /*
- * Note that it is up to the requester to not send a new
- * RDMA read or atomic operation before receiving an ACK
- * for the previous operation.
- */
- break;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
- qp_comm_est(qp);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- ret = hfi1_rvt_get_rwqe(qp, 0);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- case OP(RDMA_WRITE_MIDDLE):
-send_middle:
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto nack_inv;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- /* consume RWQE */
- ret = hfi1_rvt_get_rwqe(qp, 1);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- goto send_last_imm;
-
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = hfi1_rvt_get_rwqe(qp, 0);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(RDMA_WRITE_LAST):
- copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
- /* fall through */
- case OP(SEND_LAST):
-no_immediate_data:
- wc.wc_flags = 0;
- wc.ex.imm_data = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (bth0 >> 20) & 3;
- /* Check for invalid length. */
- /* LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto nack_inv;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
- rvt_put_ss(&qp->r_sge);
- qp->r_msn++;
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- break;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- /*
- * It seems that IB mandates the presence of an SL in a
- * work completion only for the UD transport (see section
- * 11.4.2 of IBTA Vol. 1).
- *
- * However, the way the SL is chosen below is consistent
-		 * with the way that IB/qib works and tries to avoid
- * introducing incompatibilities.
- *
- * See also OPA Vol. 1, section 9.7.6, and table 9-17.
- */
- wc.sl = qp->remote_ah_attr.sl;
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- (bth0 & IB_BTH_SOLICITED) != 0);
- break;
-
- case OP(RDMA_WRITE_ONLY):
- copy_last = 1;
- /* fall through */
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto nack_inv;
- /* consume RWQE */
- reth = &ohdr->u.rc.reth;
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
- rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto nack_acc;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_FIRST))
- goto send_middle;
- else if (opcode == OP(RDMA_WRITE_ONLY))
- goto no_immediate_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
-
- case OP(RDMA_READ_REQUEST): {
- struct rvt_ack_entry *e;
- u32 len;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- /* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
- if (next > HFI1_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- reth = &ohdr->u.rc.reth;
- len = be32_to_cpu(reth->length);
- if (len) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
- rkey, IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto nack_acc_unlck;
- /*
- * Update the next expected PSN. We add 1 later
- * below, so only add the remainder here.
- */
- if (len > pmtu)
- qp->r_psn += (len - 1) / pmtu;
- } else {
- e->rdma_sge.mr = NULL;
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = qp->r_psn;
- /*
- * We need to increment the MSN here instead of when we
- * finish sending the result since a duplicate request would
- * increment it more than once.
- */
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= RVT_S_RESP_PENDING;
- hfi1_schedule_send(qp);
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- goto send_ack;
- return;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- struct ib_atomic_eth *ateth;
- struct rvt_ack_entry *e;
- u64 vaddr;
- atomic64_t *maddr;
- u64 sdata;
- u32 rkey;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- if (next > HFI1_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- ateth = &ohdr->u.atomic_eth;
- vaddr = ((u64)be32_to_cpu(ateth->vaddr[0]) << 32) |
- be32_to_cpu(ateth->vaddr[1]);
- if (unlikely(vaddr & (sizeof(u64) - 1)))
- goto nack_inv_unlck;
- rkey = be32_to_cpu(ateth->rkey);
- /* Check rkey & NAK */
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- vaddr, rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc_unlck;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = be64_to_cpu(ateth->swap_data);
- e->atomic_data = (opcode == OP(FETCH_ADD)) ?
- (u64)atomic64_add_return(sdata, maddr) - sdata :
- (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- be64_to_cpu(ateth->compare_data),
- sdata);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = psn;
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= RVT_S_RESP_PENDING;
- hfi1_schedule_send(qp);
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
- if (is_fecn)
- goto send_ack;
- return;
- }
-
- default:
- /* NAK unknown opcodes. */
- goto nack_inv;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_ack_psn = psn;
- qp->r_nak_state = 0;
- /* Send an ACK if requested or required. */
- if (psn & IB_BTH_REQ_ACK) {
- struct hfi1_qp_priv *priv = qp->priv;
-
- if (packet->numpkt == 0) {
- rc_cancel_ack(qp);
- goto send_ack;
- }
- if (priv->r_adefered >= HFI1_PSN_CREDIT) {
- rc_cancel_ack(qp);
- goto send_ack;
- }
- if (unlikely(is_fecn)) {
- rc_cancel_ack(qp);
- goto send_ack;
- }
- priv->r_adefered++;
- rc_defered_ack(rcd, qp);
- }
- return;
-
-rnr_nak:
- qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
- qp->r_ack_psn = qp->r_psn;
- /* Queue RNR NAK for later */
- rc_defered_ack(rcd, qp);
- return;
-
-nack_op_err:
- hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- rc_defered_ack(rcd, qp);
- return;
-
-nack_inv_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
- hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_INVALID_REQUEST;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- rc_defered_ack(rcd, qp);
- return;
-
-nack_acc_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
- hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
- qp->r_ack_psn = qp->r_psn;
-send_ack:
- hfi1_send_rc_ack(rcd, qp, is_fecn);
-}
-
-void hfi1_rc_hdrerr(
- struct hfi1_ctxtdata *rcd,
- struct hfi1_ib_header *hdr,
- u32 rcv_flags,
- struct rvt_qp *qp)
-{
- int has_grh = rcv_flags & HFI1_HAS_GRH;
- struct hfi1_other_headers *ohdr;
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- int diff;
- u32 opcode;
- u32 psn, bth0;
-
- /* Check for GRH */
- ohdr = &hdr->u.oth;
- if (has_grh)
- ohdr = &hdr->u.l.oth;
-
- bth0 = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
- return;
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode = (bth0 >> 24) & 0xff;
-
- /* Only deal with RDMA Writes for now */
- if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
- diff = delta_psn(psn, qp->r_psn);
- if (!qp->r_nak_state && diff >= 0) {
- ibp->rvp.n_rc_seqnak++;
- qp->r_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
-			/*
-			 * Wait to send the sequence NAK until all packets
-			 * in the receive queue have been processed.
-			 * Otherwise, we end up propagating congestion.
-			 */
- rc_defered_ack(rcd, qp);
- } /* Out of sequence NAK */
- } /* QP Request NAKs */
-}
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
deleted file mode 100644
index 08813cdbd..000000000
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ /dev/null
@@ -1,977 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/spinlock.h>
-
-#include "hfi.h"
-#include "mad.h"
-#include "qp.h"
-#include "verbs_txreq.h"
-#include "trace.h"
-
-/*
- * Convert the AETH RNR timeout code into the number of microseconds.
- */
-const u32 ib_hfi1_rnr_table[32] = {
- 655360, /* 00: 655.36 */
- 10, /* 01: .01 */
-	20,	/* 02: .02 */
- 30, /* 03: .03 */
- 40, /* 04: .04 */
- 60, /* 05: .06 */
- 80, /* 06: .08 */
- 120, /* 07: .12 */
- 160, /* 08: .16 */
- 240, /* 09: .24 */
- 320, /* 0A: .32 */
- 480, /* 0B: .48 */
- 640, /* 0C: .64 */
- 960, /* 0D: .96 */
- 1280, /* 0E: 1.28 */
- 1920, /* 0F: 1.92 */
- 2560, /* 10: 2.56 */
- 3840, /* 11: 3.84 */
- 5120, /* 12: 5.12 */
- 7680, /* 13: 7.68 */
- 10240, /* 14: 10.24 */
- 15360, /* 15: 15.36 */
- 20480, /* 16: 20.48 */
- 30720, /* 17: 30.72 */
- 40960, /* 18: 40.96 */
- 61440, /* 19: 61.44 */
- 81920, /* 1A: 81.92 */
- 122880, /* 1B: 122.88 */
- 163840, /* 1C: 163.84 */
- 245760, /* 1D: 245.76 */
- 327680, /* 1E: 327.68 */
- 491520 /* 1F: 491.52 */
-};
-
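-/*
- * Example lookup: an RNR NAK whose AETH credit field carries timer
- * code 0x0D indexes entry 0x0D above, i.e. 960 usec, which do_rc_ack()
- * in rc.c hands to hfi1_add_rnr_timer().
- */
-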
-/*
- * Validate a RWQE and fill in the SGE state.
- * Return 1 if OK.
- */
-static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
-{
- int i, j, ret;
- struct ib_wc wc;
- struct rvt_lkey_table *rkt;
- struct rvt_pd *pd;
- struct rvt_sge_state *ss;
-
- rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
- pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
- ss = &qp->r_sge;
- ss->sg_list = qp->r_sg_list;
- qp->r_len = 0;
- for (i = j = 0; i < wqe->num_sge; i++) {
- if (wqe->sg_list[i].length == 0)
- continue;
- /* Check LKEY */
- if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
- goto bad_lkey;
- qp->r_len += wqe->sg_list[i].length;
- j++;
- }
- ss->num_sge = j;
- ss->total_len = qp->r_len;
- ret = 1;
- goto bail;
-
-bad_lkey:
- while (j) {
- struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
-
- rvt_put_mr(sge->mr);
- }
- ss->num_sge = 0;
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr_id;
- wc.status = IB_WC_LOC_PROT_ERR;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- /* Signal solicited completion event. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
- ret = 0;
-bail:
- return ret;
-}
-
-/**
- * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return -1 if there is a local error, 0 if no RWQE is available,
- * otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
-{
- unsigned long flags;
- struct rvt_rq *rq;
- struct rvt_rwq *wq;
- struct rvt_srq *srq;
- struct rvt_rwqe *wqe;
- void (*handler)(struct ib_event *, void *);
- u32 tail;
- int ret;
-
- if (qp->ibqp.srq) {
- srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
- handler = srq->ibsrq.event_handler;
- rq = &srq->rq;
- } else {
- srq = NULL;
- handler = NULL;
- rq = &qp->r_rq;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ret = 0;
- goto unlock;
- }
-
- wq = rq->wq;
- tail = wq->tail;
- /* Validate tail before using it since it is user writable. */
- if (tail >= rq->size)
- tail = 0;
- if (unlikely(tail == wq->head)) {
- ret = 0;
- goto unlock;
- }
- /* Make sure entry is read after head index is read. */
- smp_rmb();
- wqe = rvt_get_rwqe_ptr(rq, tail);
- /*
- * Even though we update the tail index in memory, the verbs
- * consumer is not supposed to post more entries until a
- * completion is generated.
- */
- if (++tail >= rq->size)
- tail = 0;
- wq->tail = tail;
- if (!wr_id_only && !init_sge(qp, wqe)) {
- ret = -1;
- goto unlock;
- }
- qp->r_wr_id = wqe->wr_id;
-
- ret = 1;
- set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
- if (handler) {
- u32 n;
-
- /*
- * Validate head pointer value and compute
- * the number of remaining WQEs.
- */
- n = wq->head;
- if (n >= rq->size)
- n = 0;
- if (n < tail)
- n += rq->size - tail;
- else
- n -= tail;
- if (n < srq->limit) {
- struct ib_event ev;
-
- srq->limit = 0;
- spin_unlock_irqrestore(&rq->lock, flags);
- ev.device = qp->ibqp.device;
- ev.element.srq = qp->ibqp.srq;
- ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
- handler(&ev, srq->ibsrq.srq_context);
- goto bail;
- }
- }
-unlock:
- spin_unlock_irqrestore(&rq->lock, flags);
-bail:
- return ret;
-}
-
-static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
-{
- if (!index) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- return cpu_to_be64(ppd->guid);
- }
- return ibp->guids[index - 1];
-}
-
-static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
-{
- return (gid->global.interface_id == id &&
- (gid->global.subnet_prefix == gid_prefix ||
- gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
-}
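-
-/*
- * e.g. a received GID of fe80::0011:2233:4455:6677 still matches an
- * expected interface_id of 0011:2233:4455:6677 even when the local
- * subnet prefix differs, since fe80::/64 is IB_DEFAULT_GID_PREFIX.
- */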
-
-/*
- * This should be called with the QP r_lock held.
- *
- * The s_lock will be acquired around the hfi1_migrate_qp() call.
- */
-int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0)
-{
- __be64 guid;
- unsigned long flags;
- u8 sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
-
- if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
- if (!has_grh) {
- if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
- goto err;
- } else {
- if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
- goto err;
- guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
- guid))
- goto err;
- if (!gid_ok(
- &hdr->u.l.grh.sgid,
- qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
- qp->alt_ah_attr.grh.dgid.global.interface_id))
- goto err;
- }
- if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
- sc5, be16_to_cpu(hdr->lrh[3])))) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
- ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
- goto err;
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_migrate_qp(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else {
- if (!has_grh) {
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- goto err;
- } else {
- if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
- goto err;
- guid = get_sguid(ibp,
- qp->remote_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
- guid))
- goto err;
- if (!gid_ok(
- &hdr->u.l.grh.sgid,
- qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
- qp->remote_ah_attr.grh.dgid.global.interface_id))
- goto err;
- }
- if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
- sc5, be16_to_cpu(hdr->lrh[3])))) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
- ppd_from_ibp(ibp)->port != qp->port_num)
- goto err;
- if (qp->s_mig_state == IB_MIG_REARM &&
- !(bth0 & IB_BTH_MIG_REQ))
- qp->s_mig_state = IB_MIG_ARMED;
- }
-
- return 0;
-
-err:
- return 1;
-}
-
-/**
- * ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from hfi1_do_send() to
- * forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ruc_loopback(struct rvt_qp *sqp)
-{
- struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- int release;
- int ret;
- int copy_last = 0;
- u32 to;
-
- rcu_read_lock();
-
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
- sqp->remote_qpn);
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
-		 * For RC, the requester would time out and retry, so
-		 * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = 1;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- ret = hfi1_rvt_get_rwqe(qp, 0);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = hfi1_rvt_get_rwqe(qp, 1);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* skip copy_last set and qp_access_flags recheck */
- goto do_write;
- case IB_WR_RDMA_WRITE:
- copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
-do_write:
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = 0;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
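-		/*
-		 * Both opcodes hand back the value that was in memory
-		 * before the operation: atomic64_add_return() returns
-		 * the new value, so sdata is subtracted back out, while
-		 * cmpxchg() returns the old value directly.
-		 */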
- maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *)sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64)atomic64_add_return(sdata, maddr) - sdata :
- (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- wc.sl = qp->remote_ah_attr.sl;
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- hfi1_send_complete(sqp, wqe, send_status);
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
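-	/*
-	 * A retry count of 7 means "retry forever" in IBTA, so only a
-	 * finite count is decremented.
-	 */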
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
- hfi1_add_rnr_timer(sqp, to);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- hfi1_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- hfi1_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
-
-/**
- * hfi1_make_grh - construct a GRH header
- * @ibp: a pointer to the IB port
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
- hdr->version_tclass_flow =
- cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
- (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
- (grh->flow_label << IB_GRH_FLOW_SHIFT));
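-	/*
-	 * hwords does not yet include the GRH at this point; dropping
-	 * the two LRH dwords leaves BTH onward, so paylen counts
-	 * everything following the GRH (headers, payload and ICRC)
-	 * in bytes.
-	 */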
- hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- hdr->next_hdr = IB_GRH_NEXT_HDR;
- hdr->hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
- hdr->sgid.global.interface_id =
- grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
- ibp->guids[grh->sgid_index - 1] :
- cpu_to_be64(ppd_from_ibp(ibp)->guid);
- hdr->dgid = grh->dgid;
-
- /* GRH header size in 32-bit words. */
- return sizeof(struct ib_grh) / sizeof(u32);
-}
-
-#define BTH2_OFFSET (offsetof(struct hfi1_pio_header, hdr.u.oth.bth[2]) / 4)
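-
-/*
- * offsetof()/4 turns the byte offset of bth[2] within the PIO header
- * into the 32-bit-word index that the AHG descriptors built below
- * patch when only the PSN changes between packets.
- */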
-
-/**
- * build_ahg - create ahg in s_hdr
- * @qp: a pointer to QP
- * @npsn: the next PSN for the request/response
- *
- * This routine handles the AHG by allocating an ahg entry and forcing
- * a full header copy on the first middle packet.
- *
- * Subsequent middles use the copied entry, editing the
- * PSN with 1 or 2 edits.
- */
-static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
-{
- struct hfi1_qp_priv *priv = qp->priv;
-
- if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
- clear_ahg(qp);
- if (!(qp->s_flags & RVT_S_AHG_VALID)) {
- /* first middle that needs copy */
- if (qp->s_ahgidx < 0)
- qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
- if (qp->s_ahgidx >= 0) {
- qp->s_ahgpsn = npsn;
- priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
- /* save to protect a change in another thread */
- priv->s_hdr->sde = priv->s_sde;
- priv->s_hdr->ahgidx = qp->s_ahgidx;
- qp->s_flags |= RVT_S_AHG_VALID;
- }
- } else {
- /* subsequent middle after valid */
- if (qp->s_ahgidx >= 0) {
- priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
- priv->s_hdr->ahgidx = qp->s_ahgidx;
- priv->s_hdr->ahgcount++;
- priv->s_hdr->ahgdesc[0] =
- sdma_build_ahg_descriptor(
- (__force u16)cpu_to_be16((u16)npsn),
- BTH2_OFFSET,
- 16,
- 16);
- if ((npsn & 0xffff0000) !=
- (qp->s_ahgpsn & 0xffff0000)) {
- priv->s_hdr->ahgcount++;
- priv->s_hdr->ahgdesc[1] =
- sdma_build_ahg_descriptor(
- (__force u16)cpu_to_be16(
- (u16)(npsn >> 16)),
- BTH2_OFFSET,
- 0,
- 16);
- }
- }
- }
-}
-
-void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ibport *ibp = ps->ibp;
- u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
- u32 bth1;
-
- /* Construct the header. */
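-	/*
-	 * "-x & 3" is the pad needed to round x up to a multiple of 4,
-	 * e.g. s_cur_size == 13 gives extra_bytes == 3 and
-	 * nwords == (13 + 3) >> 2 == 4.
-	 */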
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
- lrh0 = HFI1_LRH_BTH;
- if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += hfi1_make_grh(ibp,
- &ps->s_txreq->phdr.hdr.u.l.grh,
- &qp->remote_ah_attr.grh,
- qp->s_hdrwords, nwords);
- lrh0 = HFI1_LRH_GRH;
- middle = 0;
- }
- lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
- /*
- * reset s_hdr/AHG fields
- *
-	 * This ensures that the ahgentry/ahgcount
-	 * are at a non-AHG default to keep
-	 * build_verbs_tx_desc() from using a stale
-	 * ahgidx.
- *
- * build_ahg() will modify as appropriate
- * to use the AHG feature.
- */
- priv->s_hdr->tx_flags = 0;
- priv->s_hdr->ahgcount = 0;
- priv->s_hdr->ahgidx = 0;
- priv->s_hdr->sde = NULL;
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- else
- middle = 0;
- if (middle)
- build_ahg(qp, bth2);
- else
- qp->s_flags &= ~RVT_S_AHG_VALID;
- ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
- ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- ps->s_txreq->phdr.hdr.lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
- qp->remote_ah_attr.src_path_bits);
- bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
- bth0 |= extra_bytes << 20;
- ohdr->bth[0] = cpu_to_be32(bth0);
- bth1 = qp->remote_qpn;
- if (qp->s_flags & RVT_S_ECN) {
- qp->s_flags &= ~RVT_S_ECN;
- /* we recently received a FECN, so return a BECN */
- bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
- }
- ohdr->bth[1] = cpu_to_be32(bth1);
- ohdr->bth[2] = cpu_to_be32(bth2);
-}
-
-/* when sending, force a reschedule every one of these periods */
-#define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
-
-void _hfi1_do_send(struct work_struct *work)
-{
- struct iowait *wait = container_of(work, struct iowait, iowork);
- struct rvt_qp *qp = iowait_to_qp(wait);
-
- hfi1_do_send(qp);
-}
-
-/**
- * hfi1_do_send - perform a send on a QP
- * @qp: a pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void hfi1_do_send(struct rvt_qp *qp)
-{
- struct hfi1_pkt_state ps;
- struct hfi1_qp_priv *priv = qp->priv;
- int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
- unsigned long flags;
- unsigned long timeout;
- unsigned long timeout_int;
- int cpu;
-
- ps.dev = to_idev(qp->ibqp.device);
- ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
- ps.ppd = ppd_from_ibp(ps.ibp);
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
-		if (!loopback &&
-		    ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) ==
-		     ps.ppd->lid)) {
- ruc_loopback(qp);
- return;
- }
- make_req = hfi1_make_rc_req;
- timeout_int = (qp->timeout_jiffies);
- break;
- case IB_QPT_UC:
-		if (!loopback &&
-		    ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) ==
-		     ps.ppd->lid)) {
- ruc_loopback(qp);
- return;
- }
- make_req = hfi1_make_uc_req;
- timeout_int = SEND_RESCHED_TIMEOUT;
- break;
- default:
- make_req = hfi1_make_ud_req;
- timeout_int = SEND_RESCHED_TIMEOUT;
- }
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if (!hfi1_send_ok(qp)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return;
- }
-
- qp->s_flags |= RVT_S_BUSY;
-
- timeout = jiffies + (timeout_int) / 8;
- cpu = priv->s_sde ? priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(ps.ppd->dd->node));
-	/* ensure a pre-built packet is handled */
- ps.s_txreq = get_waiting_verbs_txreq(qp);
- do {
- /* Check for a constructed packet to be sent. */
- if (qp->s_hdrwords != 0) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- /*
- * If the packet cannot be sent now, return and
- * the send tasklet will be woken up later.
- */
- if (hfi1_verbs_send(qp, &ps))
- return;
- /* Record that s_hdr is empty. */
- qp->s_hdrwords = 0;
- /* allow other tasks to run */
- if (unlikely(time_after(jiffies, timeout))) {
- if (workqueue_congested(cpu,
- ps.ppd->hfi1_wq)) {
- spin_lock_irqsave(&qp->s_lock, flags);
- qp->s_flags &= ~RVT_S_BUSY;
- hfi1_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock,
- flags);
- this_cpu_inc(
- *ps.ppd->dd->send_schedule);
- return;
- }
- if (!irqs_disabled()) {
- cond_resched();
- this_cpu_inc(
- *ps.ppd->dd->send_schedule);
- }
- timeout = jiffies + (timeout_int) / 8;
- }
- spin_lock_irqsave(&qp->s_lock, flags);
- }
- } while (make_req(qp, &ps));
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
- * This should be called with s_lock held.
- */
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
- unsigned i;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
- /* See post_send() */
- barrier();
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
-
- rvt_put_mr(sge->mr);
- }
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- /* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
- wc.qp = &qp->ibqp;
- if (status == IB_WC_SUCCESS)
- wc.byte_len = wqe->length;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
deleted file mode 100644
index abb8ebc1f..000000000
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ /dev/null
@@ -1,3052 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-#include <linux/bitops.h>
-#include <linux/timer.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-
-#include "hfi.h"
-#include "common.h"
-#include "qp.h"
-#include "sdma.h"
-#include "iowait.h"
-#include "trace.h"
-
-/* must be a power of 2 >= 64 <= 32768 */
-#define SDMA_DESCQ_CNT 2048
-#define SDMA_DESC_INTR 64
-#define INVALID_TAIL 0xffff
-
-static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
-module_param(sdma_descq_cnt, uint, S_IRUGO);
-MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
-
-static uint sdma_idle_cnt = 250;
-module_param(sdma_idle_cnt, uint, S_IRUGO);
-MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns,default 250)");
-
-uint mod_num_sdma;
-module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
-MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");
-
-static uint sdma_desct_intr = SDMA_DESC_INTR;
-module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before an interrupt");
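-
-/*
- * The read-only parameters above are set at load time, e.g.
- * "modprobe hfi1 num_sdma=8 sdma_descq_cnt=4096"; desct_intr is also
- * writable at runtime via /sys/module/hfi1/parameters/desct_intr.
- */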
-
-#define SDMA_WAIT_BATCH_SIZE 20
-/* max wait time for a SDMA engine to indicate it has halted */
-#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
-/* all SDMA engine errors that cause a halt */
-
-#define SD(name) SEND_DMA_##name
-#define ALL_SDMA_ENG_HALT_ERRS \
- (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
- | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
-
-/* sdma_sendctrl operations */
-#define SDMA_SENDCTRL_OP_ENABLE BIT(0)
-#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
-#define SDMA_SENDCTRL_OP_HALT BIT(2)
-#define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
-
-/* handle long defines */
-#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
-SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
-#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
-SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
-
-static const char * const sdma_state_names[] = {
- [sdma_state_s00_hw_down] = "s00_HwDown",
- [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
- [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
- [sdma_state_s20_idle] = "s20_Idle",
- [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
- [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
- [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
- [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
- [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
- [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
- [sdma_state_s99_running] = "s99_Running",
-};
-
-static const char * const sdma_event_names[] = {
- [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
- [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
- [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
- [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
- [sdma_event_e30_go_running] = "e30_GoRunning",
- [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
- [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
- [sdma_event_e60_hw_halted] = "e60_HwHalted",
- [sdma_event_e70_go_idle] = "e70_GoIdle",
- [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
- [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
- [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
- [sdma_event_e85_link_down] = "e85_LinkDown",
- [sdma_event_e90_sw_halted] = "e90_SwHalted",
-};
-
-static const struct sdma_set_state_action sdma_action_table[] = {
- [sdma_state_s00_hw_down] = {
- .go_s99_running_tofalse = 1,
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_cleanup = 0,
- },
- [sdma_state_s10_hw_start_up_halt_wait] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 1,
- .op_cleanup = 0,
- },
- [sdma_state_s15_hw_start_up_clean_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 0,
- .op_cleanup = 1,
- },
- [sdma_state_s20_idle] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 0,
- .op_cleanup = 0,
- },
- [sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_cleanup = 0,
- },
- [sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_cleanup = 1,
- },
- [sdma_state_s50_hw_halt_wait] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_cleanup = 0,
- },
- [sdma_state_s60_idle_halt_wait] = {
- .go_s99_running_tofalse = 1,
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 1,
- .op_cleanup = 0,
- },
- [sdma_state_s80_hw_freeze] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_cleanup = 0,
- },
- [sdma_state_s82_freeze_sw_clean] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_cleanup = 0,
- },
- [sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .op_cleanup = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-#define SDMA_TAIL_UPDATE_THRESH 0x1F
-
-/* declare all statics here rather than keep sorting */
-static void sdma_complete(struct kref *);
-static void sdma_finalput(struct sdma_state *);
-static void sdma_get(struct sdma_state *);
-static void sdma_hw_clean_up_task(unsigned long);
-static void sdma_put(struct sdma_state *);
-static void sdma_set_state(struct sdma_engine *, enum sdma_states);
-static void sdma_start_hw_clean_up(struct sdma_engine *);
-static void sdma_sw_clean_up_task(unsigned long);
-static void sdma_sendctrl(struct sdma_engine *, unsigned);
-static void init_sdma_regs(struct sdma_engine *, u32, uint);
-static void sdma_process_event(
- struct sdma_engine *sde,
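-		/*
-		 * n is now the ring distance from tail to head, e.g.
-		 * with size 8, head 2, tail 6: n = 2 + (8 - 6) = 4
-		 * entries still posted.
-		 */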
- enum sdma_events event);
-static void __sdma_process_event(
- struct sdma_engine *sde,
- enum sdma_events event);
-static void dump_sdma_state(struct sdma_engine *sde);
-static void sdma_make_progress(struct sdma_engine *sde, u64 status);
-static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
-static void sdma_flush_descq(struct sdma_engine *sde);
-
-/**
- * sdma_state_name() - return state string from enum
- * @state: state
- */
-static const char *sdma_state_name(enum sdma_states state)
-{
- return sdma_state_names[state];
-}
-
-static void sdma_get(struct sdma_state *ss)
-{
- kref_get(&ss->kref);
-}
-
-static void sdma_complete(struct kref *kref)
-{
- struct sdma_state *ss =
- container_of(kref, struct sdma_state, kref);
-
- complete(&ss->comp);
-}
-
-static void sdma_put(struct sdma_state *ss)
-{
- kref_put(&ss->kref, sdma_complete);
-}
-
-static void sdma_finalput(struct sdma_state *ss)
-{
- sdma_put(ss);
- wait_for_completion(&ss->comp);
-}
-
-static inline void write_sde_csr(
- struct sdma_engine *sde,
- u32 offset0,
- u64 value)
-{
- write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
-}
-
-static inline u64 read_sde_csr(
- struct sdma_engine *sde,
- u32 offset0)
-{
- return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
-}
-
-/*
- * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
- * sdma engine 'sde' to drop to 0.
- */
-static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
- int pause)
-{
- u64 off = 8 * sde->this_idx;
- struct hfi1_devdata *dd = sde->dd;
- int lcnt = 0;
- u64 reg_prev;
- u64 reg = 0;
-
- while (1) {
- reg_prev = reg;
- reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
-
- reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
- reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
- if (reg == 0)
- break;
-		/* counter is reset if the occupancy count changes */
- if (reg != reg_prev)
- lcnt = 0;
- if (lcnt++ > 500) {
- /* timed out - bounce the link */
- dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
- __func__, sde->this_idx, (u32)reg);
- queue_work(dd->pport->hfi1_wq,
- &dd->pport->link_bounce_work);
- break;
- }
- udelay(1);
- }
-}
-
-/*
- * sdma_wait() - wait for packet egress to complete for all SDMA engines,
- * and pause for credit return.
- */
-void sdma_wait(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < dd->num_sdma; i++) {
- struct sdma_engine *sde = &dd->per_sdma[i];
-
- sdma_wait_for_packet_egress(sde, 0);
- }
-}
-
-static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
-{
- u64 reg;
-
- if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
- return;
- reg = cnt;
- reg &= SD(DESC_CNT_CNT_MASK);
- reg <<= SD(DESC_CNT_CNT_SHIFT);
- write_sde_csr(sde, SD(DESC_CNT), reg);
-}
-
-static inline void complete_tx(struct sdma_engine *sde,
- struct sdma_txreq *tx,
- int res)
-{
- /* protect against complete modifying */
- struct iowait *wait = tx->wait;
- callback_t complete = tx->complete;
-
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- trace_hfi1_sdma_out_sn(sde, tx->sn);
- if (WARN_ON_ONCE(sde->head_sn != tx->sn))
- dd_dev_err(sde->dd, "expected %llu got %llu\n",
- sde->head_sn, tx->sn);
- sde->head_sn++;
-#endif
- sdma_txclean(sde->dd, tx);
- if (complete)
- (*complete)(tx, res);
- if (iowait_sdma_dec(wait) && wait)
- iowait_drain_wakeup(wait);
-}
-
-/*
- * Complete all the sdma requests with an SDMA_TXREQ_S_ABORTED status
- *
- * Depending on timing there can be txreqs in two places:
- * - in the descq ring
- * - in the flush list
- *
- * To avoid ordering issues the descq ring needs to be flushed
- * first followed by the flush list.
- *
- * This routine is called from two places
- * - From a work queue item
- * - Directly from the state machine just before setting the
- * state to running
- *
- * Must be called with head_lock held
- *
- */
-static void sdma_flush(struct sdma_engine *sde)
-{
- struct sdma_txreq *txp, *txp_next;
- LIST_HEAD(flushlist);
- unsigned long flags;
-
- /* flush from head to tail */
- sdma_flush_descq(sde);
- spin_lock_irqsave(&sde->flushlist_lock, flags);
- /* copy flush list */
- list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
- list_del_init(&txp->list);
- list_add_tail(&txp->list, &flushlist);
- }
- spin_unlock_irqrestore(&sde->flushlist_lock, flags);
- /* flush from flush list */
- list_for_each_entry_safe(txp, txp_next, &flushlist, list)
- complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
-}
-
-/*
- * Fields a work request for flushing the descq ring
- * and the flush list.
- *
- * If the engine was brought to the running state during the
- * scheduling delay, the flush is skipped on the assumption that
- * the transition to running already performed it.
- *
- */
-static void sdma_field_flush(struct work_struct *work)
-{
- unsigned long flags;
- struct sdma_engine *sde =
- container_of(work, struct sdma_engine, flush_worker);
-
- write_seqlock_irqsave(&sde->head_lock, flags);
- if (!__sdma_running(sde))
- sdma_flush(sde);
- write_sequnlock_irqrestore(&sde->head_lock, flags);
-}
-
-static void sdma_err_halt_wait(struct work_struct *work)
-{
- struct sdma_engine *sde = container_of(work, struct sdma_engine,
- err_halt_worker);
- u64 statuscsr;
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
- while (1) {
- statuscsr = read_sde_csr(sde, SD(STATUS));
- statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
- if (statuscsr)
- break;
- if (time_after(jiffies, timeout)) {
- dd_dev_err(sde->dd,
- "SDMA engine %d - timeout waiting for engine to halt\n",
- sde->this_idx);
- /*
- * Continue anyway. This could happen if there was
- * an uncorrectable error in the wrong spot.
- */
- break;
- }
- usleep_range(80, 120);
- }
-
- sdma_process_event(sde, sdma_event_e15_hw_halt_done);
-}
-
-static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
-{
- if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
- unsigned index;
- struct hfi1_devdata *dd = sde->dd;
-
- for (index = 0; index < dd->num_sdma; index++) {
- struct sdma_engine *curr_sdma = &dd->per_sdma[index];
-
- if (curr_sdma != sde)
- curr_sdma->progress_check_head =
- curr_sdma->descq_head;
- }
- dd_dev_err(sde->dd,
- "SDMA engine %d - check scheduled\n",
- sde->this_idx);
- mod_timer(&sde->err_progress_check_timer, jiffies + 10);
- }
-}
-
-static void sdma_err_progress_check(unsigned long data)
-{
- unsigned index;
- struct sdma_engine *sde = (struct sdma_engine *)data;
-
- dd_dev_err(sde->dd, "SDE progress check event\n");
- for (index = 0; index < sde->dd->num_sdma; index++) {
- struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
- unsigned long flags;
-
- /* check progress on each engine except the current one */
- if (curr_sde == sde)
- continue;
- /*
-		 * We must disable interrupts when acquiring sde->lock,
-		 * to avoid a deadlock if an interrupt triggers and spins
-		 * on the same lock on the same CPU.
- */
- spin_lock_irqsave(&curr_sde->tail_lock, flags);
- write_seqlock(&curr_sde->head_lock);
-
- /* skip non-running queues */
- if (curr_sde->state.current_state != sdma_state_s99_running) {
- write_sequnlock(&curr_sde->head_lock);
- spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
- continue;
- }
-
- if ((curr_sde->descq_head != curr_sde->descq_tail) &&
- (curr_sde->descq_head ==
- curr_sde->progress_check_head))
- __sdma_process_event(curr_sde,
- sdma_event_e90_sw_halted);
- write_sequnlock(&curr_sde->head_lock);
- spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
- }
- schedule_work(&sde->err_halt_worker);
-}
-
-static void sdma_hw_clean_up_task(unsigned long opaque)
-{
- struct sdma_engine *sde = (struct sdma_engine *)opaque;
- u64 statuscsr;
-
- while (1) {
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
- sde->this_idx, slashstrip(__FILE__), __LINE__,
- __func__);
-#endif
- statuscsr = read_sde_csr(sde, SD(STATUS));
- statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
- if (statuscsr)
- break;
- udelay(10);
- }
-
- sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
-}
-
-static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
-{
- smp_read_barrier_depends(); /* see sdma_update_tail() */
- return sde->tx_ring[sde->tx_head & sde->sdma_mask];
-}
-
-/*
- * flush ring for recovery
- */
-static void sdma_flush_descq(struct sdma_engine *sde)
-{
- u16 head, tail;
- int progress = 0;
- struct sdma_txreq *txp = get_txhead(sde);
-
- /* The reason for some of the complexity of this code is that
- * not all descriptors have corresponding txps. So, we have to
- * be able to skip over descs until we wander into the range of
- * the next txp on the list.
- */
- head = sde->descq_head & sde->sdma_mask;
- tail = sde->descq_tail & sde->sdma_mask;
- while (head != tail) {
- /* advance head, wrap if needed */
- head = ++sde->descq_head & sde->sdma_mask;
- /* if now past this txp's descs, do the callback */
- if (txp && txp->next_descq_idx == head) {
- /* remove from list */
- sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
- complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
- trace_hfi1_sdma_progress(sde, head, tail, txp);
- txp = get_txhead(sde);
- }
- progress++;
- }
- if (progress)
- sdma_desc_avail(sde, sdma_descq_freecnt(sde));
-}
-
-static void sdma_sw_clean_up_task(unsigned long opaque)
-{
- struct sdma_engine *sde = (struct sdma_engine *)opaque;
- unsigned long flags;
-
- spin_lock_irqsave(&sde->tail_lock, flags);
- write_seqlock(&sde->head_lock);
-
- /*
- * At this point, the following should always be true:
- * - We are halted, so no more descriptors are getting retired.
- * - We are not running, so no one is submitting new work.
- * - Only we can send the e40_sw_cleaned, so we can't start
- * running again until we say so. So, the active list and
- * descq are ours to play with.
- */
-
- /*
- * In the error clean up sequence, software clean must be called
- * before the hardware clean so we can use the hardware head in
- * the progress routine. A hardware clean or SPC unfreeze will
- * reset the hardware head.
- *
- * Process all retired requests. The progress routine will use the
- * latest physical hardware head - we are not running so speed does
- * not matter.
- */
- sdma_make_progress(sde, 0);
-
- sdma_flush(sde);
-
- /*
- * Reset our notion of head and tail.
- * Note that the HW registers have been reset via an earlier
- * clean up.
- */
- sde->descq_tail = 0;
- sde->descq_head = 0;
- sde->desc_avail = sdma_descq_freecnt(sde);
- *sde->head_dma = 0;
-
- __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
-
- write_sequnlock(&sde->head_lock);
- spin_unlock_irqrestore(&sde->tail_lock, flags);
-}
-
-static void sdma_sw_tear_down(struct sdma_engine *sde)
-{
- struct sdma_state *ss = &sde->state;
-
- /* Releasing this reference means the state machine has stopped. */
- sdma_put(ss);
-
- /* stop waiting for all unfreeze events to complete */
- atomic_set(&sde->dd->sdma_unfreeze_count, -1);
- wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
-}
-
-static void sdma_start_hw_clean_up(struct sdma_engine *sde)
-{
- tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
-}
-
-static void sdma_set_state(struct sdma_engine *sde,
- enum sdma_states next_state)
-{
- struct sdma_state *ss = &sde->state;
- const struct sdma_set_state_action *action = sdma_action_table;
- unsigned op = 0;
-
- trace_hfi1_sdma_state(
- sde,
- sdma_state_names[ss->current_state],
- sdma_state_names[next_state]);
-
- /* debugging bookkeeping */
- ss->previous_state = ss->current_state;
- ss->previous_op = ss->current_op;
- ss->current_state = next_state;
-
- if (ss->previous_state != sdma_state_s99_running &&
- next_state == sdma_state_s99_running)
- sdma_flush(sde);
-
- if (action[next_state].op_enable)
- op |= SDMA_SENDCTRL_OP_ENABLE;
-
- if (action[next_state].op_intenable)
- op |= SDMA_SENDCTRL_OP_INTENABLE;
-
- if (action[next_state].op_halt)
- op |= SDMA_SENDCTRL_OP_HALT;
-
- if (action[next_state].op_cleanup)
- op |= SDMA_SENDCTRL_OP_CLEANUP;
-
- if (action[next_state].go_s99_running_tofalse)
- ss->go_s99_running = 0;
-
- if (action[next_state].go_s99_running_totrue)
- ss->go_s99_running = 1;
-
- ss->current_op = op;
- sdma_sendctrl(sde, ss->current_op);
-}
-
-/**
- * sdma_get_descq_cnt() - called when device probed
- *
- * Return a validated descq count.
- *
- * This is currently only used in the verbs initialization to build the tx
- * list.
- *
- * This will probably be deleted in favor of a more scalable approach
- * to allocating txreqs.
- *
- */
-u16 sdma_get_descq_cnt(void)
-{
- u16 count = sdma_descq_cnt;
-
- if (!count)
- return SDMA_DESCQ_CNT;
-	/* count must be a power of 2 that is >= 64 and <= 32768;
-	 * otherwise return the default
-	 */
- if (!is_power_of_2(count))
- return SDMA_DESCQ_CNT;
- if (count < 64 || count > 32768)
- return SDMA_DESCQ_CNT;
- return count;
-}
-
-/**
- * sdma_select_engine_vl() - select sdma engine
- * @dd: devdata
- * @selector: a spreading factor
- * @vl: this vl
- *
- *
- * This function returns an engine based on the selector and a vl. The
- * mapping fields are protected by RCU.
- */
-struct sdma_engine *sdma_select_engine_vl(
- struct hfi1_devdata *dd,
- u32 selector,
- u8 vl)
-{
- struct sdma_vl_map *m;
- struct sdma_map_elem *e;
- struct sdma_engine *rval;
-
- /* NOTE This should only happen if SC->VL changed after the initial
- * checks on the QP/AH
- * Default will return engine 0 below
- */
- if (vl >= num_vls) {
- rval = NULL;
- goto done;
- }
-
- rcu_read_lock();
- m = rcu_dereference(dd->sdma_map);
- if (unlikely(!m)) {
- rcu_read_unlock();
- return &dd->per_sdma[0];
- }
- e = m->map[vl & m->mask];
- rval = e->sde[selector & e->mask];
- rcu_read_unlock();
-
-done:
- rval = !rval ? &dd->per_sdma[0] : rval;
- trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
- return rval;
-}
-
-/**
- * sdma_select_engine_sc() - select sdma engine
- * @dd: devdata
- * @selector: a spreading factor
- * @sc5: the 5 bit sc
- *
- *
- * This function returns an engine based on the selector and an sc.
- */
-struct sdma_engine *sdma_select_engine_sc(
- struct hfi1_devdata *dd,
- u32 selector,
- u8 sc5)
-{
- u8 vl = sc_to_vlt(dd, sc5);
-
- return sdma_select_engine_vl(dd, selector, vl);
-}
-
-/*
- * Free the indicated map struct
- */
-static void sdma_map_free(struct sdma_vl_map *m)
-{
- int i;
-
- for (i = 0; m && i < m->actual_vls; i++)
- kfree(m->map[i]);
- kfree(m);
-}
-
-/*
- * Handle RCU callback
- */
-static void sdma_map_rcu_callback(struct rcu_head *list)
-{
- struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
-
- sdma_map_free(m);
-}
-
-/**
- * sdma_map_init - called when # vls change
- * @dd: hfi1_devdata
- * @port: port number
- * @num_vls: number of vls
- * @vl_engines: per vl engine mapping (optional)
- *
- * This routine changes the mapping based on the number of vls.
- *
- * vl_engines is used to specify a non-uniform vl/engine loading. NULL
- * implies auto-computing the loading and giving each VL a uniform
- * distribution of engines.
- *
- * The auto algorithm computes the sde_per_vl and the number of extra
- * engines. Any extra engines are added from the last VL on down.
- *
- * rcu locking is used here to control access to the mapping fields.
- *
- * If either the num_vls or num_sdma are non-power of 2, the array sizes
- * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
- * up to the next highest power of 2 and the first entry is reused
- * in a round robin fashion.
- *
- * If an error occurs the map change is not done and the mapping is
- * not changed.
- *
- */
-int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
-{
- int i, j;
- int extra, sde_per_vl;
- int engine = 0;
- u8 lvl_engines[OPA_MAX_VLS];
- struct sdma_vl_map *oldmap, *newmap;
-
- if (!(dd->flags & HFI1_HAS_SEND_DMA))
- return 0;
-
- if (!vl_engines) {
- /* truncate divide */
- sde_per_vl = dd->num_sdma / num_vls;
- /* extras */
- extra = dd->num_sdma % num_vls;
- vl_engines = lvl_engines;
- /* add extras from last vl down */
- for (i = num_vls - 1; i >= 0; i--, extra--)
- vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
- }
- /* build new map */
- newmap = kzalloc(
- sizeof(struct sdma_vl_map) +
- roundup_pow_of_two(num_vls) *
- sizeof(struct sdma_map_elem *),
- GFP_KERNEL);
- if (!newmap)
- goto bail;
- newmap->actual_vls = num_vls;
- newmap->vls = roundup_pow_of_two(num_vls);
- newmap->mask = (1 << ilog2(newmap->vls)) - 1;
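-	/*
-	 * e.g. num_vls == 5: vls rounds up to 8 and mask to 0x7, so
-	 * map[vl & mask] is always in bounds; slots 5..7 are filled
-	 * below by reusing slots 0..2.
-	 */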
- /* initialize back-map */
- for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
- newmap->engine_to_vl[i] = -1;
- for (i = 0; i < newmap->vls; i++) {
- /* save for wrap around */
- int first_engine = engine;
-
- if (i < newmap->actual_vls) {
- int sz = roundup_pow_of_two(vl_engines[i]);
-
- /* only allocate once */
- newmap->map[i] = kzalloc(
- sizeof(struct sdma_map_elem) +
- sz * sizeof(struct sdma_engine *),
- GFP_KERNEL);
- if (!newmap->map[i])
- goto bail;
- newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
- /* assign engines */
- for (j = 0; j < sz; j++) {
- newmap->map[i]->sde[j] =
- &dd->per_sdma[engine];
- if (++engine >= first_engine + vl_engines[i])
- /* wrap back to first engine */
- engine = first_engine;
- }
- /* assign back-map */
- for (j = 0; j < vl_engines[i]; j++)
- newmap->engine_to_vl[first_engine + j] = i;
- } else {
- /* just re-use entry without allocating */
- newmap->map[i] = newmap->map[i % num_vls];
- }
- engine = first_engine + vl_engines[i];
- }
- /* newmap in hand, save old map */
- spin_lock_irq(&dd->sde_map_lock);
- oldmap = rcu_dereference_protected(dd->sdma_map,
- lockdep_is_held(&dd->sde_map_lock));
-
- /* publish newmap */
- rcu_assign_pointer(dd->sdma_map, newmap);
-
- spin_unlock_irq(&dd->sde_map_lock);
- /* success, free any old map after grace period */
- if (oldmap)
- call_rcu(&oldmap->list, sdma_map_rcu_callback);
- return 0;
-bail:
- /* free any partial allocation */
- sdma_map_free(newmap);
- return -ENOMEM;
-}
-
-/*
- * Clean up allocated memory.
- *
- * This routine can be called regardless of the success of sdma_init().
- *
- */
-static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
-{
- size_t i;
- struct sdma_engine *sde;
-
- if (dd->sdma_pad_dma) {
- dma_free_coherent(&dd->pcidev->dev, 4,
- (void *)dd->sdma_pad_dma,
- dd->sdma_pad_phys);
- dd->sdma_pad_dma = NULL;
- dd->sdma_pad_phys = 0;
- }
- if (dd->sdma_heads_dma) {
- dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
- (void *)dd->sdma_heads_dma,
- dd->sdma_heads_phys);
- dd->sdma_heads_dma = NULL;
- dd->sdma_heads_phys = 0;
- }
- for (i = 0; dd->per_sdma && i < num_engines; ++i) {
- sde = &dd->per_sdma[i];
-
- sde->head_dma = NULL;
- sde->head_phys = 0;
-
- if (sde->descq) {
- dma_free_coherent(
- &dd->pcidev->dev,
- sde->descq_cnt * sizeof(u64[2]),
- sde->descq,
- sde->descq_phys
- );
- sde->descq = NULL;
- sde->descq_phys = 0;
- }
- kvfree(sde->tx_ring);
- sde->tx_ring = NULL;
- }
- spin_lock_irq(&dd->sde_map_lock);
- sdma_map_free(rcu_access_pointer(dd->sdma_map));
- RCU_INIT_POINTER(dd->sdma_map, NULL);
- spin_unlock_irq(&dd->sde_map_lock);
- synchronize_rcu();
- kfree(dd->per_sdma);
- dd->per_sdma = NULL;
-}
-
-/**
- * sdma_init() - called when device probed
- * @dd: hfi1_devdata
- * @port: port number (currently only zero)
- *
- * sdma_init initializes the specified number of engines.
- *
- * The code initializes each sde and its CSRs. Interrupts
- * are not required to be enabled.
- *
- * Returns:
- * 0 - success, -errno on failure
- */
-int sdma_init(struct hfi1_devdata *dd, u8 port)
-{
- unsigned this_idx;
- struct sdma_engine *sde;
- u16 descq_cnt;
- void *curr_head;
- struct hfi1_pportdata *ppd = dd->pport + port;
- u32 per_sdma_credits;
- uint idle_cnt = sdma_idle_cnt;
- size_t num_engines = dd->chip_sdma_engines;
-
- if (!HFI1_CAP_IS_KSET(SDMA)) {
- HFI1_CAP_CLEAR(SDMA_AHG);
- return 0;
- }
- if (mod_num_sdma &&
- /* can't exceed chip support */
- mod_num_sdma <= dd->chip_sdma_engines &&
- /* count must be >= vls */
- mod_num_sdma >= num_vls)
- num_engines = mod_num_sdma;
-
- dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
- dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
- dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
- dd->chip_sdma_mem_size);
-
- per_sdma_credits =
- dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
-
- /* set up freeze waitqueue */
- init_waitqueue_head(&dd->sdma_unfreeze_wq);
- atomic_set(&dd->sdma_unfreeze_count, 0);
-
- descq_cnt = sdma_get_descq_cnt();
- dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
- num_engines, descq_cnt);
-
- /* alloc memory for array of send engines */
- dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
- if (!dd->per_sdma)
- return -ENOMEM;
-
- idle_cnt = ns_to_cclock(dd, idle_cnt);
- if (!sdma_desct_intr)
- sdma_desct_intr = SDMA_DESC_INTR;
-
- /* Allocate memory for SendDMA descriptor FIFOs */
- for (this_idx = 0; this_idx < num_engines; ++this_idx) {
- sde = &dd->per_sdma[this_idx];
- sde->dd = dd;
- sde->ppd = ppd;
- sde->this_idx = this_idx;
- sde->descq_cnt = descq_cnt;
- sde->desc_avail = sdma_descq_freecnt(sde);
- sde->sdma_shift = ilog2(descq_cnt);
- sde->sdma_mask = (1 << sde->sdma_shift) - 1;
-
- /* Create a mask specifically for each interrupt source */
- sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
- this_idx);
- sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
- this_idx);
- sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
- this_idx);
- /* Create a combined mask to cover all 3 interrupt sources */
- sde->imask = sde->int_mask | sde->progress_mask |
- sde->idle_mask;
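-		/*
-		 * e.g. with TXE_NUM_SDMA_ENGINES == 16, engine 3 owns
-		 * status bits 3 (int), 19 (progress) and 35 (idle), so
-		 * imask covers all three sources in a single test.
-		 */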
-
- spin_lock_init(&sde->tail_lock);
- seqlock_init(&sde->head_lock);
- spin_lock_init(&sde->senddmactrl_lock);
- spin_lock_init(&sde->flushlist_lock);
-		/* ensure there is always a zero bit */
- sde->ahg_bits = 0xfffffffe00000000ULL;
-
- sdma_set_state(sde, sdma_state_s00_hw_down);
-
- /* set up reference counting */
- kref_init(&sde->state.kref);
- init_completion(&sde->state.comp);
-
- INIT_LIST_HEAD(&sde->flushlist);
- INIT_LIST_HEAD(&sde->dmawait);
-
- sde->tail_csr =
- get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
-
- if (idle_cnt)
- dd->default_desc1 =
- SDMA_DESC1_HEAD_TO_HOST_FLAG;
- else
- dd->default_desc1 =
- SDMA_DESC1_INT_REQ_FLAG;
-
- tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
- (unsigned long)sde);
-
- tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
- (unsigned long)sde);
- INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
- INIT_WORK(&sde->flush_worker, sdma_field_flush);
-
- sde->progress_check_head = 0;
-
- setup_timer(&sde->err_progress_check_timer,
- sdma_err_progress_check, (unsigned long)sde);
-
- sde->descq = dma_zalloc_coherent(
- &dd->pcidev->dev,
- descq_cnt * sizeof(u64[2]),
- &sde->descq_phys,
- GFP_KERNEL
- );
- if (!sde->descq)
- goto bail;
- sde->tx_ring =
- kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
- GFP_KERNEL);
- if (!sde->tx_ring)
- sde->tx_ring =
- vzalloc(
- sizeof(struct sdma_txreq *) *
- descq_cnt);
- if (!sde->tx_ring)
- goto bail;
- }
-
- dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
- /* Allocate memory for DMA of head registers to memory */
- dd->sdma_heads_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- dd->sdma_heads_size,
- &dd->sdma_heads_phys,
- GFP_KERNEL
- );
- if (!dd->sdma_heads_dma) {
- dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
- goto bail;
- }
-
- /* Allocate memory for pad */
- dd->sdma_pad_dma = dma_zalloc_coherent(
- &dd->pcidev->dev,
- sizeof(u32),
- &dd->sdma_pad_phys,
- GFP_KERNEL
- );
- if (!dd->sdma_pad_dma) {
- dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
- goto bail;
- }
-
- /* assign each engine to different cacheline and init registers */
- curr_head = (void *)dd->sdma_heads_dma;
- for (this_idx = 0; this_idx < num_engines; ++this_idx) {
- unsigned long phys_offset;
-
- sde = &dd->per_sdma[this_idx];
-
- sde->head_dma = curr_head;
- curr_head += L1_CACHE_BYTES;
- phys_offset = (unsigned long)sde->head_dma -
- (unsigned long)dd->sdma_heads_dma;
- sde->head_phys = dd->sdma_heads_phys + phys_offset;
- init_sdma_regs(sde, per_sdma_credits, idle_cnt);
- }
- dd->flags |= HFI1_HAS_SEND_DMA;
- dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
- dd->num_sdma = num_engines;
- if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
- goto bail;
- dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
- return 0;
-
-bail:
- sdma_clean(dd, num_engines);
- return -ENOMEM;
-}
-
-/**
- * sdma_all_running() - called when the link goes up
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the running state.
- */
-void sdma_all_running(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* move all engines to running */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e30_go_running);
- }
-}
-
-/**
- * sdma_all_idle() - called when the link goes down
- * @dd: hfi1_devdata
- *
- * This routine moves all engines to the idle state.
- */
-void sdma_all_idle(struct hfi1_devdata *dd)
-{
- struct sdma_engine *sde;
- unsigned int i;
-
- /* idle all engines */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e70_go_idle);
- }
-}
-
-/**
- * sdma_start() - called to kick off state processing for all engines
- * @dd: hfi1_devdata
- *
- * This routine is for kicking off the state processing for all required
- * sdma engines. Interrupts need to be working at this point.
- *
- */
-void sdma_start(struct hfi1_devdata *dd)
-{
- unsigned i;
- struct sdma_engine *sde;
-
- /* kick off the engines state processing */
- for (i = 0; i < dd->num_sdma; ++i) {
- sde = &dd->per_sdma[i];
- sdma_process_event(sde, sdma_event_e10_go_hw_start);
- }
-}
-
-/**
- * sdma_exit() - used when module is removed
- * @dd: hfi1_devdata
- */
-void sdma_exit(struct hfi1_devdata *dd)
-{
- unsigned this_idx;
- struct sdma_engine *sde;
-
- for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
- ++this_idx) {
- sde = &dd->per_sdma[this_idx];
- if (!list_empty(&sde->dmawait))
- dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
- sde->this_idx);
- sdma_process_event(sde, sdma_event_e00_go_hw_down);
-
- del_timer_sync(&sde->err_progress_check_timer);
-
- /*
- * This waits for the state machine to exit so it is not
- * necessary to kill the sdma_sw_clean_up_task to make sure
- * it is not running.
- */
- sdma_finalput(&sde->state);
- }
- sdma_clean(dd, dd->num_sdma);
-}
-
-/*
- * unmap the indicated descriptor
- */
-static inline void sdma_unmap_desc(
- struct hfi1_devdata *dd,
- struct sdma_desc *descp)
-{
- switch (sdma_mapping_type(descp)) {
- case SDMA_MAP_SINGLE:
- dma_unmap_single(
- &dd->pcidev->dev,
- sdma_mapping_addr(descp),
- sdma_mapping_len(descp),
- DMA_TO_DEVICE);
- break;
- case SDMA_MAP_PAGE:
- dma_unmap_page(
- &dd->pcidev->dev,
- sdma_mapping_addr(descp),
- sdma_mapping_len(descp),
- DMA_TO_DEVICE);
- break;
- }
-}
-
-/*
- * return the mode as indicated by the first
- * descriptor in the tx.
- */
-static inline u8 ahg_mode(struct sdma_txreq *tx)
-{
- return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
- >> SDMA_DESC1_HEADER_MODE_SHIFT;
-}
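-
-/*
- * Illustrative sketch (not part of the driver): ahg_mode() above is one
- * instance of the SMASK/SHIFT field-extraction pattern used throughout
- * this file.  A hypothetical generic helper would look like:
- */
-static inline u64 example_get_field(u64 qw, u64 smask, unsigned int shift)
-{
-	/* isolate the field bits, then right-justify them */
-	return (qw & smask) >> shift;
-}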
-
-/**
- * sdma_txclean() - clean tx of mappings, descp *kmalloc's
- * @dd: hfi1_devdata for unmapping
- * @tx: tx request to clean
- *
- * This is used in the progress routine to clean the tx or
- * by the ULP to toss an in-process tx build.
- *
- * The code can be called multiple times without issue.
- *
- */
-void sdma_txclean(
- struct hfi1_devdata *dd,
- struct sdma_txreq *tx)
-{
- u16 i;
-
- if (tx->num_desc) {
- u8 skip = 0, mode = ahg_mode(tx);
-
- /* unmap first */
- sdma_unmap_desc(dd, &tx->descp[0]);
- /* determine number of AHG descriptors to skip */
- if (mode > SDMA_AHG_APPLY_UPDATE1)
- skip = mode >> 1;
- for (i = 1 + skip; i < tx->num_desc; i++)
- sdma_unmap_desc(dd, &tx->descp[i]);
- tx->num_desc = 0;
- }
- kfree(tx->coalesce_buf);
- tx->coalesce_buf = NULL;
- /* kmalloc'ed descp */
- if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
- tx->desc_limit = ARRAY_SIZE(tx->descs);
- kfree(tx->descp);
- }
-}
-
-static inline u16 sdma_gethead(struct sdma_engine *sde)
-{
- struct hfi1_devdata *dd = sde->dd;
- int use_dmahead;
- u16 hwhead;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
- sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
-#endif
-
-retry:
- use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
- (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
- hwhead = use_dmahead ?
- (u16)le64_to_cpu(*sde->head_dma) :
- (u16)read_sde_csr(sde, SD(HEAD));
-
- if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
- u16 cnt;
- u16 swtail;
- u16 swhead;
- int sane;
-
- swhead = sde->descq_head & sde->sdma_mask;
- /* this code is really bad for cache line trading */
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
- cnt = sde->descq_cnt;
-
- if (swhead < swtail)
- /* not wrapped */
-			sane = (hwhead >= swhead) && (hwhead <= swtail);
- else if (swhead > swtail)
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- else
- /* empty */
- sane = (hwhead == swhead);
-
- if (unlikely(!sane)) {
- dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
- sde->this_idx,
- use_dmahead ? "dma" : "kreg",
- hwhead, swhead, swtail, cnt);
- if (use_dmahead) {
- /* try one more time, using csr */
- use_dmahead = 0;
- goto retry;
- }
- /* proceed as if no progress */
- hwhead = swhead;
- }
- }
- return hwhead;
-}
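-
-/*
- * Standalone restatement (illustrative, not driver code) of the
- * wrap-aware sanity check performed above.  Indices are assumed to be
- * already masked into the range [0, cnt).
- */
-static inline int example_head_is_sane(u16 hwhead, u16 swhead,
-				       u16 swtail, u16 cnt)
-{
-	if (swhead < swtail)	/* not wrapped */
-		return hwhead >= swhead && hwhead <= swtail;
-	if (swhead > swtail)	/* wrapped around */
-		return (hwhead >= swhead && hwhead < cnt) ||
-			hwhead <= swtail;
-	return hwhead == swhead;	/* empty */
-}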
-
-/*
- * This is called when there are send DMA descriptors that might be
- * available.
- *
- * This is called with head_lock held.
- */
-static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
-{
- struct iowait *wait, *nw;
- struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
- unsigned i, n = 0, seq;
- struct sdma_txreq *stx;
- struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
- slashstrip(__FILE__), __LINE__, __func__);
- dd_dev_err(sde->dd, "avail: %u\n", avail);
-#endif
-
- do {
- seq = read_seqbegin(&dev->iowait_lock);
- if (!list_empty(&sde->dmawait)) {
- /* at least one item */
- write_seqlock(&dev->iowait_lock);
- /* Harvest waiters wanting DMA descriptors */
- list_for_each_entry_safe(
- wait,
- nw,
- &sde->dmawait,
- list) {
- u16 num_desc = 0;
-
- if (!wait->wakeup)
- continue;
- if (n == ARRAY_SIZE(waits))
- break;
- if (!list_empty(&wait->tx_head)) {
- stx = list_first_entry(
- &wait->tx_head,
- struct sdma_txreq,
- list);
- num_desc = stx->num_desc;
- }
- if (num_desc > avail)
- break;
- avail -= num_desc;
- list_del_init(&wait->list);
- waits[n++] = wait;
- }
- write_sequnlock(&dev->iowait_lock);
- break;
- }
- } while (read_seqretry(&dev->iowait_lock, seq));
-
- for (i = 0; i < n; i++)
- waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
-}
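-
-/*
- * Note on the locking pattern above: the read_seqbegin/read_seqretry
- * pair lets the common case (an empty dmawait list) complete without
- * taking the write side of iowait_lock; the write lock is acquired
- * only once at least one waiter has been observed.
- */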
-
-/* head_lock must be held */
-static void sdma_make_progress(struct sdma_engine *sde, u64 status)
-{
- struct sdma_txreq *txp = NULL;
- int progress = 0;
- u16 hwhead, swhead;
- int idle_check_done = 0;
-
- hwhead = sdma_gethead(sde);
-
- /* The reason for some of the complexity of this code is that
- * not all descriptors have corresponding txps. So, we have to
- * be able to skip over descs until we wander into the range of
- * the next txp on the list.
- */
-
-retry:
- txp = get_txhead(sde);
- swhead = sde->descq_head & sde->sdma_mask;
- trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
- while (swhead != hwhead) {
- /* advance head, wrap if needed */
- swhead = ++sde->descq_head & sde->sdma_mask;
-
- /* if now past this txp's descs, do the callback */
- if (txp && txp->next_descq_idx == swhead) {
- /* remove from list */
- sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
- complete_tx(sde, txp, SDMA_TXREQ_S_OK);
- /* see if there is another txp */
- txp = get_txhead(sde);
- }
- trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
- progress++;
- }
-
- /*
-	 * The SDMA idle interrupt is not guaranteed to be ordered with respect
-	 * to updates of the dma_head location in host memory. The head
-	 * value read might not be fully up to date. If there are pending
-	 * descriptors and the SDMA idle interrupt fired, then read from the
-	 * CSR SDMA head instead to get the latest value from the hardware.
-	 * The hardware SDMA head should be read at most once in this
-	 * invocation of sdma_make_progress(..), which is ensured by the
-	 * idle_check_done flag.
-	 */
- if ((status & sde->idle_mask) && !idle_check_done) {
- u16 swtail;
-
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
- if (swtail != hwhead) {
- hwhead = (u16)read_sde_csr(sde, SD(HEAD));
- idle_check_done = 1;
- goto retry;
- }
- }
-
- sde->last_status = status;
- if (progress)
- sdma_desc_avail(sde, sdma_descq_freecnt(sde));
-}
-
-/**
- * sdma_engine_interrupt() - interrupt handler for engine
- * @sde: sdma engine
- * @status: sdma interrupt reason
- *
- * Status is a mask of the 3 possible interrupts for this engine. It will
- * contain bits _only_ for this SDMA engine. It will contain at least one
- * bit; it may contain more.
- */
-void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
-{
- trace_hfi1_sdma_engine_interrupt(sde, status);
- write_seqlock(&sde->head_lock);
- sdma_set_desc_cnt(sde, sdma_desct_intr);
- if (status & sde->idle_mask)
- sde->idle_int_cnt++;
- else if (status & sde->progress_mask)
- sde->progress_int_cnt++;
- else if (status & sde->int_mask)
- sde->sdma_int_cnt++;
- sdma_make_progress(sde, status);
- write_sequnlock(&sde->head_lock);
-}
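-
-/*
- * Note: status may carry more than one source bit, but the else-if
- * chain above attributes the interrupt to a single counter, giving
- * idle precedence over progress, and progress over the generic sdma
- * interrupt.
- */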
-
-/**
- * sdma_engine_error() - error handler for engine
- * @sde: sdma engine
- * @status: sdma interrupt reason
- */
-void sdma_engine_error(struct sdma_engine *sde, u64 status)
-{
- unsigned long flags;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
- sde->this_idx,
- (unsigned long long)status,
- sdma_state_names[sde->state.current_state]);
-#endif
- spin_lock_irqsave(&sde->tail_lock, flags);
- write_seqlock(&sde->head_lock);
- if (status & ALL_SDMA_ENG_HALT_ERRS)
- __sdma_process_event(sde, sdma_event_e60_hw_halted);
- if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
- dd_dev_err(sde->dd,
- "SDMA (%u) engine error: 0x%llx state %s\n",
- sde->this_idx,
- (unsigned long long)status,
- sdma_state_names[sde->state.current_state]);
- dump_sdma_state(sde);
- }
- write_sequnlock(&sde->head_lock);
- spin_unlock_irqrestore(&sde->tail_lock, flags);
-}
-
-static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
-{
- u64 set_senddmactrl = 0;
- u64 clr_senddmactrl = 0;
- unsigned long flags;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
- sde->this_idx,
- (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
- (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
- (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
- (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
-#endif
-
- if (op & SDMA_SENDCTRL_OP_ENABLE)
- set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
- else
- clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
-
- if (op & SDMA_SENDCTRL_OP_INTENABLE)
- set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
- else
- clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
-
- if (op & SDMA_SENDCTRL_OP_HALT)
- set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
- else
- clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
-
- spin_lock_irqsave(&sde->senddmactrl_lock, flags);
-
- sde->p_senddmactrl |= set_senddmactrl;
- sde->p_senddmactrl &= ~clr_senddmactrl;
-
- if (op & SDMA_SENDCTRL_OP_CLEANUP)
- write_sde_csr(sde, SD(CTRL),
- sde->p_senddmactrl |
- SD(CTRL_SDMA_CLEANUP_SMASK));
- else
- write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
-
- spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
-
-#ifdef CONFIG_SDMA_VERBOSITY
- sdma_dumpstate(sde);
-#endif
-}
-
-static void sdma_setlengen(struct sdma_engine *sde)
-{
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
- sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
-#endif
-
- /*
- * Set SendDmaLenGen and clear-then-set the MSB of the generation
- * count to enable generation checking and load the internal
- * generation counter.
- */
- write_sde_csr(sde, SD(LEN_GEN),
- (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
- write_sde_csr(sde, SD(LEN_GEN),
- ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
- (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
-}
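-
-/*
- * Worked example (assuming descq_cnt == 2048): the length field is
- * programmed in units of 64 descriptors, so 2048 / 64 == 32 is
- * written; the second write additionally sets the MSB of the
- * generation count (the 4ULL term) to latch the internal generation
- * counter, per the comment above.
- */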
-
-static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- smp_wmb(); /* see get_txhead() */
- writeq(tail, sde->tail_csr);
-}
-
-/*
- * This is called when changing to state s10_hw_start_up_halt_wait as
- * a result of send buffer errors or send DMA descriptor errors.
- */
-static void sdma_hw_start_up(struct sdma_engine *sde)
-{
- u64 reg;
-
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
- sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
-#endif
-
- sdma_setlengen(sde);
- sdma_update_tail(sde, 0); /* Set SendDmaTail */
- *sde->head_dma = 0;
-
- reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
- SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
- write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
-}
-
-#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
-(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-
-#define SET_STATIC_RATE_CONTROL_SMASK(r) \
-(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
-/*
- * set_sdma_integrity
- *
- * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
- */
-static void set_sdma_integrity(struct sdma_engine *sde)
-{
- struct hfi1_devdata *dd = sde->dd;
- u64 reg;
-
- if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
- return;
-
- reg = hfi1_pkt_base_sdma_integrity(dd);
-
- if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
- CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
- else
- SET_STATIC_RATE_CONTROL_SMASK(reg);
-
- write_sde_csr(sde, SD(CHECK_ENABLE), reg);
-}
-
-static void init_sdma_regs(
- struct sdma_engine *sde,
- u32 credits,
- uint idle_cnt)
-{
- u8 opval, opmask;
-#ifdef CONFIG_SDMA_VERBOSITY
- struct hfi1_devdata *dd = sde->dd;
-
- dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
- sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
-#endif
-
- write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
- sdma_setlengen(sde);
- sdma_update_tail(sde, 0); /* Set SendDmaTail */
- write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
- write_sde_csr(sde, SD(DESC_CNT), 0);
- write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
- write_sde_csr(sde, SD(MEMORY),
- ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
- ((u64)(credits * sde->this_idx) <<
- SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
- write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
- set_sdma_integrity(sde);
- opmask = OPCODE_CHECK_MASK_DISABLED;
- opval = OPCODE_CHECK_VAL_DISABLED;
- write_sde_csr(sde, SD(CHECK_OPCODE),
- (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
- (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
-}
-
-#ifdef CONFIG_SDMA_VERBOSITY
-
-#define sdma_dumpstate_helper0(reg) do { \
- csr = read_csr(sde->dd, reg); \
- dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
- } while (0)
-
-#define sdma_dumpstate_helper(reg) do { \
- csr = read_sde_csr(sde, reg); \
- dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
- #reg, sde->this_idx, csr); \
- } while (0)
-
-#define sdma_dumpstate_helper2(reg) do { \
- csr = read_csr(sde->dd, reg + (8 * i)); \
- dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
- #reg, i, csr); \
- } while (0)
-
-void sdma_dumpstate(struct sdma_engine *sde)
-{
- u64 csr;
- unsigned i;
-
- sdma_dumpstate_helper(SD(CTRL));
- sdma_dumpstate_helper(SD(STATUS));
- sdma_dumpstate_helper0(SD(ERR_STATUS));
- sdma_dumpstate_helper0(SD(ERR_MASK));
- sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
- sdma_dumpstate_helper(SD(ENG_ERR_MASK));
-
- for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
- sdma_dumpstate_helper2(CCE_INT_STATUS);
- sdma_dumpstate_helper2(CCE_INT_MASK);
- sdma_dumpstate_helper2(CCE_INT_BLOCKED);
- }
-
- sdma_dumpstate_helper(SD(TAIL));
- sdma_dumpstate_helper(SD(HEAD));
- sdma_dumpstate_helper(SD(PRIORITY_THLD));
- sdma_dumpstate_helper(SD(IDLE_CNT));
- sdma_dumpstate_helper(SD(RELOAD_CNT));
- sdma_dumpstate_helper(SD(DESC_CNT));
- sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
- sdma_dumpstate_helper(SD(MEMORY));
- sdma_dumpstate_helper0(SD(ENGINES));
- sdma_dumpstate_helper0(SD(MEM_SIZE));
- /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
- sdma_dumpstate_helper(SD(BASE_ADDR));
- sdma_dumpstate_helper(SD(LEN_GEN));
- sdma_dumpstate_helper(SD(HEAD_ADDR));
- sdma_dumpstate_helper(SD(CHECK_ENABLE));
- sdma_dumpstate_helper(SD(CHECK_VL));
- sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
- sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
- sdma_dumpstate_helper(SD(CHECK_SLID));
- sdma_dumpstate_helper(SD(CHECK_OPCODE));
-}
-#endif
-
-static void dump_sdma_state(struct sdma_engine *sde)
-{
- struct hw_sdma_desc *descq;
- struct hw_sdma_desc *descqp;
- u64 desc[2];
- u64 addr;
- u8 gen;
- u16 len;
- u16 head, tail, cnt;
-
- head = sde->descq_head & sde->sdma_mask;
- tail = sde->descq_tail & sde->sdma_mask;
- cnt = sdma_descq_freecnt(sde);
- descq = sde->descq;
-
- dd_dev_err(sde->dd,
- "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
- sde->this_idx, head, tail, cnt,
- !list_empty(&sde->flushlist));
-
- /* print info for each entry in the descriptor queue */
- while (head != tail) {
- char flags[6] = { 'x', 'x', 'x', 'x', 0 };
-
- descqp = &sde->descq[head];
- desc[0] = le64_to_cpu(descqp->qw[0]);
- desc[1] = le64_to_cpu(descqp->qw[1]);
- flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
- flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
- 'H' : '-';
- flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
- flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
- addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
- & SDMA_DESC0_PHY_ADDR_MASK;
- gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
- & SDMA_DESC1_GENERATION_MASK;
- len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
- & SDMA_DESC0_BYTE_COUNT_MASK;
- dd_dev_err(sde->dd,
- "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
- head, flags, addr, gen, len);
- dd_dev_err(sde->dd,
- "\tdesc0:0x%016llx desc1 0x%016llx\n",
- desc[0], desc[1]);
- if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
- dd_dev_err(sde->dd,
- "\taidx: %u amode: %u alen: %u\n",
- (u8)((desc[1] &
- SDMA_DESC1_HEADER_INDEX_SMASK) >>
- SDMA_DESC1_HEADER_INDEX_SHIFT),
- (u8)((desc[1] &
- SDMA_DESC1_HEADER_MODE_SMASK) >>
- SDMA_DESC1_HEADER_MODE_SHIFT),
- (u8)((desc[1] &
- SDMA_DESC1_HEADER_DWS_SMASK) >>
- SDMA_DESC1_HEADER_DWS_SHIFT));
- head++;
- head &= sde->sdma_mask;
- }
-}
-
-#define SDE_FMT \
- "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
-/**
- * sdma_seqfile_dump_sde() - debugfs dump of sde
- * @s: seq file
- * @sde: send dma engine to dump
- *
- * This routine dumps the sde to the indicated seq file.
- */
-void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
-{
- u16 head, tail;
- struct hw_sdma_desc *descqp;
- u64 desc[2];
- u64 addr;
- u8 gen;
- u16 len;
-
- head = sde->descq_head & sde->sdma_mask;
- tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
- seq_printf(s, SDE_FMT, sde->this_idx,
- sde->cpu,
- sdma_state_name(sde->state.current_state),
- (unsigned long long)read_sde_csr(sde, SD(CTRL)),
- (unsigned long long)read_sde_csr(sde, SD(STATUS)),
- (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
- (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
- (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
- (unsigned long long)le64_to_cpu(*sde->head_dma),
- (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
- (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
- (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
- (unsigned long long)sde->last_status,
- (unsigned long long)sde->ahg_bits,
- sde->tx_tail,
- sde->tx_head,
- sde->descq_tail,
- sde->descq_head,
- !list_empty(&sde->flushlist),
- sde->descq_full_count,
- (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
-
- /* print info for each entry in the descriptor queue */
- while (head != tail) {
- char flags[6] = { 'x', 'x', 'x', 'x', 0 };
-
- descqp = &sde->descq[head];
- desc[0] = le64_to_cpu(descqp->qw[0]);
- desc[1] = le64_to_cpu(descqp->qw[1]);
- flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
- flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
- 'H' : '-';
- flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
- flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
- addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
- & SDMA_DESC0_PHY_ADDR_MASK;
- gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
- & SDMA_DESC1_GENERATION_MASK;
- len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
- & SDMA_DESC0_BYTE_COUNT_MASK;
- seq_printf(s,
- "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
- head, flags, addr, gen, len);
- if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
- seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
- (u8)((desc[1] &
- SDMA_DESC1_HEADER_INDEX_SMASK) >>
- SDMA_DESC1_HEADER_INDEX_SHIFT),
- (u8)((desc[1] &
- SDMA_DESC1_HEADER_MODE_SMASK) >>
- SDMA_DESC1_HEADER_MODE_SHIFT));
- head = (head + 1) & sde->sdma_mask;
- }
-}
-
-/*
- * add the generation number into
- * qw1 and return the result
- */
-static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
-{
- u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
-
- qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
- qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
- << SDMA_DESC1_GENERATION_SHIFT;
- return qw1;
-}
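-
-/*
- * Worked example (assuming a 256-entry ring, i.e. sdma_shift == 8):
- * tails 0..255 produce generation 0, 256..511 generation 1, and so on,
- * wrapping modulo 4.  Descriptors written after a ring wrap thus carry
- * a different 2-bit generation from stale ones, which the generation
- * checking enabled in sdma_setlengen() can detect.
- */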
-
-/*
- * This routine submits the indicated tx
- *
- * Space has already been guaranteed and
- * tail side of ring is locked.
- *
- * The hardware tail update is done
- * in the caller and that is facilitated
- * by returning the new tail.
- *
- * There is special-case logic for AHG
- * so that the generation number is not
- * added to up to 2 descriptors that
- * follow the first descriptor.
- *
- */
-static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
-{
- int i;
- u16 tail;
- struct sdma_desc *descp = tx->descp;
- u8 skip = 0, mode = ahg_mode(tx);
-
- tail = sde->descq_tail & sde->sdma_mask;
- sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
- sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
- trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
- tail, &sde->descq[tail]);
- tail = ++sde->descq_tail & sde->sdma_mask;
- descp++;
- if (mode > SDMA_AHG_APPLY_UPDATE1)
- skip = mode >> 1;
- for (i = 1; i < tx->num_desc; i++, descp++) {
- u64 qw1;
-
- sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
- if (skip) {
- /* edits don't have generation */
- qw1 = descp->qw[1];
- skip--;
- } else {
- /* replace generation with real one for non-edits */
- qw1 = add_gen(sde, descp->qw[1]);
- }
- sde->descq[tail].qw[1] = cpu_to_le64(qw1);
- trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
- tail, &sde->descq[tail]);
- tail = ++sde->descq_tail & sde->sdma_mask;
- }
- tx->next_descq_idx = tail;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- tx->sn = sde->tail_sn++;
- trace_hfi1_sdma_in_sn(sde, tx->sn);
- WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
-#endif
- sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
- sde->desc_avail -= tx->num_desc;
- return tail;
-}
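-
-/*
- * Note on the ring arithmetic above: descq_tail is a free-running
- * counter and sdma_mask is assumed to be descq_cnt - 1 for a
- * power-of-two ring, so "counter & sdma_mask" is a cheap modulo of
- * the queue size.
- */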
-
-/*
- * Check for progress
- */
-static int sdma_check_progress(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx)
-{
- int ret;
-
- sde->desc_avail = sdma_descq_freecnt(sde);
- if (tx->num_desc <= sde->desc_avail)
- return -EAGAIN;
- /* pulse the head_lock */
- if (wait && wait->sleep) {
- unsigned seq;
-
- seq = raw_seqcount_begin(
- (const seqcount_t *)&sde->head_lock.seqcount);
- ret = wait->sleep(sde, wait, tx, seq);
- if (ret == -EAGAIN)
- sde->desc_avail = sdma_descq_freecnt(sde);
- } else {
- ret = -EBUSY;
- }
- return ret;
-}
-
-/**
- * sdma_send_txreq() - submit a tx req to ring
- * @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
- * @tx: sdma_txreq to submit
- *
- * The call submits the tx into the ring. If an iowait structure is
- * non-NULL, the packet will be queued to the list in wait.
- *
- * Return:
- * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
- * ring (wait == NULL)
- * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
- */
-int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx)
-{
- int ret = 0;
- u16 tail;
- unsigned long flags;
-
- /* user should have supplied entire packet */
- if (unlikely(tx->tlen))
- return -EINVAL;
- tx->wait = wait;
- spin_lock_irqsave(&sde->tail_lock, flags);
-retry:
- if (unlikely(!__sdma_running(sde)))
- goto unlock_noconn;
- if (unlikely(tx->num_desc > sde->desc_avail))
- goto nodesc;
- tail = submit_tx(sde, tx);
- if (wait)
- iowait_sdma_inc(wait);
- sdma_update_tail(sde, tail);
-unlock:
- spin_unlock_irqrestore(&sde->tail_lock, flags);
- return ret;
-unlock_noconn:
- if (wait)
- iowait_sdma_inc(wait);
- tx->next_descq_idx = 0;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- tx->sn = sde->tail_sn++;
- trace_hfi1_sdma_in_sn(sde, tx->sn);
-#endif
- spin_lock(&sde->flushlist_lock);
- list_add_tail(&tx->list, &sde->flushlist);
- spin_unlock(&sde->flushlist_lock);
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
- schedule_work(&sde->flush_worker);
- ret = -ECOMM;
- goto unlock;
-nodesc:
- ret = sdma_check_progress(sde, wait, tx);
- if (ret == -EAGAIN) {
- ret = 0;
- goto retry;
- }
- sde->descq_full_count++;
- goto unlock;
-}
-
-/**
- * sdma_send_txlist() - submit a list of tx req to ring
- * @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
- * @tx_list: list of sdma_txreqs to submit
- *
- * The call submits the list into the ring.
- *
- * If the iowait structure is non-NULL and not equal to the iowait list,
- * the unprocessed part of the list will be appended to the list in wait.
- *
- * In all cases, the tx_list will be updated so the head of the tx_list is
- * the list of descriptors that have yet to be transmitted.
- *
- * The intent of this call is to provide a more efficient
- * way of submitting multiple packets to SDMA while holding the tail
- * side locking.
- *
- * Return:
- * > 0 - Success (value is number of sdma_txreq's submitted),
- * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
- * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
- */
-int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
- struct list_head *tx_list)
-{
- struct sdma_txreq *tx, *tx_next;
- int ret = 0;
- unsigned long flags;
- u16 tail = INVALID_TAIL;
- int count = 0;
-
- spin_lock_irqsave(&sde->tail_lock, flags);
-retry:
- list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
- if (unlikely(!__sdma_running(sde)))
- goto unlock_noconn;
- if (unlikely(tx->num_desc > sde->desc_avail))
- goto nodesc;
- if (unlikely(tx->tlen)) {
- ret = -EINVAL;
- goto update_tail;
- }
- list_del_init(&tx->list);
- tail = submit_tx(sde, tx);
- count++;
- if (tail != INVALID_TAIL &&
- (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
- sdma_update_tail(sde, tail);
- tail = INVALID_TAIL;
- }
- }
-update_tail:
- if (wait)
- iowait_sdma_add(wait, count);
- if (tail != INVALID_TAIL)
- sdma_update_tail(sde, tail);
- spin_unlock_irqrestore(&sde->tail_lock, flags);
- return ret == 0 ? count : ret;
-unlock_noconn:
- spin_lock(&sde->flushlist_lock);
- list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
- list_del_init(&tx->list);
- if (wait)
- iowait_sdma_inc(wait);
- tx->next_descq_idx = 0;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- tx->sn = sde->tail_sn++;
- trace_hfi1_sdma_in_sn(sde, tx->sn);
-#endif
- list_add_tail(&tx->list, &sde->flushlist);
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
- }
- spin_unlock(&sde->flushlist_lock);
- schedule_work(&sde->flush_worker);
- ret = -ECOMM;
- goto update_tail;
-nodesc:
- ret = sdma_check_progress(sde, wait, tx);
- if (ret == -EAGAIN) {
- ret = 0;
- goto retry;
- }
- sde->descq_full_count++;
- goto update_tail;
-}
-
-static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sde->tail_lock, flags);
- write_seqlock(&sde->head_lock);
-
- __sdma_process_event(sde, event);
-
- if (sde->state.current_state == sdma_state_s99_running)
- sdma_desc_avail(sde, sdma_descq_freecnt(sde));
-
- write_sequnlock(&sde->head_lock);
- spin_unlock_irqrestore(&sde->tail_lock, flags);
-}
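-
-/*
- * Lock ordering note: tail_lock is taken before the head_lock seqlock
- * here, just as in sdma_engine_error() above; keeping a single order
- * between the submission and progress paths avoids ABBA deadlocks.
- */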
-
-static void __sdma_process_event(struct sdma_engine *sde,
- enum sdma_events event)
-{
- struct sdma_state *ss = &sde->state;
- int need_progress = 0;
-
- /* CONFIG SDMA temporary */
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
- sdma_state_names[ss->current_state],
- sdma_event_names[event]);
-#endif
-
- switch (ss->current_state) {
- case sdma_state_s00_hw_down:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- break;
- case sdma_event_e30_go_running:
- /*
-			 * If down, but running is requested (usually the
-			 * result of link up), then we need to start up.
-			 * This can happen when hw down is requested while
-			 * bringing the link up with traffic active,
-			 * e.g. on the 7220.
- */
- ss->go_s99_running = 1;
- /* fall through and start dma engine */
- case sdma_event_e10_go_hw_start:
- /* This reference means the state machine is started */
- sdma_get(&sde->state);
- sdma_set_state(sde,
- sdma_state_s10_hw_start_up_halt_wait);
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e40_sw_cleaned:
- sdma_sw_tear_down(sde);
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- break;
- case sdma_event_e70_go_idle:
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s10_hw_start_up_halt_wait:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_sw_tear_down(sde);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- sdma_set_state(sde,
- sdma_state_s15_hw_start_up_clean_wait);
- sdma_start_hw_clean_up(sde);
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- schedule_work(&sde->err_halt_worker);
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s15_hw_start_up_clean_wait:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_sw_tear_down(sde);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- sdma_hw_start_up(sde);
- sdma_set_state(sde, ss->go_s99_running ?
- sdma_state_s99_running :
- sdma_state_s20_idle);
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s20_idle:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- sdma_sw_tear_down(sde);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- sdma_set_state(sde, sdma_state_s99_running);
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
- schedule_work(&sde->err_halt_worker);
- break;
- case sdma_event_e70_go_idle:
- break;
- case sdma_event_e85_link_down:
- /* fall through */
- case sdma_event_e80_hw_freeze:
- sdma_set_state(sde, sdma_state_s80_hw_freeze);
- atomic_dec(&sde->dd->sdma_unfreeze_count);
- wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s30_sw_clean_up_wait:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
- sdma_start_hw_clean_up(sde);
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s40_hw_clean_up_wait:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- sdma_hw_start_up(sde);
- sdma_set_state(sde, ss->go_s99_running ?
- sdma_state_s99_running :
- sdma_state_s20_idle);
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s50_hw_halt_wait:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- schedule_work(&sde->err_halt_worker);
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s60_idle_halt_wait:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- schedule_work(&sde->err_halt_worker);
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s80_hw_freeze:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- case sdma_event_e85_link_down:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s82_freeze_sw_clean:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case sdma_event_e40_sw_cleaned:
- /* notify caller this engine is done cleaning */
- atomic_dec(&sde->dd->sdma_unfreeze_count);
- wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- break;
- case sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case sdma_event_e80_hw_freeze:
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- sdma_hw_start_up(sde);
- sdma_set_state(sde, ss->go_s99_running ?
- sdma_state_s99_running :
- sdma_state_s20_idle);
- break;
- case sdma_event_e85_link_down:
- break;
- case sdma_event_e90_sw_halted:
- break;
- }
- break;
-
- case sdma_state_s99_running:
- switch (event) {
- case sdma_event_e00_go_hw_down:
- sdma_set_state(sde, sdma_state_s00_hw_down);
- tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
- break;
- case sdma_event_e10_go_hw_start:
- break;
- case sdma_event_e15_hw_halt_done:
- break;
- case sdma_event_e25_hw_clean_up_done:
- break;
- case sdma_event_e30_go_running:
- break;
- case sdma_event_e40_sw_cleaned:
- break;
- case sdma_event_e50_hw_cleaned:
- break;
- case sdma_event_e60_hw_halted:
- need_progress = 1;
-			sdma_err_progress_check_schedule(sde);
-			/* fall through */
- case sdma_event_e90_sw_halted:
- /*
-			 * A SW-initiated halt does not perform the engine
-			 * progress check
- */
- sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
- schedule_work(&sde->err_halt_worker);
- break;
- case sdma_event_e70_go_idle:
- sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
- break;
- case sdma_event_e85_link_down:
- ss->go_s99_running = 0;
- /* fall through */
- case sdma_event_e80_hw_freeze:
- sdma_set_state(sde, sdma_state_s80_hw_freeze);
- atomic_dec(&sde->dd->sdma_unfreeze_count);
- wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
- break;
- case sdma_event_e81_hw_frozen:
- break;
- case sdma_event_e82_hw_unfreeze:
- break;
- }
- break;
- }
-
- ss->last_event = event;
- if (need_progress)
- sdma_make_progress(sde, 0);
-}
-
-/*
- * _extend_sdma_tx_descs() - helper to extend txreq
- *
- * This is called once the initial nominal allocation
- * of descriptors in the sdma_txreq is exhausted.
- *
- * The code will bump the allocation up to the max
- * of MAX_DESC (64) descriptors. There doesn't seem
- * to be much point in an interim step. The last
- * descriptor is reserved for the coalesce buffer in
- * order to support cases where an input packet has
- * >MAX_DESC iovecs.
- *
- */
-static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
-{
- int i;
-
- /* Handle last descriptor */
- if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
- /* if tlen is 0, it is for padding, release last descriptor */
- if (!tx->tlen) {
- tx->desc_limit = MAX_DESC;
- } else if (!tx->coalesce_buf) {
- /* allocate coalesce buffer with space for padding */
- tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
- GFP_ATOMIC);
- if (!tx->coalesce_buf)
- goto enomem;
- tx->coalesce_idx = 0;
- }
- return 0;
- }
-
- if (unlikely(tx->num_desc == MAX_DESC))
- goto enomem;
-
- tx->descp = kmalloc_array(
- MAX_DESC,
- sizeof(struct sdma_desc),
- GFP_ATOMIC);
- if (!tx->descp)
- goto enomem;
-
- /* reserve last descriptor for coalescing */
- tx->desc_limit = MAX_DESC - 1;
- /* copy ones already built */
- for (i = 0; i < tx->num_desc; i++)
- tx->descp[i] = tx->descs[i];
- return 0;
-enomem:
- sdma_txclean(dd, tx);
- return -ENOMEM;
-}
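-
-/*
- * Both allocations above use GFP_ATOMIC: descriptor extension happens
- * in the middle of building a tx, potentially from contexts that may
- * not sleep, so blocking memory reclaim is not an option here.
- */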
-
-/*
- * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
- *
- * This is called once the initial nominal allocation of descriptors
- * in the sdma_txreq is exhausted.
- *
- * This function calls _extend_sdma_tx_descs to extend or allocate a
- * coalesce buffer. If a coalesce buffer has been allocated, it copies
- * the input packet data into that buffer, and adds the coalesce buffer
- * descriptor once the whole packet has been received.
- *
- * Return:
- * <0 - error
- * 0 - coalescing, don't populate descriptor
- * 1 - continue with populating descriptor
- */
-int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
- int type, void *kvaddr, struct page *page,
- unsigned long offset, u16 len)
-{
- int pad_len, rval;
- dma_addr_t addr;
-
- rval = _extend_sdma_tx_descs(dd, tx);
- if (rval) {
- sdma_txclean(dd, tx);
- return rval;
- }
-
- /* If coalesce buffer is allocated, copy data into it */
- if (tx->coalesce_buf) {
- if (type == SDMA_MAP_NONE) {
- sdma_txclean(dd, tx);
- return -EINVAL;
- }
-
- if (type == SDMA_MAP_PAGE) {
- kvaddr = kmap(page);
- kvaddr += offset;
- } else if (WARN_ON(!kvaddr)) {
- sdma_txclean(dd, tx);
- return -EINVAL;
- }
-
- memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
- tx->coalesce_idx += len;
- if (type == SDMA_MAP_PAGE)
- kunmap(page);
-
- /* If there is more data, return */
- if (tx->tlen - tx->coalesce_idx)
- return 0;
-
- /* Whole packet is received; add any padding */
- pad_len = tx->packet_len & (sizeof(u32) - 1);
- if (pad_len) {
- pad_len = sizeof(u32) - pad_len;
- memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
- /* padding is taken care of for coalescing case */
- tx->packet_len += pad_len;
- tx->tlen += pad_len;
- }
-
- /* dma map the coalesce buffer */
- addr = dma_map_single(&dd->pcidev->dev,
- tx->coalesce_buf,
- tx->tlen,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
- return -ENOSPC;
- }
-
- /* Add descriptor for coalesce buffer */
- tx->desc_limit = MAX_DESC;
- return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
- addr, tx->tlen);
- }
-
- return 1;
-}
-
-/* Update sdes when the lmc changes */
-void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
-{
- struct sdma_engine *sde;
- int i;
- u64 sreg;
-
- sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
- SD(CHECK_SLID_MASK_SHIFT)) |
- (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
- SD(CHECK_SLID_VALUE_SHIFT));
-
- for (i = 0; i < dd->num_sdma; i++) {
- hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
- i, (u32)sreg);
- sde = &dd->per_sdma[i];
- write_sde_csr(sde, SD(CHECK_SLID), sreg);
- }
-}
-
-/* tx not dword sized - pad */
-int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
-{
- int rval = 0;
-
- tx->num_desc++;
- if ((unlikely(tx->num_desc == tx->desc_limit))) {
- rval = _extend_sdma_tx_descs(dd, tx);
- if (rval) {
- sdma_txclean(dd, tx);
- return rval;
- }
- }
- /* finish the one just added */
- make_tx_sdma_desc(
- tx,
- SDMA_MAP_NONE,
- dd->sdma_pad_phys,
- sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
- _sdma_close_tx(dd, tx);
- return rval;
-}
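-
-/*
- * Padding arithmetic, for reference: for a packet_len of L bytes the
- * descriptor added above covers sizeof(u32) - (L & 3) bytes, i.e.
- * 1..3 bytes for a misaligned L.  Callers are expected to take this
- * path only when the tx is not already dword sized, as the comment
- * above notes, since an aligned L would yield a full 4-byte pad.
- */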
-
-/*
- * Add ahg to the sdma_txreq
- *
- * The logic will consume up to 3
- * descriptors at the beginning of
- * sdma_txreq.
- */
-void _sdma_txreq_ahgadd(
- struct sdma_txreq *tx,
- u8 num_ahg,
- u8 ahg_entry,
- u32 *ahg,
- u8 ahg_hlen)
-{
- u32 i, shift = 0, desc = 0;
- u8 mode;
-
- WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
- /* compute mode */
- if (num_ahg == 1)
- mode = SDMA_AHG_APPLY_UPDATE1;
- else if (num_ahg <= 5)
- mode = SDMA_AHG_APPLY_UPDATE2;
- else
- mode = SDMA_AHG_APPLY_UPDATE3;
- tx->num_desc++;
-	/* initialize the consumed descriptors to zero */
- switch (mode) {
- case SDMA_AHG_APPLY_UPDATE3:
- tx->num_desc++;
- tx->descs[2].qw[0] = 0;
- tx->descs[2].qw[1] = 0;
- /* FALLTHROUGH */
- case SDMA_AHG_APPLY_UPDATE2:
- tx->num_desc++;
- tx->descs[1].qw[0] = 0;
- tx->descs[1].qw[1] = 0;
- break;
- }
- ahg_hlen >>= 2;
- tx->descs[0].qw[1] |=
- (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
- << SDMA_DESC1_HEADER_INDEX_SHIFT) |
- (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
- << SDMA_DESC1_HEADER_DWS_SHIFT) |
- (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
- << SDMA_DESC1_HEADER_MODE_SHIFT) |
- (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
- << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
- for (i = 0; i < (num_ahg - 1); i++) {
- if (!shift && !(i & 2))
- desc++;
- tx->descs[desc].qw[!!(i & 2)] |=
- (((u64)ahg[i + 1])
- << shift);
- shift = (shift + 32) & 63;
- }
-}
-
-/**
- * sdma_ahg_alloc - allocate an AHG entry
- * @sde: engine to allocate from
- *
- * Return:
- * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
- * -ENOSPC if an entry is not available
- */
-int sdma_ahg_alloc(struct sdma_engine *sde)
-{
- int nr;
- int oldbit;
-
- if (!sde) {
- trace_hfi1_ahg_allocate(sde, -EINVAL);
- return -EINVAL;
- }
- while (1) {
- nr = ffz(ACCESS_ONCE(sde->ahg_bits));
- if (nr > 31) {
- trace_hfi1_ahg_allocate(sde, -ENOSPC);
- return -ENOSPC;
- }
- oldbit = test_and_set_bit(nr, &sde->ahg_bits);
- if (!oldbit)
- break;
- cpu_relax();
- }
- trace_hfi1_ahg_allocate(sde, nr);
- return nr;
-}
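-
-/*
- * The loop above is the classic lock-free bitmap allocator: ffz()
- * nominates a clear bit and test_and_set_bit() atomically claims it;
- * losing the race to another CPU simply retries after cpu_relax().
- */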
-
-/**
- * sdma_ahg_free - free an AHG entry
- * @sde: engine to return AHG entry
- * @ahg_index: index to free
- *
- * This routine frees the indicated AHG entry.
- */
-void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
-{
- if (!sde)
- return;
- trace_hfi1_ahg_deallocate(sde, ahg_index);
- if (ahg_index < 0 || ahg_index > 31)
- return;
- clear_bit(ahg_index, &sde->ahg_bits);
-}
-
-/*
- * SPC freeze handling for SDMA engines. Called when the driver knows
- * the SPC is going into a freeze but before the freeze is fully
- * settled. Generally an error interrupt.
- *
- * This event will pull the engine out of running so no more entries can be
- * added to the engine's queue.
- */
-void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
-{
- int i;
- enum sdma_events event = link_down ? sdma_event_e85_link_down :
- sdma_event_e80_hw_freeze;
-
- /* set up the wait but do not wait here */
- atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
-
- /* tell all engines to stop running and wait */
- for (i = 0; i < dd->num_sdma; i++)
- sdma_process_event(&dd->per_sdma[i], event);
-
- /* sdma_freeze() will wait for all engines to have stopped */
-}
-
-/*
- * SPC freeze handling for SDMA engines. Called when the driver knows
- * the SPC is fully frozen.
- */
-void sdma_freeze(struct hfi1_devdata *dd)
-{
- int i;
- int ret;
-
- /*
- * Make sure all engines have moved out of the running state before
- * continuing.
- */
- ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
- atomic_read(&dd->sdma_unfreeze_count) <=
- 0);
-	/* if interrupted, or the count is negative (unloading), just exit */
- if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
- return;
-
- /* set up the count for the next wait */
- atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
-
- /* tell all engines that the SPC is frozen, they can start cleaning */
- for (i = 0; i < dd->num_sdma; i++)
- sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
-
- /*
- * Wait for everyone to finish software clean before exiting. The
- * software clean will read engine CSRs, so must be completed before
- * the next step, which will clear the engine CSRs.
- */
- (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
- atomic_read(&dd->sdma_unfreeze_count) <= 0);
- /* no need to check results - done no matter what */
-}
-
-/*
- * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
- *
- * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
- * that is left is a software clean. We could do it after the SPC is fully
- * frozen, but then we'd have to add another state to wait for the unfreeze.
- * Instead, just defer the software clean until the unfreeze step.
- */
-void sdma_unfreeze(struct hfi1_devdata *dd)
-{
- int i;
-
-	/* tell all engines to start freeze clean up */
- for (i = 0; i < dd->num_sdma; i++)
- sdma_process_event(&dd->per_sdma[i],
- sdma_event_e82_hw_unfreeze);
-}
-
-/**
- * _sdma_engine_progress_schedule() - schedule progress on engine
- * @sde: sdma_engine to schedule progress
- *
- */
-void _sdma_engine_progress_schedule(
- struct sdma_engine *sde)
-{
- trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
- /* assume we have selected a good cpu */
- write_csr(sde->dd,
- CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
- sde->progress_mask);
-}
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
deleted file mode 100644
index 8f50c99fe..000000000
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ /dev/null
@@ -1,1082 +0,0 @@
-#ifndef _HFI1_SDMA_H
-#define _HFI1_SDMA_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <asm/byteorder.h>
-#include <linux/workqueue.h>
-#include <linux/rculist.h>
-
-#include "hfi.h"
-#include "verbs.h"
-#include "sdma_txreq.h"
-
-/* Hardware limit */
-#define MAX_DESC 64
-/* Hardware limit for SDMA packet size */
-#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
-
-#define SDMA_TXREQ_S_OK 0
-#define SDMA_TXREQ_S_SENDERROR 1
-#define SDMA_TXREQ_S_ABORTED 2
-#define SDMA_TXREQ_S_SHUTDOWN 3
-
-/* flags bits */
-#define SDMA_TXREQ_F_URGENT 0x0001
-#define SDMA_TXREQ_F_AHG_COPY 0x0002
-#define SDMA_TXREQ_F_USE_AHG 0x0004
-
-#define SDMA_MAP_NONE 0
-#define SDMA_MAP_SINGLE 1
-#define SDMA_MAP_PAGE 2
-
-#define SDMA_AHG_VALUE_MASK 0xffff
-#define SDMA_AHG_VALUE_SHIFT 0
-#define SDMA_AHG_INDEX_MASK 0xf
-#define SDMA_AHG_INDEX_SHIFT 16
-#define SDMA_AHG_FIELD_LEN_MASK 0xf
-#define SDMA_AHG_FIELD_LEN_SHIFT 20
-#define SDMA_AHG_FIELD_START_MASK 0x1f
-#define SDMA_AHG_FIELD_START_SHIFT 24
-#define SDMA_AHG_UPDATE_ENABLE_MASK 0x1
-#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31
-
-/* AHG modes */
-
-/*
- * Be aware that the ordering and values
- * for SDMA_AHG_APPLY_UPDATE[123]
- * are assumed when generating a skip
- * count in submit_tx() in sdma.c.
- */
-#define SDMA_AHG_NO_AHG 0
-#define SDMA_AHG_COPY 1
-#define SDMA_AHG_APPLY_UPDATE1 2
-#define SDMA_AHG_APPLY_UPDATE2 3
-#define SDMA_AHG_APPLY_UPDATE3 4
-
-/*
- * Bits defined in the send DMA descriptor.
- */
-#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63)
-#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62)
-#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
-#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
-#define SDMA_DESC0_BYTE_COUNT_MASK \
- ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
-#define SDMA_DESC0_BYTE_COUNT_SMASK \
- (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
-#define SDMA_DESC0_PHY_ADDR_SHIFT 0
-#define SDMA_DESC0_PHY_ADDR_WIDTH 48
-#define SDMA_DESC0_PHY_ADDR_MASK \
- ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
-#define SDMA_DESC0_PHY_ADDR_SMASK \
- (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)
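-
-/*
- * Illustrative sketch (not driver code): packing qw[0] of a data
- * descriptor from the fields above.  Inputs are assumed to already
- * fit their field widths.
- */
-static inline u64 example_build_desc0(u64 phy_addr, u64 byte_count,
-				       int first, int last)
-{
-	u64 qw0 = ((phy_addr & SDMA_DESC0_PHY_ADDR_MASK)
-			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
-		  ((byte_count & SDMA_DESC0_BYTE_COUNT_MASK)
-			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
-
-	if (first)
-		qw0 |= SDMA_DESC0_FIRST_DESC_FLAG;
-	if (last)
-		qw0 |= SDMA_DESC0_LAST_DESC_FLAG;
-	return qw0;
-}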
-
-#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
-#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
-#define SDMA_DESC1_HEADER_UPDATE1_MASK \
- ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
-#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
- (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
-#define SDMA_DESC1_HEADER_MODE_SHIFT 13
-#define SDMA_DESC1_HEADER_MODE_WIDTH 3
-#define SDMA_DESC1_HEADER_MODE_MASK \
- ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
-#define SDMA_DESC1_HEADER_MODE_SMASK \
- (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
-#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
-#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
-#define SDMA_DESC1_HEADER_INDEX_MASK \
- ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
-#define SDMA_DESC1_HEADER_INDEX_SMASK \
- (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
-#define SDMA_DESC1_HEADER_DWS_SHIFT 4
-#define SDMA_DESC1_HEADER_DWS_WIDTH 4
-#define SDMA_DESC1_HEADER_DWS_MASK \
- ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
-#define SDMA_DESC1_HEADER_DWS_SMASK \
- (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
-#define SDMA_DESC1_GENERATION_SHIFT 2
-#define SDMA_DESC1_GENERATION_WIDTH 2
-#define SDMA_DESC1_GENERATION_MASK \
- ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
-#define SDMA_DESC1_GENERATION_SMASK \
- (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
-#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1)
-#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0)
-
-enum sdma_states {
- sdma_state_s00_hw_down,
- sdma_state_s10_hw_start_up_halt_wait,
- sdma_state_s15_hw_start_up_clean_wait,
- sdma_state_s20_idle,
- sdma_state_s30_sw_clean_up_wait,
- sdma_state_s40_hw_clean_up_wait,
- sdma_state_s50_hw_halt_wait,
- sdma_state_s60_idle_halt_wait,
- sdma_state_s80_hw_freeze,
- sdma_state_s82_freeze_sw_clean,
- sdma_state_s99_running,
-};
-
-enum sdma_events {
- sdma_event_e00_go_hw_down,
- sdma_event_e10_go_hw_start,
- sdma_event_e15_hw_halt_done,
- sdma_event_e25_hw_clean_up_done,
- sdma_event_e30_go_running,
- sdma_event_e40_sw_cleaned,
- sdma_event_e50_hw_cleaned,
- sdma_event_e60_hw_halted,
- sdma_event_e70_go_idle,
- sdma_event_e80_hw_freeze,
- sdma_event_e81_hw_frozen,
- sdma_event_e82_hw_unfreeze,
- sdma_event_e85_link_down,
- sdma_event_e90_sw_halted,
-};
-
-struct sdma_set_state_action {
- unsigned op_enable:1;
- unsigned op_intenable:1;
- unsigned op_halt:1;
- unsigned op_cleanup:1;
- unsigned go_s99_running_tofalse:1;
- unsigned go_s99_running_totrue:1;
-};
-
-struct sdma_state {
- struct kref kref;
- struct completion comp;
- enum sdma_states current_state;
- unsigned current_op;
- unsigned go_s99_running;
- /* debugging/development */
- enum sdma_states previous_state;
- unsigned previous_op;
- enum sdma_events last_event;
-};
-
-/**
- * DOC: sdma exported routines
- *
- * These sdma routines fit into three categories:
- * - The SDMA API for building and submitting packets
- * to the ring
- *
- * - Initialization and tear down routines to buildup
- * and tear down SDMA
- *
- * - ISR entrances to handle interrupts, state changes
- * and errors
- */
-
-/**
- * DOC: sdma PSM/verbs API
- *
- * The sdma API is designed to be used by both PSM
- * and verbs to supply packets to the SDMA ring.
- *
- * The usage of the API is as follows:
- *
- * Embed a struct iowait in the QP or
- * PQ. The iowait should be initialized with a
- * call to iowait_init().
- *
- * The user of the API should create an allocation method
- * for their version of the txreq. Slabs, pre-allocated lists,
- * and dma pools can be used. Once the user's overload of
- * the sdma_txreq has been allocated, the sdma_txreq member
- * must be initialized with sdma_txinit() or sdma_txinit_ahg().
- *
- * The txreq must be declared with the sdma_txreq first.
- *
- * The tx request, once initialized, is manipulated with calls to
- * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
- * for each disjoint memory location. It is the user's responsibility
- * to understand the packet boundaries and page boundaries to do the
- * appropriate number of sdma_txadd_* calls. The user
- * must be prepared to deal with failures from these routines due to
- * either memory allocation or dma_mapping failures.
- *
- * The mapping specifics for each memory location are recorded
- * in the tx. Memory locations added with sdma_txadd_page()
- * and sdma_txadd_kvaddr() are automatically mapped when added
- * to the tx and unmapped as part of the progress processing in the
- * SDMA interrupt handling.
- *
- * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
- * tx. An example of a use case would be a pre-allocated
- * set of headers allocated via dma_pool_alloc() or
- * dma_alloc_coherent(). For these memory locations, it
- * is the responsibility of the user to handle that unmapping.
- * (This would usually be at an unload or job termination.)
- *
- * The routine sdma_send_txreq() is used to submit
- * a tx to the ring after the appropriate number of
- * sdma_txadd_* have been done.
- *
- * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
- * can be used to submit a list of packets.
- *
- * The user is free to reuse the list link in the struct sdma_txreq as
- * long as the tx isn't in flight.
- *
- * The extreme degenerate case of the number of descriptors
- * exceeding the ring size is automatically handled as
- * memory locations are added. An overflow of the descriptor
- * array that is part of the sdma_txreq is also automatically
- * handled.
- *
- */
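Taken together, the flow above reduces to a short sequence. The sketch below is illustrative only: my_txreq and submit_one() are hypothetical names, the header is assumed to live in kernel virtual memory, and error handling is minimal.

/*
 * Minimal sketch of the submission flow described above.
 * sdma_txadd_kvaddr() cleans the tx itself on a mapping failure.
 */
struct my_txreq {
	struct sdma_txreq txreq;	/* must be the first member */
	/* user-private fields follow */
};

static int submit_one(struct hfi1_devdata *dd, struct sdma_engine *sde,
		      struct iowait *wait, struct my_txreq *tx,
		      void *hdr, u16 hdr_len)
{
	int ret;

	ret = sdma_txinit(&tx->txreq, 0, hdr_len, NULL);	/* no callback */
	if (ret)
		return ret;
	ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdr_len);
	if (ret)
		return ret;
	return sdma_send_txreq(sde, wait, &tx->txreq);
}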
-
-/**
- * DOC: Infrastructure calls
- *
- * sdma_init() is used to initialize data structures and
- * CSRs for the desired number of SDMA engines.
- *
- * sdma_start() is used to kick the SDMA engines initialized
- * with sdma_init(). Interrupts must be enabled at this
- * point since aspects of the state machine are interrupt
- * driven.
- *
- * sdma_engine_error() and sdma_engine_interrupt() are
- * entrances for interrupts.
- *
- * sdma_map_init() is for the management of the mapping
- * table when the number of vls is changed.
- *
- */
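A hedged sketch of the bring-up order this implies, assuming the chip initialization that enables interrupts has already run:

/* Sketch only: interrupts must already be enabled. */
ret = sdma_init(dd, port);	/* allocate rings, program CSRs */
if (ret)
	return ret;
sdma_start(dd);	/* kick the engines; the state machine is interrupt driven */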
-
-/*
- * struct hw_sdma_desc - raw 128 bit SDMA descriptor
- *
- * This is the raw descriptor in the SDMA ring
- */
-struct hw_sdma_desc {
- /* private: don't use directly */
- __le64 qw[2];
-};
-
-/**
- * struct sdma_engine - Data pertaining to each SDMA engine.
- * @dd: a back-pointer to the device data
- * @ppd: per port back-pointer
- * @imask: mask for irq manipulation
- * @idle_mask: mask for determining if an interrupt is due to sdma_idle
- *
- * This structure has the state for each sdma_engine.
- *
- * Access to non-public fields is not supported,
- * since the private members are subject to change.
- */
-struct sdma_engine {
- /* read mostly */
- struct hfi1_devdata *dd;
- struct hfi1_pportdata *ppd;
- /* private: */
- void __iomem *tail_csr;
- u64 imask; /* clear interrupt mask */
- u64 idle_mask;
- u64 progress_mask;
- u64 int_mask;
- /* private: */
- volatile __le64 *head_dma; /* DMA'ed by chip */
- /* private: */
- dma_addr_t head_phys;
- /* private: */
- struct hw_sdma_desc *descq;
- /* private: */
- unsigned descq_full_count;
- struct sdma_txreq **tx_ring;
- /* private: */
- dma_addr_t descq_phys;
- /* private: */
- u32 sdma_mask;
- /* private: */
- struct sdma_state state;
- /* private: */
- int cpu;
- /* private: */
- u8 sdma_shift;
- /* private: */
- u8 this_idx; /* zero relative engine */
- /* protect changes to senddmactrl shadow */
- spinlock_t senddmactrl_lock;
- /* private: */
- u64 p_senddmactrl; /* shadow per-engine SendDmaCtrl */
-
- /* read/write using tail_lock */
- spinlock_t tail_lock ____cacheline_aligned_in_smp;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- /* private: */
- u64 tail_sn;
-#endif
- /* private: */
- u32 descq_tail;
- /* private: */
- unsigned long ahg_bits;
- /* private: */
- u16 desc_avail;
- /* private: */
- u16 tx_tail;
- /* private: */
- u16 descq_cnt;
-
- /* read/write using head_lock */
- /* private: */
- seqlock_t head_lock ____cacheline_aligned_in_smp;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- /* private: */
- u64 head_sn;
-#endif
- /* private: */
- u32 descq_head;
- /* private: */
- u16 tx_head;
- /* private: */
- u64 last_status;
- /* private: */
- u64 err_cnt;
- /* private: */
- u64 sdma_int_cnt;
- u64 idle_int_cnt;
- u64 progress_int_cnt;
-
- /* private: */
- struct list_head dmawait;
-
- /* CONFIG SDMA for now, just blindly duplicate */
- /* private: */
- struct tasklet_struct sdma_hw_clean_up_task
- ____cacheline_aligned_in_smp;
-
- /* private: */
- struct tasklet_struct sdma_sw_clean_up_task
- ____cacheline_aligned_in_smp;
- /* private: */
- struct work_struct err_halt_worker;
- /* private: */
- struct timer_list err_progress_check_timer;
- u32 progress_check_head;
- /* private: */
- struct work_struct flush_worker;
- /* protect flush list */
- spinlock_t flushlist_lock;
- /* private: */
- struct list_head flushlist;
-};
-
-int sdma_init(struct hfi1_devdata *dd, u8 port);
-void sdma_start(struct hfi1_devdata *dd);
-void sdma_exit(struct hfi1_devdata *dd);
-void sdma_all_running(struct hfi1_devdata *dd);
-void sdma_all_idle(struct hfi1_devdata *dd);
-void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
-void sdma_freeze(struct hfi1_devdata *dd);
-void sdma_unfreeze(struct hfi1_devdata *dd);
-void sdma_wait(struct hfi1_devdata *dd);
-
-/**
- * sdma_empty() - idle engine test
- * @sde: sdma engine
- *
- * Currently used by verbs as a latency optimization.
- *
- * Return:
- * 1 - empty, 0 - non-empty
- */
-static inline int sdma_empty(struct sdma_engine *sde)
-{
- return sde->descq_tail == sde->descq_head;
-}
-
-static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
-{
- return sde->descq_cnt -
- (sde->descq_tail -
- ACCESS_ONCE(sde->descq_head)) - 1;
-}
-
-static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
-{
- return sde->descq_cnt - sdma_descq_freecnt(sde);
-}
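The free count depends on the indices free-running at the full type width (masking to the ring happens on access, which the u16 return implies) and on one reserved slot so a full ring is distinguishable from an empty one. A standalone illustration of the same arithmetic, using 16-bit indices for brevity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t descq_cnt = 2048;
	/* tail has wrapped past head; modular subtraction still
	 * yields the in-flight count */
	uint16_t head = 65500, tail = 20;
	uint16_t inflight = (uint16_t)(tail - head);	/* 56 */
	uint16_t freecnt = descq_cnt - inflight - 1;	/* 1991 */

	printf("in flight %u, free %u\n", inflight, freecnt);
	return 0;
}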
-
-/*
- * Either head_lock or tail_lock is required to see
- * a steady state.
- */
-static inline int __sdma_running(struct sdma_engine *engine)
-{
- return engine->state.current_state == sdma_state_s99_running;
-}
-
-/**
- * sdma_running() - state suitability test
- * @engine: sdma engine
- *
- * sdma_running probes the internal state to determine if it is suitable
- * for submitting packets.
- *
- * Return:
- * 1 - ok to submit, 0 - not ok to submit
- *
- */
-static inline int sdma_running(struct sdma_engine *engine)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&engine->tail_lock, flags);
- ret = __sdma_running(engine);
- spin_unlock_irqrestore(&engine->tail_lock, flags);
- return ret;
-}
-
-void _sdma_txreq_ahgadd(
- struct sdma_txreq *tx,
- u8 num_ahg,
- u8 ahg_entry,
- u32 *ahg,
- u8 ahg_hlen);
-
-/**
- * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
- * @tx: tx request to initialize
- * @flags: flags to key last descriptor additions
- * @tlen: total packet length (pbc + headers + data)
- * @ahg_entry: ahg entry to use (0 - 31)
- * @num_ahg: number of AHG descriptors to add (0 - 9)
- * @ahg: array of AHG descriptors (up to 9 entries)
- * @ahg_hlen: number of bytes from the AHG entry to use as the header
- * @cb: callback
- *
- * The allocation of the sdma_txreq and its enclosing structure is user
- * dependent. This routine must be called to initialize the user-independent
- * fields.
- *
- * The currently supported flags are SDMA_TXREQ_F_URGENT,
- * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
- *
- * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
- * completion is desired as soon as possible.
- *
- * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
- * copied to the chip AHG entry. SDMA_TXREQ_F_USE_AHG causes the code to
- * add the AHG descriptors into the first 1 to 3 descriptors.
- *
- * Completions of submitted requests can be gotten on selected
- * txreqs by giving a completion routine callback to sdma_txinit() or
- * sdma_txinit_ahg(). The environment in which the callback runs
- * can be from an ISR, a tasklet, or a thread, so no sleeping
- * kernel routines can be used. Aspects of the sdma ring may
- * be locked so care should be taken with locking.
- *
- * The callback pointer can be NULL to avoid any callback for the packet
- * being submitted. The callback will be provided this tx, a status, and a flag.
- *
- * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
- * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
- *
- * The flag, if an iowait has been used, indicates that the iowait
- * sdma_busy count has reached zero.
- *
- * The user data portion of tlen should be precise. The sdma_txadd_* entrances
- * will pad with a descriptor referencing 1 - 3 bytes once the number of bytes
- * specified in tlen has been supplied to the sdma_txreq.
- *
- * ahg_hlen is used to determine the number of on-chip entry bytes to
- * use as the header. This is for cases where the stored header is
- * larger than the header to be used in a packet. This is typical
- * for verbs, where an RDMA_WRITE_FIRST is larger than the packet
- * in an RDMA_WRITE_MIDDLE.
- *
- */
-static inline int sdma_txinit_ahg(
- struct sdma_txreq *tx,
- u16 flags,
- u16 tlen,
- u8 ahg_entry,
- u8 num_ahg,
- u32 *ahg,
- u8 ahg_hlen,
- void (*cb)(struct sdma_txreq *, int))
-{
- if (tlen == 0)
- return -ENODATA;
- if (tlen > MAX_SDMA_PKT_SIZE)
- return -EMSGSIZE;
- tx->desc_limit = ARRAY_SIZE(tx->descs);
- tx->descp = &tx->descs[0];
- INIT_LIST_HEAD(&tx->list);
- tx->num_desc = 0;
- tx->flags = flags;
- tx->complete = cb;
- tx->coalesce_buf = NULL;
- tx->wait = NULL;
- tx->packet_len = tlen;
- tx->tlen = tx->packet_len;
- tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
- tx->descs[0].qw[1] = 0;
- if (flags & SDMA_TXREQ_F_AHG_COPY)
- tx->descs[0].qw[1] |=
- (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
- << SDMA_DESC1_HEADER_INDEX_SHIFT) |
- (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
- << SDMA_DESC1_HEADER_MODE_SHIFT);
- else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
- _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
- return 0;
-}
-
-/**
- * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
- * @tx: tx request to initialize
- * @flags: flags to key last descriptor additions
- * @tlen: total packet length (pbc + headers + data)
- * @cb: callback pointer
- *
- * The allocation of the sdma_txreq and its enclosing structure is user
- * dependent. This routine must be called to initialize the
- * user-independent fields.
- *
- * The only currently supported flag is SDMA_TXREQ_F_URGENT.
- *
- * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
- * completion is desired as soon as possible.
- *
- * Completions of submitted requests can be gotten on selected
- * txreqs by giving a completion routine callback to sdma_txinit() or
- * sdma_txinit_ahg(). The environment in which the callback runs
- * can be from an ISR, a tasklet, or a thread, so no sleeping
- * kernel routines can be used. Aspects of the sdma ring may
- * be locked, so care should be taken with locking.
- *
- * The callback pointer can be NULL to avoid any callback for the packet
- * being submitted.
- *
- * The callback, if non-NULL, will be provided this tx and a status. The
- * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
- * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
- *
- */
-static inline int sdma_txinit(
- struct sdma_txreq *tx,
- u16 flags,
- u16 tlen,
- void (*cb)(struct sdma_txreq *, int))
-{
- return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
-}
-
-/* helpers - don't use */
-static inline int sdma_mapping_type(struct sdma_desc *d)
-{
- return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
- >> SDMA_DESC1_GENERATION_SHIFT;
-}
-
-static inline size_t sdma_mapping_len(struct sdma_desc *d)
-{
- return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
- >> SDMA_DESC0_BYTE_COUNT_SHIFT;
-}
-
-static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
-{
- return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
- >> SDMA_DESC0_PHY_ADDR_SHIFT;
-}
-
-static inline void make_tx_sdma_desc(
- struct sdma_txreq *tx,
- int type,
- dma_addr_t addr,
- size_t len)
-{
- struct sdma_desc *desc = &tx->descp[tx->num_desc];
-
- if (!tx->num_desc) {
- /* first descriptor: qw[0] and AHG mode in qw[1] were preset at init */
- desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
- << SDMA_DESC1_GENERATION_SHIFT;
- } else {
- desc->qw[0] = 0;
- desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
- << SDMA_DESC1_GENERATION_SHIFT;
- }
- desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
- << SDMA_DESC0_PHY_ADDR_SHIFT) |
- (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
- << SDMA_DESC0_BYTE_COUNT_SHIFT);
-}
-
-/* helper to extend txreq */
-int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
- int type, void *kvaddr, struct page *page,
- unsigned long offset, u16 len);
-int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
-void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
-
-/* helpers used by public routines */
-static inline void _sdma_close_tx(struct hfi1_devdata *dd,
- struct sdma_txreq *tx)
-{
- tx->descp[tx->num_desc].qw[0] |=
- SDMA_DESC0_LAST_DESC_FLAG;
- tx->descp[tx->num_desc].qw[1] |=
- dd->default_desc1;
- if (tx->flags & SDMA_TXREQ_F_URGENT)
- tx->descp[tx->num_desc].qw[1] |=
- (SDMA_DESC1_HEAD_TO_HOST_FLAG |
- SDMA_DESC1_INT_REQ_FLAG);
-}
-
-static inline int _sdma_txadd_daddr(
- struct hfi1_devdata *dd,
- int type,
- struct sdma_txreq *tx,
- dma_addr_t addr,
- u16 len)
-{
- int rval = 0;
-
- make_tx_sdma_desc(
- tx,
- type,
- addr, len);
- WARN_ON(len > tx->tlen);
- tx->tlen -= len;
- /* special cases for last */
- if (!tx->tlen) {
- if (tx->packet_len & (sizeof(u32) - 1)) {
- rval = _pad_sdma_tx_descs(dd, tx);
- if (rval)
- return rval;
- } else {
- _sdma_close_tx(dd, tx);
- }
- }
- tx->num_desc++;
- return rval;
-}
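The dword test above triggers the 1 - 3 byte pad promised in the API notes; a standalone illustration of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t packet_len = 4099;	/* 3 bytes past a dword boundary */
	unsigned int rem = packet_len & (sizeof(uint32_t) - 1);

	printf("pad bytes: %u\n", rem ? 4 - rem : 0);	/* prints 1 */
	return 0;
}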
-
-/**
- * sdma_txadd_page() - add a page to the sdma_txreq
- * @dd: the device to use for mapping
- * @tx: tx request to which the page is added
- * @page: page to map
- * @offset: offset within the page
- * @len: length in bytes
- *
- * This is used to add a page/offset/length descriptor.
- *
- * The mapping/unmapping of the page/offset/len is automatically handled.
- *
- * Return:
- * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
- * extend/coalesce descriptor array
- */
-static inline int sdma_txadd_page(
- struct hfi1_devdata *dd,
- struct sdma_txreq *tx,
- struct page *page,
- unsigned long offset,
- u16 len)
-{
- dma_addr_t addr;
- int rval;
-
- if (unlikely(tx->num_desc == tx->desc_limit)) {
- rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
- NULL, page, offset, len);
- if (rval <= 0)
- return rval;
- }
-
- addr = dma_map_page(
- &dd->pcidev->dev,
- page,
- offset,
- len,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
- return -ENOSPC;
- }
-
- return _sdma_txadd_daddr(
- dd, SDMA_MAP_PAGE, tx, addr, len);
-}
-
-/**
- * sdma_txadd_daddr() - add a dma address to the sdma_txreq
- * @dd: the device to use for mapping
- * @tx: sdma_txreq to which the page is added
- * @addr: dma address mapped by caller
- * @len: length in bytes
- *
- * This is used to add a descriptor for memory that is already dma mapped.
- *
- * In this case, there is no unmapping as part of the progress processing for
- * this memory location.
- *
- * Return:
- * 0 - success, -ENOMEM - couldn't extend descriptor array
- */
-
-static inline int sdma_txadd_daddr(
- struct hfi1_devdata *dd,
- struct sdma_txreq *tx,
- dma_addr_t addr,
- u16 len)
-{
- int rval;
-
- if (unlikely(tx->num_desc == tx->desc_limit)) {
- rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
- NULL, NULL, 0, 0);
- if (rval <= 0)
- return rval;
- }
-
- return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
-}
-
-/**
- * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
- * @dd: the device to use for mapping
- * @tx: sdma_txreq to which the page is added
- * @kvaddr: the kernel virtual address
- * @len: length in bytes
- *
- * This is used to add a descriptor referenced by the indicated kvaddr and
- * len.
- *
- * The mapping/unmapping of the kvaddr and len is automatically handled.
- *
- * Return:
- * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
- * descriptor array
- */
-static inline int sdma_txadd_kvaddr(
- struct hfi1_devdata *dd,
- struct sdma_txreq *tx,
- void *kvaddr,
- u16 len)
-{
- dma_addr_t addr;
- int rval;
-
- if (unlikely(tx->num_desc == tx->desc_limit)) {
- rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
- kvaddr, NULL, 0, len);
- if (rval <= 0)
- return rval;
- }
-
- addr = dma_map_single(
- &dd->pcidev->dev,
- kvaddr,
- len,
- DMA_TO_DEVICE);
-
- if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
- sdma_txclean(dd, tx);
- return -ENOSPC;
- }
-
- return _sdma_txadd_daddr(
- dd, SDMA_MAP_SINGLE, tx, addr, len);
-}
-
-struct iowait;
-
-int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx);
-int sdma_send_txlist(struct sdma_engine *sde,
- struct iowait *wait,
- struct list_head *tx_list);
-
-int sdma_ahg_alloc(struct sdma_engine *sde);
-void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
-
-/**
- * sdma_build_ahg_descriptor() - build ahg descriptor
- * @data: value to write into the header field
- * @dwindex: index of the header dword to update
- * @startbit: first bit of the field within the dword
- * @bits: width of the field in bits
- *
- * Build and return a 32 bit descriptor.
- */
-static inline u32 sdma_build_ahg_descriptor(
- u16 data,
- u8 dwindex,
- u8 startbit,
- u8 bits)
-{
- return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
- ((startbit & SDMA_AHG_FIELD_START_MASK) <<
- SDMA_AHG_FIELD_START_SHIFT) |
- ((bits & SDMA_AHG_FIELD_LEN_MASK) <<
- SDMA_AHG_FIELD_LEN_SHIFT) |
- ((dwindex & SDMA_AHG_INDEX_MASK) <<
- SDMA_AHG_INDEX_SHIFT) |
- ((data & SDMA_AHG_VALUE_MASK) <<
- SDMA_AHG_VALUE_SHIFT));
-}
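A hypothetical use: encode an update that writes the 16-bit value 0x1234 into bits 16..31 of header dword 7. The dword and bit positions are chosen for illustration, not taken from this driver:

/* Hypothetical field update; positions chosen for illustration. */
u32 upd = sdma_build_ahg_descriptor(0x1234,	/* data */
				    7,		/* dwindex */
				    16,		/* startbit */
				    16);	/* bits */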
-
-/**
- * sdma_progress - use seq number to detect head progress
- * @sde: sdma_engine to check
- * @seq: base seq count
- * @tx: txreq for which we need to check descriptor availability
- *
- * This is used in the appropriate spot in the sleep routine
- * to check for potential ring progress. The caller samples the
- * seqcount before queuing the iowait structure and passes it
- * in as @seq.
- *
- * If the seqcount indicates that the head has moved,
- * re-submission is possible when the descriptor
- * queue has enough free descriptors for the txreq.
- */
-static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
- struct sdma_txreq *tx)
-{
- if (read_seqretry(&sde->head_lock, seq)) {
- sde->desc_avail = sdma_descq_freecnt(sde);
- if (tx->num_desc > sde->desc_avail)
- return 0;
- return 1;
- }
- return 0;
-}
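A minimal sketch of the sleep-side pattern, assuming the caller's sleep routine already holds whatever lock protects sde->dmawait and that my_requeue() stands in for its retry path:

/* Sketch only: dmawait locking and my_requeue() are the caller's. */
unsigned seq = read_seqbegin(&sde->head_lock);

list_add_tail(&wait->list, &sde->dmawait);	/* queue for wakeup */
if (sdma_progress(sde, seq, tx)) {
	list_del_init(&wait->list);
	my_requeue(wait);	/* head moved and room exists: retry now */
}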
-
-/**
- * sdma_iowait_schedule() - schedule the iowait
- * @sde: sdma_engine to schedule
- * @wait: wait struct to schedule
- *
- * This function schedules the iowait structure
- * embedded in the QP or PQ on the port's workqueue.
- *
- */
-static inline void sdma_iowait_schedule(
- struct sdma_engine *sde,
- struct iowait *wait)
-{
- struct hfi1_pportdata *ppd = sde->dd->pport;
-
- iowait_schedule(wait, ppd->hfi1_wq, sde->cpu);
-}
-
-/* for use by interrupt handling */
-void sdma_engine_error(struct sdma_engine *sde, u64 status);
-void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
-
-/*
- *
- * The diagram below details the relationship of the mapping structures
- *
- * Since the mapping now allows for non-uniform engines per vl, the
- * number of engines for a vl is either the vl_engines[vl] or
- * a computation based on num_sdma/num_vls:
- *
- * For example:
- * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
- *
- * n = roundup to next highest power of 2 using nactual
- *
- * In the case where num_sdma doesn't divide evenly by num_vls,
- * the extra engines are added from the last vl downward.
- *
- * For the case where n > nactual, the engines are assigned
- * in a round robin fashion wrapping back to the first engine
- * for a particular vl.
- *
- * dd->sdma_map
- * | sdma_map_elem[0]
- * | +--------------------+
- * v | mask |
- * sdma_vl_map |--------------------|
- * +--------------------------+ | sde[0] -> eng 1 |
- * | list (RCU) | |--------------------|
- * |--------------------------| ->| sde[1] -> eng 2 |
- * | mask | --/ |--------------------|
- * |--------------------------| -/ | * |
- * | actual_vls (max 8) | -/ |--------------------|
- * |--------------------------| --/ | sde[n] -> eng n |
- * | vls (max 8) | -/ +--------------------+
- * |--------------------------| --/
- * | map[0] |-/
- * |--------------------------| +--------------------+
- * | map[1] |--- | mask |
- * |--------------------------| \---- |--------------------|
- * | * | \-- | sde[0] -> eng 1+n |
- * | * | \---- |--------------------|
- * | * | \->| sde[1] -> eng 2+n |
- * |--------------------------| |--------------------|
- * | map[vls - 1] |- | * |
- * +--------------------------+ \- |--------------------|
- * \- | sde[m] -> eng m+n |
- * \ +--------------------+
- * \-
- * \
- * \- +--------------------+
- * \- | mask |
- * \ |--------------------|
- * \- | sde[0] -> eng 1+m+n|
- * \- |--------------------|
- * >| sde[1] -> eng 2+m+n|
- * |--------------------|
- * | * |
- * |--------------------|
- * | sde[o] -> eng o+m+n|
- * +--------------------+
- *
- */
-
-/**
- * struct sdma_map_elem - mapping for a vl
- * @mask: selector mask
- * @sde: array of engines for this vl
- *
- * The mask is used to "mod" the selector
- * to produce an index into the trailing
- * array of sdes.
- */
-struct sdma_map_elem {
- u32 mask;
- struct sdma_engine *sde[0];
-};
-
-/**
- * struct sdma_vl_map - mapping of vls to engines
- * @engine_to_vl: map of an engine to a vl
- * @list: rcu head for free callback
- * @mask: vl mask to "mod" the vl to produce an index into the map array
- * @actual_vls: number of vls
- * @vls: number of vls rounded to next power of 2
- * @map: array of sdma_map_elem entries
- *
- * This is the parent mapping structure. The trailing
- * map[] entries point to sdma_map_elem structures, each of
- * which in turn holds the array of sdes for one vl.
- */
-struct sdma_vl_map {
- s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
- struct rcu_head list;
- u32 mask;
- u8 actual_vls;
- u8 vls;
- struct sdma_map_elem *map[0];
-};
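The two-level lookup these structures imply can be sketched as below; the real sdma_select_engine_vl() adds RCU protection and fallback handling, so treat this as a simplification:

/* Simplified two-level lookup; RCU and error paths omitted. */
static struct sdma_engine *pick_engine(struct sdma_vl_map *m,
				       u32 selector, u8 vl)
{
	struct sdma_map_elem *e = m->map[vl & m->mask];

	return e->sde[selector & e->mask];
}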
-
-int sdma_map_init(
- struct hfi1_devdata *dd,
- u8 port,
- u8 num_vls,
- u8 *vl_engines);
-
-/* slow path */
-void _sdma_engine_progress_schedule(struct sdma_engine *sde);
-
-/**
- * sdma_engine_progress_schedule() - schedule progress on engine
- * @sde: sdma_engine to schedule progress
- *
- * This is the fast path.
- *
- */
-static inline void sdma_engine_progress_schedule(
- struct sdma_engine *sde)
-{
- if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
- return;
- _sdma_engine_progress_schedule(sde);
-}
-
-struct sdma_engine *sdma_select_engine_sc(
- struct hfi1_devdata *dd,
- u32 selector,
- u8 sc5);
-
-struct sdma_engine *sdma_select_engine_vl(
- struct hfi1_devdata *dd,
- u32 selector,
- u8 vl);
-
-void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);
-
-#ifdef CONFIG_SDMA_VERBOSITY
-void sdma_dumpstate(struct sdma_engine *);
-#endif
-static inline char *slashstrip(char *s)
-{
- char *r = s;
-
- while (*s)
- if (*s++ == '/')
- r = s;
- return r;
-}
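slashstrip() returns the final path component, e.g.:

char path[] = "drivers/staging/rdma/hfi1/sdma.c";

printf("%s\n", slashstrip(path));	/* prints "sdma.c" */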
-
-u16 sdma_get_descq_cnt(void);
-
-extern uint mod_num_sdma;
-
-void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
-
-#endif
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/staging/rdma/hfi1/sdma_txreq.h
deleted file mode 100644
index bf7d777d7..000000000
--- a/drivers/staging/rdma/hfi1/sdma_txreq.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef HFI1_SDMA_TXREQ_H
-#define HFI1_SDMA_TXREQ_H
-
-/* increased for AHG */
-#define NUM_DESC 6
-
-/*
- * struct sdma_desc - canonical fragment descriptor
- *
- * This is the descriptor carried in the tx request
- * corresponding to each fragment.
- *
- */
-struct sdma_desc {
- /* private: don't use directly */
- u64 qw[2];
-};
-
-/**
- * struct sdma_txreq - the sdma_txreq structure (one per packet)
- * @list: for use by the user and for queuing while waiting
- *
- * This is the representation of a packet which consists of some
- * number of fragments. Storage for the fragment descriptors is
- * provided within the structure.
- *
- * The storage for the descriptors is automatically extended as needed
- * when the current allocation is exceeded.
- *
- * The user (Verbs or PSM) may overload this structure with fields
- * specific to their use by embedding this struct as the first member
- * of their own structure.
- * The method of allocation of the overloaded structure is user dependent.
- *
- * The list is the only public field in the structure.
- *
- */
-
-#define SDMA_TXREQ_S_OK 0
-#define SDMA_TXREQ_S_SENDERROR 1
-#define SDMA_TXREQ_S_ABORTED 2
-#define SDMA_TXREQ_S_SHUTDOWN 3
-
-/* flags bits */
-#define SDMA_TXREQ_F_URGENT 0x0001
-#define SDMA_TXREQ_F_AHG_COPY 0x0002
-#define SDMA_TXREQ_F_USE_AHG 0x0004
-
-struct sdma_txreq;
-typedef void (*callback_t)(struct sdma_txreq *, int);
-
-struct iowait;
-struct sdma_txreq {
- struct list_head list;
- /* private: */
- struct sdma_desc *descp;
- /* private: */
- void *coalesce_buf;
- /* private: */
- struct iowait *wait;
- /* private: */
- callback_t complete;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- u64 sn;
-#endif
- /* private: - used in coalesce/pad processing */
- u16 packet_len;
- /* private: - down-counted to trigger last */
- u16 tlen;
- /* private: */
- u16 num_desc;
- /* private: */
- u16 desc_limit;
- /* private: */
- u16 next_descq_idx;
- /* private: */
- u16 coalesce_idx;
- /* private: flags */
- u16 flags;
- /* private: */
- struct sdma_desc descs[NUM_DESC];
-};
-
-static inline int sdma_txreq_built(struct sdma_txreq *tx)
-{
- return tx->num_desc;
-}
-
-#endif /* HFI1_SDMA_TXREQ_H */
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c
deleted file mode 100644
index c7f127119..000000000
--- a/drivers/staging/rdma/hfi1/sysfs.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/ctype.h>
-
-#include "hfi.h"
-#include "mad.h"
-#include "trace.h"
-
-/*
- * Start of per-port congestion control structures and support code
- */
-
-/*
- * Congestion control table size followed by table entries
- */
-static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- int ret;
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
- struct cc_state *cc_state;
-
- ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
- + sizeof(__be16);
-
- if (pos > ret)
- return -EINVAL;
-
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- rcu_read_lock();
- cc_state = get_cc_state(ppd);
- if (!cc_state) {
- rcu_read_unlock();
- return -EINVAL;
- }
- memcpy(buf, &cc_state->cct, count);
- rcu_read_unlock();
-
- return count;
-}
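For illustration, a hypothetical user-space reader for this attribute. The path is an assumption assembled from the kobject names used later in this file (the port kobject plus "CCMgtA"); the driver clamps out-of-range reads as shown above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* device and port names are assumptions */
	int fd = open("/sys/class/infiniband/hfi1_0/ports/1/CCMgtA/cc_table_bin",
		      O_RDONLY);
	unsigned char buf[64];
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf));	/* may be shorter than the table */
	printf("read %zd bytes of cc table\n", n);
	close(fd);
	return 0;
}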
-
-static void port_release(struct kobject *kobj)
-{
- /* nothing to do since memory is freed by hfi1_free_devdata() */
-}
-
-static struct bin_attribute cc_table_bin_attr = {
- .attr = {.name = "cc_table_bin", .mode = 0444},
- .read = read_cc_table_bin,
- .size = PAGE_SIZE,
-};
-
-/*
- * Congestion settings: port control, control map and an array of 16
- * entries for the congestion entries - increase, timer, event log
- * trigger threshold and the minimum injection rate delay.
- */
-static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- int ret;
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
- struct cc_state *cc_state;
-
- ret = sizeof(struct opa_congestion_setting_attr_shadow);
-
- if (pos > ret)
- return -EINVAL;
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- rcu_read_lock();
- cc_state = get_cc_state(ppd);
- if (!cc_state) {
- rcu_read_unlock();
- return -EINVAL;
- }
- memcpy(buf, &cc_state->cong_setting, count);
- rcu_read_unlock();
-
- return count;
-}
-
-static struct bin_attribute cc_setting_bin_attr = {
- .attr = {.name = "cc_settings_bin", .mode = 0444},
- .read = read_cc_setting_bin,
- .size = PAGE_SIZE,
-};
-
-struct hfi1_port_attr {
- struct attribute attr;
- ssize_t (*show)(struct hfi1_pportdata *, char *);
- ssize_t (*store)(struct hfi1_pportdata *, const char *, size_t);
-};
-
-static ssize_t cc_prescan_show(struct hfi1_pportdata *ppd, char *buf)
-{
- return sprintf(buf, "%s\n", ppd->cc_prescan ? "on" : "off");
-}
-
-static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf,
- size_t count)
-{
- if (!memcmp(buf, "on", 2))
- ppd->cc_prescan = true;
- else if (!memcmp(buf, "off", 3))
- ppd->cc_prescan = false;
-
- return count;
-}
-
-static struct hfi1_port_attr cc_prescan_attr =
- __ATTR(cc_prescan, 0600, cc_prescan_show, cc_prescan_store);
-
-static ssize_t cc_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_port_attr *port_attr =
- container_of(attr, struct hfi1_port_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
-
- return port_attr->show(ppd, buf);
-}
-
-static ssize_t cc_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct hfi1_port_attr *port_attr =
- container_of(attr, struct hfi1_port_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
-
- return port_attr->store(ppd, buf, count);
-}
-
-static const struct sysfs_ops port_cc_sysfs_ops = {
- .show = cc_attr_show,
- .store = cc_attr_store
-};
-
-static struct attribute *port_cc_default_attributes[] = {
- &cc_prescan_attr.attr
-};
-
-static struct kobj_type port_cc_ktype = {
- .release = port_release,
- .sysfs_ops = &port_cc_sysfs_ops,
- .default_attrs = port_cc_default_attributes
-};
-
-/* Start sc2vl */
-#define HFI1_SC2VL_ATTR(N) \
- static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .sc = N \
- }
-
-struct hfi1_sc2vl_attr {
- struct attribute attr;
- int sc;
-};
-
-HFI1_SC2VL_ATTR(0);
-HFI1_SC2VL_ATTR(1);
-HFI1_SC2VL_ATTR(2);
-HFI1_SC2VL_ATTR(3);
-HFI1_SC2VL_ATTR(4);
-HFI1_SC2VL_ATTR(5);
-HFI1_SC2VL_ATTR(6);
-HFI1_SC2VL_ATTR(7);
-HFI1_SC2VL_ATTR(8);
-HFI1_SC2VL_ATTR(9);
-HFI1_SC2VL_ATTR(10);
-HFI1_SC2VL_ATTR(11);
-HFI1_SC2VL_ATTR(12);
-HFI1_SC2VL_ATTR(13);
-HFI1_SC2VL_ATTR(14);
-HFI1_SC2VL_ATTR(15);
-HFI1_SC2VL_ATTR(16);
-HFI1_SC2VL_ATTR(17);
-HFI1_SC2VL_ATTR(18);
-HFI1_SC2VL_ATTR(19);
-HFI1_SC2VL_ATTR(20);
-HFI1_SC2VL_ATTR(21);
-HFI1_SC2VL_ATTR(22);
-HFI1_SC2VL_ATTR(23);
-HFI1_SC2VL_ATTR(24);
-HFI1_SC2VL_ATTR(25);
-HFI1_SC2VL_ATTR(26);
-HFI1_SC2VL_ATTR(27);
-HFI1_SC2VL_ATTR(28);
-HFI1_SC2VL_ATTR(29);
-HFI1_SC2VL_ATTR(30);
-HFI1_SC2VL_ATTR(31);
-
-static struct attribute *sc2vl_default_attributes[] = {
- &hfi1_sc2vl_attr_0.attr,
- &hfi1_sc2vl_attr_1.attr,
- &hfi1_sc2vl_attr_2.attr,
- &hfi1_sc2vl_attr_3.attr,
- &hfi1_sc2vl_attr_4.attr,
- &hfi1_sc2vl_attr_5.attr,
- &hfi1_sc2vl_attr_6.attr,
- &hfi1_sc2vl_attr_7.attr,
- &hfi1_sc2vl_attr_8.attr,
- &hfi1_sc2vl_attr_9.attr,
- &hfi1_sc2vl_attr_10.attr,
- &hfi1_sc2vl_attr_11.attr,
- &hfi1_sc2vl_attr_12.attr,
- &hfi1_sc2vl_attr_13.attr,
- &hfi1_sc2vl_attr_14.attr,
- &hfi1_sc2vl_attr_15.attr,
- &hfi1_sc2vl_attr_16.attr,
- &hfi1_sc2vl_attr_17.attr,
- &hfi1_sc2vl_attr_18.attr,
- &hfi1_sc2vl_attr_19.attr,
- &hfi1_sc2vl_attr_20.attr,
- &hfi1_sc2vl_attr_21.attr,
- &hfi1_sc2vl_attr_22.attr,
- &hfi1_sc2vl_attr_23.attr,
- &hfi1_sc2vl_attr_24.attr,
- &hfi1_sc2vl_attr_25.attr,
- &hfi1_sc2vl_attr_26.attr,
- &hfi1_sc2vl_attr_27.attr,
- &hfi1_sc2vl_attr_28.attr,
- &hfi1_sc2vl_attr_29.attr,
- &hfi1_sc2vl_attr_30.attr,
- &hfi1_sc2vl_attr_31.attr,
- NULL
-};
-
-static ssize_t sc2vl_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_sc2vl_attr *sattr =
- container_of(attr, struct hfi1_sc2vl_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, sc2vl_kobj);
- struct hfi1_devdata *dd = ppd->dd;
-
- return sprintf(buf, "%u\n", *((u8 *)dd->sc2vl + sattr->sc));
-}
-
-static const struct sysfs_ops hfi1_sc2vl_ops = {
- .show = sc2vl_attr_show,
-};
-
-static struct kobj_type hfi1_sc2vl_ktype = {
- .release = port_release,
- .sysfs_ops = &hfi1_sc2vl_ops,
- .default_attrs = sc2vl_default_attributes
-};
-
-/* End sc2vl */
-
-/* Start sl2sc */
-#define HFI1_SL2SC_ATTR(N) \
- static struct hfi1_sl2sc_attr hfi1_sl2sc_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .sl = N \
- }
-
-struct hfi1_sl2sc_attr {
- struct attribute attr;
- int sl;
-};
-
-HFI1_SL2SC_ATTR(0);
-HFI1_SL2SC_ATTR(1);
-HFI1_SL2SC_ATTR(2);
-HFI1_SL2SC_ATTR(3);
-HFI1_SL2SC_ATTR(4);
-HFI1_SL2SC_ATTR(5);
-HFI1_SL2SC_ATTR(6);
-HFI1_SL2SC_ATTR(7);
-HFI1_SL2SC_ATTR(8);
-HFI1_SL2SC_ATTR(9);
-HFI1_SL2SC_ATTR(10);
-HFI1_SL2SC_ATTR(11);
-HFI1_SL2SC_ATTR(12);
-HFI1_SL2SC_ATTR(13);
-HFI1_SL2SC_ATTR(14);
-HFI1_SL2SC_ATTR(15);
-HFI1_SL2SC_ATTR(16);
-HFI1_SL2SC_ATTR(17);
-HFI1_SL2SC_ATTR(18);
-HFI1_SL2SC_ATTR(19);
-HFI1_SL2SC_ATTR(20);
-HFI1_SL2SC_ATTR(21);
-HFI1_SL2SC_ATTR(22);
-HFI1_SL2SC_ATTR(23);
-HFI1_SL2SC_ATTR(24);
-HFI1_SL2SC_ATTR(25);
-HFI1_SL2SC_ATTR(26);
-HFI1_SL2SC_ATTR(27);
-HFI1_SL2SC_ATTR(28);
-HFI1_SL2SC_ATTR(29);
-HFI1_SL2SC_ATTR(30);
-HFI1_SL2SC_ATTR(31);
-
-static struct attribute *sl2sc_default_attributes[] = {
- &hfi1_sl2sc_attr_0.attr,
- &hfi1_sl2sc_attr_1.attr,
- &hfi1_sl2sc_attr_2.attr,
- &hfi1_sl2sc_attr_3.attr,
- &hfi1_sl2sc_attr_4.attr,
- &hfi1_sl2sc_attr_5.attr,
- &hfi1_sl2sc_attr_6.attr,
- &hfi1_sl2sc_attr_7.attr,
- &hfi1_sl2sc_attr_8.attr,
- &hfi1_sl2sc_attr_9.attr,
- &hfi1_sl2sc_attr_10.attr,
- &hfi1_sl2sc_attr_11.attr,
- &hfi1_sl2sc_attr_12.attr,
- &hfi1_sl2sc_attr_13.attr,
- &hfi1_sl2sc_attr_14.attr,
- &hfi1_sl2sc_attr_15.attr,
- &hfi1_sl2sc_attr_16.attr,
- &hfi1_sl2sc_attr_17.attr,
- &hfi1_sl2sc_attr_18.attr,
- &hfi1_sl2sc_attr_19.attr,
- &hfi1_sl2sc_attr_20.attr,
- &hfi1_sl2sc_attr_21.attr,
- &hfi1_sl2sc_attr_22.attr,
- &hfi1_sl2sc_attr_23.attr,
- &hfi1_sl2sc_attr_24.attr,
- &hfi1_sl2sc_attr_25.attr,
- &hfi1_sl2sc_attr_26.attr,
- &hfi1_sl2sc_attr_27.attr,
- &hfi1_sl2sc_attr_28.attr,
- &hfi1_sl2sc_attr_29.attr,
- &hfi1_sl2sc_attr_30.attr,
- &hfi1_sl2sc_attr_31.attr,
- NULL
-};
-
-static ssize_t sl2sc_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_sl2sc_attr *sattr =
- container_of(attr, struct hfi1_sl2sc_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, sl2sc_kobj);
- struct hfi1_ibport *ibp = &ppd->ibport_data;
-
- return sprintf(buf, "%u\n", ibp->sl_to_sc[sattr->sl]);
-}
-
-static const struct sysfs_ops hfi1_sl2sc_ops = {
- .show = sl2sc_attr_show,
-};
-
-static struct kobj_type hfi1_sl2sc_ktype = {
- .release = port_release,
- .sysfs_ops = &hfi1_sl2sc_ops,
- .default_attrs = sl2sc_default_attributes
-};
-
-/* End sl2sc */
-
-/* Start vl2mtu */
-
-#define HFI1_VL2MTU_ATTR(N) \
- static struct hfi1_vl2mtu_attr hfi1_vl2mtu_attr_##N = { \
- .attr = { .name = __stringify(N), .mode = 0444 }, \
- .vl = N \
- }
-
-struct hfi1_vl2mtu_attr {
- struct attribute attr;
- int vl;
-};
-
-HFI1_VL2MTU_ATTR(0);
-HFI1_VL2MTU_ATTR(1);
-HFI1_VL2MTU_ATTR(2);
-HFI1_VL2MTU_ATTR(3);
-HFI1_VL2MTU_ATTR(4);
-HFI1_VL2MTU_ATTR(5);
-HFI1_VL2MTU_ATTR(6);
-HFI1_VL2MTU_ATTR(7);
-HFI1_VL2MTU_ATTR(8);
-HFI1_VL2MTU_ATTR(9);
-HFI1_VL2MTU_ATTR(10);
-HFI1_VL2MTU_ATTR(11);
-HFI1_VL2MTU_ATTR(12);
-HFI1_VL2MTU_ATTR(13);
-HFI1_VL2MTU_ATTR(14);
-HFI1_VL2MTU_ATTR(15);
-
-static struct attribute *vl2mtu_default_attributes[] = {
- &hfi1_vl2mtu_attr_0.attr,
- &hfi1_vl2mtu_attr_1.attr,
- &hfi1_vl2mtu_attr_2.attr,
- &hfi1_vl2mtu_attr_3.attr,
- &hfi1_vl2mtu_attr_4.attr,
- &hfi1_vl2mtu_attr_5.attr,
- &hfi1_vl2mtu_attr_6.attr,
- &hfi1_vl2mtu_attr_7.attr,
- &hfi1_vl2mtu_attr_8.attr,
- &hfi1_vl2mtu_attr_9.attr,
- &hfi1_vl2mtu_attr_10.attr,
- &hfi1_vl2mtu_attr_11.attr,
- &hfi1_vl2mtu_attr_12.attr,
- &hfi1_vl2mtu_attr_13.attr,
- &hfi1_vl2mtu_attr_14.attr,
- &hfi1_vl2mtu_attr_15.attr,
- NULL
-};
-
-static ssize_t vl2mtu_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct hfi1_vl2mtu_attr *vlattr =
- container_of(attr, struct hfi1_vl2mtu_attr, attr);
- struct hfi1_pportdata *ppd =
- container_of(kobj, struct hfi1_pportdata, vl2mtu_kobj);
- struct hfi1_devdata *dd = ppd->dd;
-
- return sprintf(buf, "%u\n", dd->vld[vlattr->vl].mtu);
-}
-
-static const struct sysfs_ops hfi1_vl2mtu_ops = {
- .show = vl2mtu_attr_show,
-};
-
-static struct kobj_type hfi1_vl2mtu_ktype = {
- .release = port_release,
- .sysfs_ops = &hfi1_vl2mtu_ops,
- .default_attrs = vl2mtu_default_attributes
-};
-
-/* end of per-port file structures and support code */
-
-/*
- * Start of per-unit (or driver, in some cases, but replicated
- * per unit) functions (these get a device *)
- */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
-
- return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
-}
-
-static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
- int ret;
-
- if (!dd->boardname)
- ret = -EINVAL;
- else
- ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
- return ret;
-}
-
-static ssize_t show_boardversion(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
-}
-
-static ssize_t show_nctxts(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- /*
- * Return the smaller of send and receive contexts.
- * Normally, user level applications would require both a send
- * and a receive context, so returning the smaller of the two counts
- * gives a more accurate picture of total contexts available.
- */
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- min(dd->num_rcv_contexts - dd->first_user_ctxt,
- (u32)dd->sc_sizes[SC_USER].count));
-}
-
-static ssize_t show_nfreectxts(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of free user ports (contexts) available. */
- return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
-}
-
-static ssize_t show_serial(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
-}
-
-static ssize_t store_chip_reset(struct device *device,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
- int ret;
-
- if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = hfi1_reset_device(dd->unit);
-bail:
- return ret < 0 ? ret : count;
-}
-
-/*
- * Convert the reported temperature from an integer (reported in
- * units of 0.25 C) to a decimal string with two fractional digits.
- */
-#define temp2str(temp, buf, size, idx) \
- scnprintf((buf) + (idx), (size) - (idx), "%u.%02u ", \
- ((temp) >> 2), ((temp) & 0x3) * 25)
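For example, a raw reading of 42 formats as "10.50": 42 >> 2 = 10 whole degrees and (42 & 0x3) * 25 = 50 hundredths. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int temp = 42;	/* raw sensor value, 0.25 C units */

	printf("%u.%02u\n", temp >> 2, (temp & 0x3) * 25);	/* 10.50 */
	return 0;
}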
-
-/*
- * Dump tempsense values, in decimal, to ease shell-scripts.
- */
-static ssize_t show_tempsense(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
- struct hfi1_devdata *dd = dd_from_dev(dev);
- struct hfi1_temp temp;
- int ret;
-
- ret = hfi1_tempsense_rd(dd, &temp);
- if (!ret) {
- int idx = 0;
-
- idx += temp2str(temp.curr, buf, PAGE_SIZE, idx);
- idx += temp2str(temp.lo_lim, buf, PAGE_SIZE, idx);
- idx += temp2str(temp.hi_lim, buf, PAGE_SIZE, idx);
- idx += temp2str(temp.crit_lim, buf, PAGE_SIZE, idx);
- idx += scnprintf(buf + idx, PAGE_SIZE - idx,
- "%u %u %u\n", temp.triggers & 0x1,
- temp.triggers & 0x2, temp.triggers & 0x4);
- ret = idx;
- }
- return ret;
-}
-
-/*
- * end of per-unit (or driver, in some cases, but replicated
- * per unit) functions
- */
-
-/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hfi, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *hfi1_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_board_id,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_chip_reset,
-};
-
-int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
- struct kobject *kobj)
-{
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (!port_num || port_num > dd->num_pports) {
- dd_dev_err(dd,
- "Skipping infiniband class with invalid port %u\n",
- port_num);
- return -ENODEV;
- }
- ppd = &dd->pport[port_num - 1];
-
- ret = kobject_init_and_add(&ppd->sc2vl_kobj, &hfi1_sc2vl_ktype, kobj,
- "sc2vl");
- if (ret) {
- dd_dev_err(dd,
- "Skipping sc2vl sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail;
- }
- kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->sl2sc_kobj, &hfi1_sl2sc_ktype, kobj,
- "sl2sc");
- if (ret) {
- dd_dev_err(dd,
- "Skipping sl2sc sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_sc2vl;
- }
- kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->vl2mtu_kobj, &hfi1_vl2mtu_ktype, kobj,
- "vl2mtu");
- if (ret) {
- dd_dev_err(dd,
- "Skipping vl2mtu sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_sl2sc;
- }
- kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
-
- ret = kobject_init_and_add(&ppd->pport_cc_kobj, &port_cc_ktype,
- kobj, "CCMgtA");
- if (ret) {
- dd_dev_err(dd,
- "Skipping Congestion Control sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_vl2mtu;
- }
-
- kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
- if (ret) {
- dd_dev_err(dd,
- "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc;
- }
-
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_table_bin_attr);
- if (ret) {
- dd_dev_err(dd,
- "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
- ret, port_num);
- goto bail_cc_entry_bin;
- }
-
- dd_dev_info(dd,
- "IB%u: Congestion Control Agent enabled for port %d\n",
- dd->unit, port_num);
-
- return 0;
-
-bail_cc_entry_bin:
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
-bail_cc:
- kobject_put(&ppd->pport_cc_kobj);
-bail_vl2mtu:
- kobject_put(&ppd->vl2mtu_kobj);
-bail_sl2sc:
- kobject_put(&ppd->sl2sc_kobj);
-bail_sc2vl:
- kobject_put(&ppd->sc2vl_kobj);
-bail:
- return ret;
-}
-
-/*
- * Register and create our files in /sys/class/infiniband.
- */
-int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
-{
- struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
- ret = device_create_file(&dev->dev, hfi1_attributes[i]);
- if (ret)
- goto bail;
- }
-
- return 0;
-bail:
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
- device_remove_file(&dev->dev, hfi1_attributes[i]);
- return ret;
-}
-
-/*
- * Unregister and remove our files in /sys/class/infiniband.
- */
-void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd;
- int i;
-
- for (i = 0; i < dd->num_pports; i++) {
- ppd = &dd->pport[i];
-
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
- sysfs_remove_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
- kobject_put(&ppd->pport_cc_kobj);
- kobject_put(&ppd->vl2mtu_kobj);
- kobject_put(&ppd->sl2sc_kobj);
- kobject_put(&ppd->sc2vl_kobj);
- }
-}
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c
deleted file mode 100644
index 8b62fefcf..000000000
--- a/drivers/staging/rdma/hfi1/trace.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
-{
- struct hfi1_other_headers *ohdr;
- u8 opcode;
- u8 lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
-
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- return hdr_len_by_opcode[opcode] == 0 ?
- 0 : hdr_len_by_opcode[opcode] - (12 + 8);
-}
-
-#define IMM_PRN "imm %d"
-#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
-#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
-#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
-#define ATOMICACKETH_PRN "origdata %lld"
-#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
-
-#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
-
-static u64 ib_u64_get(__be32 *p)
-{
- return ((u64)be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]);
-}
-
-static const char *parse_syndrome(u8 syndrome)
-{
- switch (syndrome >> 5) {
- case 0:
- return "ACK";
- case 1:
- return "RNRNAK";
- case 3:
- return "NAK";
- }
- return "";
-}
-
-const char *parse_everbs_hdrs(
- struct trace_seq *p,
- u8 opcode,
- void *ehdrs)
-{
- union ib_ehdrs *eh = ehdrs;
- const char *ret = trace_seq_buffer_ptr(p);
-
- switch (opcode) {
- /* imm */
- case OP(RC, SEND_LAST_WITH_IMMEDIATE):
- case OP(UC, SEND_LAST_WITH_IMMEDIATE):
- case OP(RC, SEND_ONLY_WITH_IMMEDIATE):
- case OP(UC, SEND_ONLY_WITH_IMMEDIATE):
- case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
- case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
- trace_seq_printf(p, IMM_PRN,
- be32_to_cpu(eh->imm_data));
- break;
- /* reth + imm */
- case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- trace_seq_printf(p, RETH_PRN " " IMM_PRN,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->rc.reth.vaddr),
- be32_to_cpu(eh->rc.reth.rkey),
- be32_to_cpu(eh->rc.reth.length),
- be32_to_cpu(eh->rc.imm_data));
- break;
- /* reth */
- case OP(RC, RDMA_READ_REQUEST):
- case OP(RC, RDMA_WRITE_FIRST):
- case OP(UC, RDMA_WRITE_FIRST):
- case OP(RC, RDMA_WRITE_ONLY):
- case OP(UC, RDMA_WRITE_ONLY):
- trace_seq_printf(p, RETH_PRN,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->rc.reth.vaddr),
- be32_to_cpu(eh->rc.reth.rkey),
- be32_to_cpu(eh->rc.reth.length));
- break;
- case OP(RC, RDMA_READ_RESPONSE_FIRST):
- case OP(RC, RDMA_READ_RESPONSE_LAST):
- case OP(RC, RDMA_READ_RESPONSE_ONLY):
- case OP(RC, ACKNOWLEDGE):
- trace_seq_printf(p, AETH_PRN, be32_to_cpu(eh->aeth) >> 24,
- parse_syndrome(be32_to_cpu(eh->aeth) >> 24),
- be32_to_cpu(eh->aeth) & HFI1_MSN_MASK);
- break;
- /* aeth + atomicacketh */
- case OP(RC, ATOMIC_ACKNOWLEDGE):
- trace_seq_printf(p, AETH_PRN " " ATOMICACKETH_PRN,
- be32_to_cpu(eh->at.aeth) >> 24,
- parse_syndrome(be32_to_cpu(eh->at.aeth) >> 24),
- be32_to_cpu(eh->at.aeth) & HFI1_MSN_MASK,
- (unsigned long long)
- ib_u64_get(eh->at.atomic_ack_eth));
- break;
- /* atomiceth */
- case OP(RC, COMPARE_SWAP):
- case OP(RC, FETCH_ADD):
- trace_seq_printf(p, ATOMICETH_PRN,
- (unsigned long long)ib_u64_get(
- eh->atomic_eth.vaddr),
- eh->atomic_eth.rkey,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->atomic_eth.swap_data),
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->atomic_eth.compare_data));
- break;
- /* deth */
- case OP(UD, SEND_ONLY):
- case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
- trace_seq_printf(p, DETH_PRN,
- be32_to_cpu(eh->ud.deth[0]),
- be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
- break;
- }
- trace_seq_putc(p, 0);
- return ret;
-}
-
-const char *parse_sdma_flags(
- struct trace_seq *p,
- u64 desc0, u64 desc1)
-{
- const char *ret = trace_seq_buffer_ptr(p);
- char flags[5] = { 'x', 'x', 'x', 'x', 0 };
-
- flags[0] = (desc1 & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
- flags[1] = (desc1 & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? 'H' : '-';
- flags[2] = (desc0 & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
- flags[3] = (desc0 & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
- trace_seq_printf(p, "%s", flags);
- if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
- trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
- (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
- SDMA_DESC1_HEADER_MODE_MASK),
- (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
- SDMA_DESC1_HEADER_INDEX_MASK),
- (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
- SDMA_DESC1_HEADER_DWS_MASK));
- return ret;
-}
-
-const char *print_u32_array(
- struct trace_seq *p,
- u32 *arr, int len)
-{
- int i;
- const char *ret = trace_seq_buffer_ptr(p);
-
- for (i = 0; i < len ; i++)
- trace_seq_printf(p, "%s%#x", i == 0 ? "" : " ", arr[i]);
- trace_seq_putc(p, 0);
- return ret;
-}
-
-const char *print_u64_array(
- struct trace_seq *p,
- u64 *arr, int len)
-{
- int i;
- const char *ret = trace_seq_buffer_ptr(p);
-
- for (i = 0; i < len; i++)
- trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
- trace_seq_putc(p, 0);
- return ret;
-}
-
-__hfi1_trace_fn(PKT);
-__hfi1_trace_fn(PROC);
-__hfi1_trace_fn(SDMA);
-__hfi1_trace_fn(LINKVERB);
-__hfi1_trace_fn(DEBUG);
-__hfi1_trace_fn(SNOOP);
-__hfi1_trace_fn(CNTR);
-__hfi1_trace_fn(PIO);
-__hfi1_trace_fn(DC8051);
-__hfi1_trace_fn(FIRMWARE);
-__hfi1_trace_fn(RCVCTRL);
-__hfi1_trace_fn(TID);
-__hfi1_trace_fn(MMU);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h
deleted file mode 100644
index 963dc948c..000000000
--- a/drivers/staging/rdma/hfi1/trace.h
+++ /dev/null
@@ -1,1369 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#undef TRACE_SYSTEM_VAR
-#define TRACE_SYSTEM_VAR hfi1
-
-#if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __HFI1_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/trace_seq.h>
-
-#include "hfi.h"
-#include "mad.h"
-#include "sdma.h"
-
-#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
-#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
-
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype) \
-__print_symbolic(etype, \
- packettype_name(EXPECTED), \
- packettype_name(EAGER), \
- packettype_name(IB), \
- packettype_name(ERROR), \
- packettype_name(BYPASS))
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_rx
-
-TRACE_EVENT(hfi1_rcvhdr,
- TP_PROTO(struct hfi1_devdata *dd,
-		     u32 ctxt,
-		     u64 eflags,
- u32 etype,
- u32 hlen,
- u32 tlen,
- u32 updegr,
- u32 etail
- ),
- TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u64, eflags)
- __field(u32, ctxt)
- __field(u32, etype)
- __field(u32, hlen)
- __field(u32, tlen)
- __field(u32, updegr)
- __field(u32, etail)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->eflags = eflags;
- __entry->ctxt = ctxt;
- __entry->etype = etype;
- __entry->hlen = hlen;
- __entry->tlen = tlen;
- __entry->updegr = updegr;
- __entry->etail = etail;
- ),
- TP_printk(
- "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->eflags,
- __entry->etype, show_packettype(__entry->etype),
- __entry->hlen,
- __entry->tlen,
- __entry->updegr,
- __entry->etail
- )
-);
-
-TRACE_EVENT(hfi1_receive_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
- TP_ARGS(dd, ctxt),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u32, ctxt)
- __field(u8, slow_path)
- __field(u8, dma_rtail)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt) {
- __entry->slow_path = 1;
- __entry->dma_rtail = 0xFF;
- } else if (dd->rcd[ctxt]->do_interrupt ==
-			   &handle_receive_interrupt_dma_rtail) {
- __entry->dma_rtail = 1;
- __entry->slow_path = 0;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_nodma_rtail) {
- __entry->dma_rtail = 0;
- __entry->slow_path = 0;
- }
- ),
- TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->slow_path,
- __entry->dma_rtail
- )
-);
-
-TRACE_EVENT(hfi1_exp_tid_reg,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr,
- u32 npages, unsigned long va, unsigned long pa,
- dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
-		  (unsigned long long)__entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_exp_tid_unreg,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, u32 npages,
- unsigned long va, unsigned long pa, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
-		  (unsigned long long)__entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_exp_tid_inval,
- TP_PROTO(unsigned ctxt, u16 subctxt, unsigned long va, u32 rarr,
- u32 npages, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(unsigned long, va)
- __field(u32, rarr)
- __field(u32, npages)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->va = va;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->va,
-		  (unsigned long long)__entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_mmu_invalidate,
- TP_PROTO(unsigned ctxt, u16 subctxt, const char *type,
- unsigned long start, unsigned long end),
- TP_ARGS(ctxt, subctxt, type, start, end),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __string(type, type)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __assign_str(type, type);
- __entry->start = start;
- __entry->end = end;
- ),
- TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
- __entry->ctxt,
- __entry->subctxt,
- __get_str(type),
- __entry->start,
- __entry->end
- )
- );
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_tx
-
-TRACE_EVENT(hfi1_piofree,
- TP_PROTO(struct send_context *sc, int extra),
- TP_ARGS(sc, extra),
- TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(int, extra)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->extra = extra;
- ),
- TP_printk("[%s] ctxt %u(%u) extra %d",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->extra
- )
-);
-
-TRACE_EVENT(hfi1_wantpiointr,
- TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
- TP_ARGS(sc, needint, credit_ctrl),
- TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(u32, needint)
- __field(u64, credit_ctrl)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->needint = needint;
- __entry->credit_ctrl = credit_ctrl;
- ),
- TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->needint,
- (unsigned long long)__entry->credit_ctrl
- )
-);
-
-DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, flags)
- __field(u32, s_flags)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->flags = flags;
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- ),
- TP_printk(
- "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->flags,
- __entry->s_flags
- )
-);
-
-DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags));
-
-DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_ibhdrs
-
-u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
-const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
-
-#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
-
-const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
-
-#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
-
-#define lrh_name(lrh) { HFI1_##lrh, #lrh }
-#define show_lnh(lrh) \
-__print_symbolic(lrh, \
- lrh_name(LRH_BTH), \
- lrh_name(LRH_GRH))
-
-#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
-#define show_ib_opcode(opcode) \
-__print_symbolic(opcode, \
- ib_opcode_name(RC_SEND_FIRST), \
- ib_opcode_name(RC_SEND_MIDDLE), \
- ib_opcode_name(RC_SEND_LAST), \
- ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_SEND_ONLY), \
- ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_FIRST), \
- ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(RC_RDMA_WRITE_LAST), \
- ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_READ_REQUEST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
- ib_opcode_name(RC_ACKNOWLEDGE), \
- ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
- ib_opcode_name(RC_COMPARE_SWAP), \
- ib_opcode_name(RC_FETCH_ADD), \
- ib_opcode_name(UC_SEND_FIRST), \
- ib_opcode_name(UC_SEND_MIDDLE), \
- ib_opcode_name(UC_SEND_LAST), \
- ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_SEND_ONLY), \
- ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_FIRST), \
- ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(UC_RDMA_WRITE_LAST), \
- ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UD_SEND_ONLY), \
- ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(CNP))
-
-#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
-#define BTH_PRN \
- "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
- "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
-#define EHDR_PRN "%s"
-
-DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- /* LRH */
- __field(u8, vl)
- __field(u8, lver)
- __field(u8, sl)
- __field(u8, lnh)
- __field(u16, dlid)
- __field(u16, len)
- __field(u16, slid)
- /* BTH */
- __field(u8, opcode)
- __field(u8, se)
- __field(u8, m)
- __field(u8, pad)
- __field(u8, tver)
- __field(u16, pkey)
- __field(u8, f)
- __field(u8, b)
- __field(u32, qpn)
- __field(u8, a)
- __field(u32, psn)
- /* extended headers */
- __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- DD_DEV_ASSIGN(dd);
- /* LRH */
- __entry->vl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
- __entry->lver =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
- __entry->sl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->lnh =
- (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- __entry->dlid =
- be16_to_cpu(hdr->lrh[1]);
- /* allow for larger len */
- __entry->len =
- be16_to_cpu(hdr->lrh[2]);
- __entry->slid =
- be16_to_cpu(hdr->lrh[3]);
- /* BTH */
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- __entry->opcode =
- (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->se =
- (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
- __entry->m =
- (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
- __entry->pad =
- (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- __entry->tver =
- (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
- __entry->pkey =
- be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->f =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
- HFI1_FECN_MASK;
- __entry->b =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
- HFI1_BECN_MASK;
- __entry->qpn =
- be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->a =
- (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
- /* allow for larger PSN */
- __entry->psn =
- be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
- /* extended headers */
- memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
- ibhdr_exhdr_len(hdr));
- ),
- TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
- __get_str(dev),
- /* LRH */
- __entry->vl,
- __entry->lver,
- __entry->sl,
- __entry->lnh, show_lnh(__entry->lnh),
- __entry->dlid,
- __entry->len,
- __entry->slid,
- /* BTH */
- __entry->opcode, show_ib_opcode(__entry->opcode),
- __entry->se,
- __entry->m,
- __entry->pad,
- __entry->tver,
- __entry->pkey,
- __entry->f,
- __entry->b,
- __entry->qpn,
- __entry->a,
- __entry->psn,
- /* extended headers */
- __parse_ib_ehdrs(
- __entry->opcode,
- (void *)__get_dynamic_array(ehdrs))
- )
-);
-
-DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-#define SNOOP_PRN \
- "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
- "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_snoop
-
-TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct hfi1_ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, slid)
- __field(u16, dlid)
- __field(u32, qpn)
- __field(u8, opcode)
- __field(u8, sl)
- __field(u16, pkey)
- __field(u32, hdr_len)
- __field(u32, data_len)
- __field(u8, lnh)
- __dynamic_array(u8, raw_hdr, hdr_len)
- __dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- DD_DEV_ASSIGN(dd);
- __entry->slid = be16_to_cpu(hdr->lrh[3]);
- __entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->hdr_len = hdr_len;
- __entry->data_len = data_len;
- memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
- memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk(
- "[%s] " SNOOP_PRN,
- __get_str(dev),
- __entry->slid,
- __entry->dlid,
- __entry->qpn,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->sl,
- __entry->pkey,
- __entry->hdr_len,
- __entry->data_len
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_ctxts
-
-#define UCTXT_FMT \
- "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
- "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
-TRACE_EVENT(hfi1_uctxtdata,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
- TP_ARGS(dd, uctxt),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(u32, credits)
- __field(u64, hw_free)
- __field(u64, piobase)
- __field(u16, rcvhdrq_cnt)
- __field(u64, rcvhdrq_phys)
- __field(u32, eager_cnt)
- __field(u64, rcvegr_phys)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = uctxt->ctxt;
- __entry->credits = uctxt->sc->credits;
- __entry->hw_free = (u64)uctxt->sc->hw_free;
- __entry->piobase = (u64)uctxt->sc->base_addr;
- __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
- __entry->eager_cnt = uctxt->egrbufs.alloced;
- __entry->rcvegr_phys =
- uctxt->egrbufs.rcvtids[0].phys;
- ),
- TP_printk("[%s] ctxt %u " UCTXT_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->credits,
- __entry->hw_free,
- __entry->piobase,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_phys,
- __entry->eager_cnt,
- __entry->rcvegr_phys
- )
-);
-
-#define CINFO_FMT \
- "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
-TRACE_EVENT(hfi1_ctxt_info,
- TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
- struct hfi1_ctxt_info cinfo),
- TP_ARGS(dd, ctxt, subctxt, cinfo),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(unsigned, subctxt)
- __field(u16, egrtids)
- __field(u16, rcvhdrq_cnt)
- __field(u16, rcvhdrq_size)
- __field(u16, sdma_ring_size)
- __field(u32, rcvegr_size)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->egrtids = cinfo.egrtids;
- __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
- __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
- __entry->sdma_ring_size = cinfo.sdma_ring_size;
- __entry->rcvegr_size = cinfo.rcvegr_size;
- ),
- TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->egrtids,
- __entry->rcvegr_size,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_size,
- __entry->sdma_ring_size
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_sma
-
-#define BCT_FORMAT \
- "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
-
-#define BCT(field) \
- be16_to_cpu( \
- ((struct buffer_control *)__get_dynamic_array(bct))->field \
- )
-
-DECLARE_EVENT_CLASS(hfi1_bct_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct buffer_control *bc),
- TP_ARGS(dd, bc),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __dynamic_array(u8, bct, sizeof(*bc))
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- memcpy(__get_dynamic_array(bct), bc,
- sizeof(*bc));
- ),
- TP_printk(BCT_FORMAT,
- BCT(overall_shared_limit),
-
- BCT(vl[0].dedicated),
- BCT(vl[0].shared),
-
- BCT(vl[1].dedicated),
- BCT(vl[1].shared),
-
- BCT(vl[2].dedicated),
- BCT(vl[2].shared),
-
- BCT(vl[3].dedicated),
- BCT(vl[3].shared),
-
- BCT(vl[4].dedicated),
- BCT(vl[4].shared),
-
- BCT(vl[5].dedicated),
- BCT(vl[5].shared),
-
- BCT(vl[6].dedicated),
- BCT(vl[6].shared),
-
- BCT(vl[7].dedicated),
- BCT(vl[7].shared),
-
- BCT(vl[15].dedicated),
- BCT(vl[15].shared)
- )
-);
-
-DEFINE_EVENT(hfi1_bct_template, bct_set,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc));
-
-DEFINE_EVENT(hfi1_bct_template, bct_get,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_sdma
-
-TRACE_EVENT(hfi1_sdma_descriptor,
- TP_PROTO(struct sdma_engine *sde,
- u64 desc0,
- u64 desc1,
- u16 e,
- void *descp),
- TP_ARGS(sde, desc0, desc1, e, descp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(void *, descp)
- __field(u64, desc0)
- __field(u64, desc1)
- __field(u16, e)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->desc0 = desc0;
- __entry->desc1 = desc1;
- __entry->idx = sde->this_idx;
- __entry->descp = descp;
- __entry->e = e;
- ),
- TP_printk(
- "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
- __get_str(dev),
- __entry->idx,
- __parse_sdma_flags(__entry->desc0, __entry->desc1),
- (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
- SDMA_DESC0_PHY_ADDR_MASK,
- (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
- SDMA_DESC1_GENERATION_MASK),
- (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
- SDMA_DESC0_BYTE_COUNT_MASK),
- __entry->desc0,
- __entry->desc1,
- __entry->descp,
- __entry->e
- )
-);
-
-TRACE_EVENT(hfi1_sdma_engine_select,
- TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
- TP_ARGS(dd, sel, vl, idx),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u32, sel)
- __field(u8, vl)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->sel = sel;
- __entry->vl = vl;
- __entry->idx = idx;
- ),
- TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
- __get_str(dev),
- __entry->idx,
- __entry->sel,
- __entry->vl
- )
-);
-
-DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, status)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->status = status;
- __entry->idx = sde->this_idx;
- ),
- TP_printk("[%s] SDE(%u) status %llx",
- __get_str(dev),
- __entry->idx,
- (unsigned long long)__entry->status
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status)
-);
-
-DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status)
-);
-
-DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(int, aidx)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->idx = sde->this_idx;
- __entry->aidx = aidx;
- ),
- TP_printk("[%s] SDE(%u) aidx %d",
- __get_str(dev),
- __entry->idx,
- __entry->aidx
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx));
-
-DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx));
-
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
-TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(struct sdma_engine *sde,
- u16 hwhead,
- u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- __entry->sn = txp ? txp->sn : ~0;
- ),
- TP_printk(
- "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->sn,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
-);
-#else
-TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(struct sdma_engine *sde,
- u16 hwhead, u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- ),
- TP_printk(
- "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
-);
-#endif
-
-DECLARE_EVENT_CLASS(hfi1_sdma_sn,
- TP_PROTO(struct sdma_engine *sde, u64 sn),
- TP_ARGS(sde, sn),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->sn = sn;
- __entry->idx = sde->this_idx;
- ),
- TP_printk("[%s] SDE(%u) sn %llu",
- __get_str(dev),
- __entry->idx,
- __entry->sn
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 sn
- ),
- TP_ARGS(sde, sn)
-);
-
-DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
- TP_PROTO(struct sdma_engine *sde, u64 sn),
- TP_ARGS(sde, sn)
-);
-
-#define USDMA_HDR_FORMAT \
- "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
-
-TRACE_EVENT(hfi1_sdma_user_header,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
- struct hfi1_pkt_header *hdr, u32 tidval),
- TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, req)
- __field(__le32, pbc0)
- __field(__le32, pbc1)
- __field(__be32, lrh0)
- __field(__be32, lrh1)
- __field(__be32, bth0)
- __field(__be32, bth1)
- __field(__be32, bth2)
- __field(__le32, kdeth0)
- __field(__le32, kdeth1)
- __field(__le32, kdeth2)
- __field(__le32, kdeth3)
- __field(__le32, kdeth4)
- __field(__le32, kdeth5)
- __field(__le32, kdeth6)
- __field(__le32, kdeth7)
- __field(__le32, kdeth8)
- __field(u32, tidval)
- ),
- TP_fast_assign(
- __le32 *pbc = (__le32 *)hdr->pbc;
- __be32 *lrh = (__be32 *)hdr->lrh;
- __be32 *bth = (__be32 *)hdr->bth;
- __le32 *kdeth = (__le32 *)&hdr->kdeth;
-
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->req = req;
- __entry->pbc0 = pbc[0];
- __entry->pbc1 = pbc[1];
- __entry->lrh0 = be32_to_cpu(lrh[0]);
- __entry->lrh1 = be32_to_cpu(lrh[1]);
- __entry->bth0 = be32_to_cpu(bth[0]);
- __entry->bth1 = be32_to_cpu(bth[1]);
- __entry->bth2 = be32_to_cpu(bth[2]);
- __entry->kdeth0 = kdeth[0];
- __entry->kdeth1 = kdeth[1];
- __entry->kdeth2 = kdeth[2];
- __entry->kdeth3 = kdeth[3];
- __entry->kdeth4 = kdeth[4];
- __entry->kdeth5 = kdeth[5];
- __entry->kdeth6 = kdeth[6];
- __entry->kdeth7 = kdeth[7];
- __entry->kdeth8 = kdeth[8];
- __entry->tidval = tidval;
- ),
- TP_printk(USDMA_HDR_FORMAT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->req,
- __entry->pbc1,
- __entry->pbc0,
- __entry->lrh0,
- __entry->lrh1,
- __entry->bth0,
- __entry->bth1,
- __entry->bth2,
- __entry->kdeth0,
- __entry->kdeth1,
- __entry->kdeth2,
- __entry->kdeth3,
- __entry->kdeth4,
- __entry->kdeth5,
- __entry->kdeth6,
- __entry->kdeth7,
- __entry->kdeth8,
- __entry->tidval
- )
- );
-
-#define SDMA_UREQ_FMT \
- "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
-TRACE_EVENT(hfi1_sdma_user_reqinfo,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
- TP_ARGS(dd, ctxt, subctxt, i),
- TP_STRUCT__entry(
-		DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u8, ver_opcode)
- __field(u8, iovcnt)
- __field(u16, npkts)
- __field(u16, fragsize)
- __field(u16, comp_idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->ver_opcode = i[0] & 0xff;
- __entry->iovcnt = (i[0] >> 8) & 0xff;
- __entry->npkts = i[1];
- __entry->fragsize = i[2];
- __entry->comp_idx = i[3];
- ),
- TP_printk(SDMA_UREQ_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->ver_opcode,
- __entry->iovcnt,
- __entry->npkts,
- __entry->fragsize,
- __entry->comp_idx
- )
- );
-
-#define usdma_complete_name(st) { st, #st }
-#define show_usdma_complete_state(st) \
- __print_symbolic(st, \
- usdma_complete_name(FREE), \
- usdma_complete_name(QUEUED), \
- usdma_complete_name(COMPLETE), \
- usdma_complete_name(ERROR))
-
-TRACE_EVENT(hfi1_sdma_user_completion,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
- u8 state, int code),
- TP_ARGS(dd, ctxt, subctxt, idx, state, code),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, idx)
- __field(u8, state)
- __field(int, code)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->idx = idx;
- __entry->state = state;
- __entry->code = code;
- ),
- TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
- __get_str(dev), __entry->ctxt, __entry->subctxt,
- __entry->idx, show_usdma_complete_state(__entry->state),
- __entry->code)
- );
-
-const char *print_u32_array(struct trace_seq *, u32 *, int);
-#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
-
-TRACE_EVENT(hfi1_sdma_user_header_ahg,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
- u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
- TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, req)
- __field(u8, sde)
- __field(u8, idx)
- __field(int, len)
- __field(u32, tidval)
- __array(u32, ahg, 10)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->req = req;
- __entry->sde = sde;
- __entry->idx = ahgidx;
- __entry->len = len;
- __entry->tidval = tidval;
- memcpy(__entry->ahg, ahg, len * sizeof(u32));
- ),
- TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->req,
- __entry->sde,
- __entry->idx,
- __entry->len - 1,
- __print_u32_hex(__entry->ahg, __entry->len),
- __entry->tidval
- )
- );
-
-TRACE_EVENT(hfi1_sdma_state,
- TP_PROTO(struct sdma_engine *sde,
- const char *cstate,
- const char *nstate
- ),
- TP_ARGS(sde, cstate, nstate),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __string(curstate, cstate)
- __string(newstate, nstate)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __assign_str(curstate, cstate);
- __assign_str(newstate, nstate);
- ),
- TP_printk("[%s] current state %s new state %s",
- __get_str(dev),
- __get_str(curstate),
- __get_str(newstate)
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_rc
-
-DECLARE_EVENT_CLASS(hfi1_rc_template,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, s_flags)
- __field(u32, psn)
- __field(u32, s_psn)
- __field(u32, s_next_psn)
- __field(u32, s_sending_psn)
- __field(u32, s_sending_hpsn)
- __field(u32, r_psn)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- __entry->psn = psn;
- __entry->s_psn = qp->s_psn;
- __entry->s_next_psn = qp->s_next_psn;
- __entry->s_sending_psn = qp->s_sending_psn;
- __entry->s_sending_hpsn = qp->s_sending_hpsn;
- __entry->r_psn = qp->r_psn;
- ),
- TP_printk(
- "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->s_flags,
- __entry->psn,
- __entry->s_psn,
- __entry->s_next_psn,
- __entry->s_sending_psn,
- __entry->s_sending_hpsn,
- __entry->r_psn
- )
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_misc
-
-TRACE_EVENT(hfi1_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
- int src),
- TP_ARGS(dd, is_entry, src),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __array(char, buf, 64)
- __field(int, src)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd)
- is_entry->is_name(__entry->buf, 64,
- src - is_entry->start);
- __entry->src = src;
- ),
- TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
- __entry->src)
-);
-
-/*
- * Note:
- * This produces a REALLY ugly trace in the console output when the string is
- * too long.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_trace
-
-#define MAX_MSG_LEN 512
-
-DECLARE_EVENT_CLASS(hfi1_trace_template,
- TP_PROTO(const char *function, struct va_format *vaf),
- TP_ARGS(function, vaf),
- TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
- ),
- TP_printk("(%s) %s",
- __get_str(function),
- __get_str(msg))
-);
-
-/*
- * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
- * actual function to work and can not be in a macro.
- */
-#define __hfi1_trace_def(lvl) \
-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
- \
-DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
- TP_PROTO(const char *function, struct va_format *vaf), \
- TP_ARGS(function, vaf))
-
-#define __hfi1_trace_fn(lvl) \
-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
-{ \
- struct va_format vaf = { \
- .fmt = fmt, \
- }; \
- va_list args; \
- \
- va_start(args, fmt); \
- vaf.va = &args; \
- trace_hfi1_ ##lvl(func, &vaf); \
- va_end(args); \
-}
-
-/*
- * To create a new trace level simply define it below and as a __hfi1_trace_fn
- * in trace.c. This will create all the hooks for calling
- * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
- * the debugfs stuff.
- */
-__hfi1_trace_def(PKT);
-__hfi1_trace_def(PROC);
-__hfi1_trace_def(SDMA);
-__hfi1_trace_def(LINKVERB);
-__hfi1_trace_def(DEBUG);
-__hfi1_trace_def(SNOOP);
-__hfi1_trace_def(CNTR);
-__hfi1_trace_def(PIO);
-__hfi1_trace_def(DC8051);
-__hfi1_trace_def(FIRMWARE);
-__hfi1_trace_def(RCVCTRL);
-__hfi1_trace_def(TID);
-__hfi1_trace_def(MMU);
-
-#define hfi1_cdbg(which, fmt, ...) \
- __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
-
-#define hfi1_dbg(fmt, ...) \
- hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
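
The comment above the __hfi1_trace_def() list describes the recipe: one line
in this header plus one __hfi1_trace_fn() line in trace.c creates a complete
trace level. A minimal sketch, assuming a hypothetical QSFP level (the name,
and the port variable in the caller, are illustrative only):

	/* trace.h: declares trace_hfi1_QSFP() and hooks up hfi1_cdbg() */
	__hfi1_trace_def(QSFP);

	/* trace.c: emits the out-of-line varargs wrapper */
	__hfi1_trace_fn(QSFP);

	/* caller: logs through the new trace level */
	hfi1_cdbg(QSFP, "module on port %u reset", port);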
-
-/*
- * Define HFI1_EARLY_DBG at compile time or here to enable early trace
- * messages. Do not check in an enablement for this.
- */
-
-#ifdef HFI1_EARLY_DBG
-#define hfi1_dbg_early(fmt, ...) \
- trace_printk(fmt, ##__VA_ARGS__)
-#else
-#define hfi1_dbg_early(fmt, ...)
-#endif
-
-#endif /* __HFI1_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
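
The TRACE_INCLUDE_* block above is the standard tracepoint idiom: any number
of compilation units may include "trace.h" to fire these events, but exactly
one .c file must also define CREATE_TRACE_POINTS so that
<trace/define_trace.h> expands the event bodies once. A minimal sketch of
that single translation unit (this mirrors what the driver's trace.c does):

	/* trace.c -- instantiate the tracepoints declared in trace.h */
	#define CREATE_TRACE_POINTS
	#include "trace.h"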
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c
deleted file mode 100644
index e82e52a63..000000000
--- a/drivers/staging/rdma/hfi1/twsi.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "hfi.h"
-#include "twsi.h"
-
-/*
- * "Two Wire Serial Interface" support.
- *
- * Originally written for a not-quite-i2c serial eeprom, which is
- * still used on some supported boards. Later boards have added a
- * variety of other uses, most board-specific, so the bit-boffing
- * part has been split off to this file, while the other parts
- * have been moved to chip-specific files.
- *
- * We have also dropped all pretense of a fully generic interface
- * (e.g. pretending we don't know whether '1' is the higher voltage), as
- * the restrictions of the generic i2c interface (e.g. no access from
- * the driver itself) make it unsuitable for this use.
- */
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the hfi1_ib device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip. Since we are delaying anyway, the cost doesn't
- * hurt, and it makes the bit twiddling more regular.
- */
-static void i2c_wait_for_writes(struct hfi1_devdata *dd, u32 target)
-{
- /*
- * implicit read of EXTStatus is as good as explicit
- * read of scratch, if all we want to do is flush
- * writes.
- */
- hfi1_gpio_mod(dd, target, 0, 0, 0);
- rmb(); /* inlined, so prevent compiler reordering */
-}
-
-/*
- * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
- * for "almost compliant" modules
- */
-#define SCL_WAIT_USEC 1000
-
-/* TWSI_BUF_WAIT_USEC is the time the bus must be free between a STOP or
- * ACK and the next START. Should be 20, but some chips need more.
- */
-#define TWSI_BUF_WAIT_USEC 60
-
-static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit)
-{
- u32 mask;
-
- udelay(1);
-
- mask = QSFP_HFI0_I2CCLK;
-
- /* SCL is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask);
-
- /*
-	 * Allow for slow slaves with a simple delay on the
-	 * falling edge, sampling on the rise.
- */
- if (!bit) {
- udelay(2);
- } else {
- int rise_usec;
-
- for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
- if (mask & hfi1_gpio_mod(dd, target, 0, 0, 0))
- break;
- udelay(2);
- }
- if (rise_usec <= 0)
- dd_dev_err(dd, "SCL interface stuck low > %d uSec\n",
- SCL_WAIT_USEC);
- }
- i2c_wait_for_writes(dd, target);
-}
-
-static u8 scl_in(struct hfi1_devdata *dd, u32 target, int wait)
-{
- u32 read_val, mask;
-
- mask = QSFP_HFI0_I2CCLK;
- /* SCL is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, 0, mask);
- read_val = hfi1_gpio_mod(dd, target, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd, target);
- return (read_val & mask) >> GPIO_SCL_NUM;
-}
-
-static void sda_out(struct hfi1_devdata *dd, u32 target, u8 bit)
-{
- u32 mask;
-
- mask = QSFP_HFI0_I2CDAT;
-
- /* SDA is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, bit ? 0 : mask, mask);
-
- i2c_wait_for_writes(dd, target);
- udelay(2);
-}
-
-static u8 sda_in(struct hfi1_devdata *dd, u32 target, int wait)
-{
- u32 read_val, mask;
-
- mask = QSFP_HFI0_I2CDAT;
- /* SDA is meant to be bare-drain, so never set "OUT", just DIR */
- hfi1_gpio_mod(dd, target, 0, 0, mask);
- read_val = hfi1_gpio_mod(dd, target, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd, target);
- return (read_val & mask) >> GPIO_SDA_NUM;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the hfi1_ib device
- */
-static int i2c_ackrcv(struct hfi1_devdata *dd, u32 target)
-{
- u8 ack_received;
-
- /* AT ENTRY SCL = LOW */
- /* change direction, ignore data */
-	(void)sda_in(dd, target, 1);
- scl_out(dd, target, 1);
- ack_received = sda_in(dd, target, 1) == 0;
- scl_out(dd, target, 0);
- return ack_received;
-}
-
-static void stop_cmd(struct hfi1_devdata *dd, u32 target);
-
-/**
- * rd_byte - read a byte, sending STOP on last, else ACK
- * @dd: the hfi1_ib device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct hfi1_devdata *dd, u32 target, int last)
-{
- int bit_cntr, data;
-
- data = 0;
-
- for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
- data <<= 1;
- scl_out(dd, target, 1);
- data |= sda_in(dd, target, 0);
- scl_out(dd, target, 0);
- }
- if (last) {
- scl_out(dd, target, 1);
- stop_cmd(dd, target);
- } else {
- sda_out(dd, target, 0);
- scl_out(dd, target, 1);
- scl_out(dd, target, 0);
- sda_out(dd, target, 1);
- }
- return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the hfi1_ib device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct hfi1_devdata *dd, u32 target, u8 data)
-{
- int bit_cntr;
- u8 bit;
-
- for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
- bit = (data >> bit_cntr) & 1;
- sda_out(dd, target, bit);
- scl_out(dd, target, 1);
- scl_out(dd, target, 0);
- }
-	return !i2c_ackrcv(dd, target);
-}
-
-/*
- * issue TWSI start sequence:
- * (both clock/data high, clock high, data low while clock is high)
- */
-static void start_seq(struct hfi1_devdata *dd, u32 target)
-{
- sda_out(dd, target, 1);
- scl_out(dd, target, 1);
- sda_out(dd, target, 0);
- udelay(1);
- scl_out(dd, target, 0);
-}
-
-/**
- * stop_seq - transmit the stop sequence
- * @dd: the hfi1_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_seq(struct hfi1_devdata *dd, u32 target)
-{
- scl_out(dd, target, 0);
- sda_out(dd, target, 0);
- scl_out(dd, target, 1);
- sda_out(dd, target, 1);
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the hfi1_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct hfi1_devdata *dd, u32 target)
-{
- stop_seq(dd, target);
- udelay(TWSI_BUF_WAIT_USEC);
-}
-
-/**
- * hfi1_twsi_reset - reset I2C communication
- * @dd: the hfi1_ib device
- * returns 0 if ok, -EIO on error
- */
-int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target)
-{
- int clock_cycles_left = 9;
- u32 mask;
-
- /* Both SCL and SDA should be high. If not, there
- * is something wrong.
- */
- mask = QSFP_HFI0_I2CCLK | QSFP_HFI0_I2CDAT;
-
- /*
- * Force pins to desired innocuous state.
- * This is the default power-on state with out=0 and dir=0,
-	 * so tri-stated and should be floating high (barring HW problems).
- */
- hfi1_gpio_mod(dd, target, 0, 0, mask);
-
- /* Check if SCL is low, if it is low then we have a slave device
- * misbehaving and there is not much we can do.
- */
- if (!scl_in(dd, target, 0))
- return -EIO;
-
- /* Check if SDA is low, if it is low then we have to clock SDA
- * up to 9 times for the device to release the bus
- */
- while (clock_cycles_left--) {
- if (sda_in(dd, target, 0))
- return 0;
- scl_out(dd, target, 0);
- scl_out(dd, target, 1);
- }
-
- return -EIO;
-}
-
-#define HFI1_TWSI_START 0x100
-#define HFI1_TWSI_STOP 0x200
-
-/* Write byte to TWSI, optionally prefixed with START or suffixed with
- * STOP.
- * returns 0 if OK (ACK received), else != 0
- */
-static int twsi_wr(struct hfi1_devdata *dd, u32 target, int data, int flags)
-{
- int ret = 1;
-
- if (flags & HFI1_TWSI_START)
- start_seq(dd, target);
-
- /* Leaves SCL low (from i2c_ackrcv()) */
- ret = wr_byte(dd, target, data);
-
- if (flags & HFI1_TWSI_STOP)
- stop_cmd(dd, target);
- return ret;
-}
-
-/* Added functionality for IBA7220-based cards */
-#define HFI1_TEMP_DEV 0x98
-
-/*
- * hfi1_twsi_blk_rd
- * General interface for data transfer from twsi devices.
- * One vestige of its former role is that it recognizes a device
- * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * address within device. On all other devices found on board handled by
- * this driver, the device is followed by a N-byte "address" which selects
- * the "register" or "offset" within the device from which data should
- * be read.
- */
-int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- void *buffer, int len)
-{
- u8 *bp = buffer;
- int ret = 1;
- int i;
- int offset_size;
-
- /* obtain the offset size, strip it from the device address */
- offset_size = (dev >> 8) & 0xff;
- dev &= 0xff;
-
- /* allow at most a 2 byte offset */
- if (offset_size > 2)
- goto bail;
-
- if (dev == HFI1_TWSI_NO_DEV) {
- /* legacy not-really-I2C */
- addr = (addr << 1) | READ_CMD;
- ret = twsi_wr(dd, target, addr, HFI1_TWSI_START);
- } else {
- /* Actual I2C */
- if (offset_size) {
- ret = twsi_wr(dd, target,
- dev | WRITE_CMD, HFI1_TWSI_START);
- if (ret) {
- stop_cmd(dd, target);
- goto bail;
- }
-
- for (i = 0; i < offset_size; i++) {
- ret = twsi_wr(dd, target,
- (addr >> (i * 8)) & 0xff, 0);
- udelay(TWSI_BUF_WAIT_USEC);
- if (ret) {
- dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n",
- i, addr);
- goto bail;
- }
- }
- }
- ret = twsi_wr(dd, target, dev | READ_CMD, HFI1_TWSI_START);
- }
- if (ret) {
- stop_cmd(dd, target);
- goto bail;
- }
-
- /*
-	 * block devices keep clocking data out as long as we ack,
- * automatically incrementing the address. Some have "pages"
- * whose boundaries will not be crossed, but the handling
- * of these is left to the caller, who is in a better
- * position to know.
- */
- while (len-- > 0) {
- /*
- * Get and store data, sending ACK if length remaining,
- * else STOP
- */
- *bp++ = rd_byte(dd, target, !len);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
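
The comment inside the read loop above leaves "page" boundary handling to
the caller. A hedged sketch of such a caller -- the 256-byte page size, the
0xA0 device code, and the qsfp_read_page() name are illustrative assumptions,
not part of this driver:

	#define EEPROM_PAGE 256	/* assumed page size */

	static int qsfp_read_page(struct hfi1_devdata *dd, u32 target,
				  int addr, u8 *buf, int len)
	{
		while (len > 0) {
			/* never let one transfer cross a page boundary */
			int chunk = min(len,
					EEPROM_PAGE - (addr % EEPROM_PAGE));

			/* (1 << 8) requests a one-byte offset, per the
			 * offset_size decoding at the top of the function
			 */
			if (hfi1_twsi_blk_rd(dd, target, (1 << 8) | 0xA0,
					     addr, buf, chunk))
				return -EIO;
			addr += chunk;
			buf += chunk;
			len -= chunk;
		}
		return 0;
	}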
-
-/*
- * hfi1_twsi_blk_wr
- * General interface for data transfer to twsi devices.
- * One vestige of its former role is that it recognizes a device
- * HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * address within device. On all other devices found on board handled by
- * this driver, the device is followed by a N-byte "address" which selects
- * the "register" or "offset" within the device to which data should
- * be written.
- */
-int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- const void *buffer, int len)
-{
- const u8 *bp = buffer;
- int ret = 1;
- int i;
- int offset_size;
-
- /* obtain the offset size, strip it from the device address */
- offset_size = (dev >> 8) & 0xff;
- dev &= 0xff;
-
- /* allow at most a 2 byte offset */
- if (offset_size > 2)
- goto bail;
-
- if (dev == HFI1_TWSI_NO_DEV) {
- if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD,
- HFI1_TWSI_START)) {
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START))
- goto failed_write;
- }
-
- for (i = 0; i < offset_size; i++) {
- ret = twsi_wr(dd, target, (addr >> (i * 8)) & 0xff, 0);
- udelay(TWSI_BUF_WAIT_USEC);
- if (ret) {
- dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n",
- i, addr);
- goto bail;
- }
- }
-
- for (i = 0; i < len; i++)
- if (twsi_wr(dd, target, *bp++, 0))
- goto failed_write;
-
- ret = 0;
-
-failed_write:
- stop_cmd(dd, target);
-
-bail:
- return ret;
-}
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/staging/rdma/hfi1/twsi.h
deleted file mode 100644
index 5b8a5b5e7..000000000
--- a/drivers/staging/rdma/hfi1/twsi.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef _TWSI_H
-#define _TWSI_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#define HFI1_TWSI_NO_DEV 0xFF
-
-struct hfi1_devdata;
-
-/* Bit position of SDA/SCL pins in ASIC_QSFP* registers */
-#define GPIO_SDA_NUM 1
-#define GPIO_SCL_NUM 0
-
-/* these functions must be called with qsfp_lock held */
-int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target);
-int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- void *buffer, int len);
-int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
- const void *buffer, int len);
-
-#endif /* _TWSI_H */
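
The header is explicit that all three entry points must be called with
qsfp_lock held. A hedged sketch of the expected call pattern -- the mutex
variable and the single-retry policy are illustrative assumptions:

	int ret;

	mutex_lock(&qsfp_lock);		/* required by the comment above */
	ret = hfi1_twsi_blk_rd(dd, target, dev, addr, buf, len);
	if (ret) {
		/* try to unwedge a slave holding the bus, then retry once */
		if (!hfi1_twsi_reset(dd, target))
			ret = hfi1_twsi_blk_rd(dd, target, dev, addr,
					       buf, len);
	}
	mutex_unlock(&qsfp_lock);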
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c
deleted file mode 100644
index df773d433..000000000
--- a/drivers/staging/rdma/hfi1/uc.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "hfi.h"
-#include "verbs_txreq.h"
-#include "qp.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/* only opcode mask for adaptive pio */
-const u32 uc_only_opcode =
- BIT(OP(SEND_ONLY) & 0x1f) |
-	BIT(OP(SEND_ONLY_WITH_IMMEDIATE) & 0x1f) |
-	BIT(OP(RDMA_WRITE_ONLY) & 0x1f) |
-	BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & 0x1f);
-
-/**
- * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- *
- * Assume s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
- struct rvt_swqe *wqe;
- u32 hwords = 5;
- u32 bth0 = 0;
- u32 len;
- u32 pmtu = qp->pmtu;
- int middle = 0;
-
- ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
- goto bail_no_tx;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- clear_ahg(qp);
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done_free_tx;
- }
-
- ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
-
- /* Get the next send request. */
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- qp->s_wqe = NULL;
- switch (qp->s_state) {
- default:
- if (!(ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_NEXT_SEND_OK))
- goto bail;
- /* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
- clear_ahg(qp);
- goto bail;
- }
- /*
- * Start a new request.
- */
- qp->s_psn = wqe->psn;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- len = wqe->length;
- qp->s_len = len;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND) {
- qp->s_state = OP(SEND_ONLY);
- } else {
- qp->s_state =
- OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / 4;
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
- qp->s_state = OP(RDMA_WRITE_ONLY);
- } else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- break;
-
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- middle = HFI1_CAP_IS_KSET(SDMA_AHG);
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND) {
- qp->s_state = OP(SEND_LAST);
- } else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- middle = HFI1_CAP_IS_KSET(SDMA_AHG);
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
- qp->s_state = OP(RDMA_WRITE_LAST);
- } else {
- qp->s_state =
- OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- ps->s_txreq->sde = priv->s_sde;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
- hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- mask_psn(qp->s_psn++), middle, ps);
- /* pbc */
- ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
- return 1;
-
-done_free_tx:
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- return 1;
-
-bail:
- hfi1_put_txreq(ps->s_txreq);
-
-bail_no_tx:
- ps->s_txreq = NULL;
- qp->s_flags &= ~RVT_S_BUSY;
- qp->s_hdrwords = 0;
- return 0;
-}
-
-/**
- * hfi1_uc_rcv - handle an incoming UC packet
- * @packet: the incoming packet, carrying the port, header, receive
- *          flags, data, length, and QP
- *
- * This is called from qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void hfi1_uc_rcv(struct hfi1_packet *packet)
-{
- struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
- struct hfi1_ib_header *hdr = packet->hdr;
- u32 rcv_flags = packet->rcv_flags;
- void *data = packet->ebuf;
- u32 tlen = packet->tlen;
- struct rvt_qp *qp = packet->qp;
- struct hfi1_other_headers *ohdr = packet->ohdr;
- u32 bth0, opcode;
- u32 hdrsize = packet->hlen;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- struct ib_reth *reth;
- int has_grh = rcv_flags & HFI1_HAS_GRH;
- int ret;
- u32 bth1;
-
- bth0 = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
- return;
-
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
- if (bth1 & HFI1_BECN_SMASK) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 rqpn, lqpn;
- u16 rlid = be16_to_cpu(hdr->lrh[3]);
- u8 sl, sc5;
-
- lqpn = bth1 & RVT_QPN_MASK;
- rqpn = qp->remote_qpn;
-
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_UC);
- }
-
- if (bth1 & HFI1_FECN_SMASK) {
- struct ib_grh *grh = NULL;
- u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
- u16 slid = be16_to_cpu(hdr->lrh[3]);
- u16 dlid = be16_to_cpu(hdr->lrh[1]);
- u32 src_qp = qp->remote_qpn;
- u8 sc5;
-
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- if (has_grh)
- grh = &hdr->u.l.grh;
-
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5,
- grh);
- }
- }
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode = (bth0 >> 24) & 0xff;
-
- /* Compare the PSN against the expected PSN. */
- if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
- /*
- * Handle a sequence error.
- * Silently drop any current message.
- */
- qp->r_psn = psn;
-inv:
- if (qp->r_state == OP(SEND_FIRST) ||
- qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
- qp->r_sge.num_sge = 0;
- } else {
- rvt_put_ss(&qp->r_sge);
- }
- qp->r_state = OP(SEND_LAST);
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- goto send_first;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- goto rdma_first;
-
- default:
- goto drop;
- }
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- default:
- if (opcode == OP(SEND_FIRST) ||
- opcode == OP(SEND_ONLY) ||
- opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_FIRST) ||
- opcode == OP(RDMA_WRITE_ONLY) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- break;
- goto inv;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
- qp_comm_est(qp);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
-send_first:
- if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
- qp->r_sge = qp->s_rdma_read_sge;
- } else {
- ret = hfi1_rvt_get_rwqe(qp, 0);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto drop;
- /*
- * qp->s_rdma_read_sge will be the owner
- * of the mr references.
- */
- qp->s_rdma_read_sge = qp->r_sge;
- }
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
- goto send_last_imm;
- /* FALLTHROUGH */
- case OP(SEND_MIDDLE):
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto rewind;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto rewind;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, 0, 0);
- break;
-
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(SEND_LAST):
-no_immediate_data:
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto rewind;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto rewind;
- wc.opcode = IB_WC_RECV;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 0, 0);
- rvt_put_ss(&qp->s_rdma_read_sge);
-last_imm:
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = qp->remote_ah_attr.dlid;
- /*
- * It seems that IB mandates the presence of an SL in a
- * work completion only for the UD transport (see section
- * 11.4.2 of IBTA Vol. 1).
- *
- * However, the way the SL is chosen below is consistent
- * with the way that IB/qib works and tries to avoid
- * introducing incompatibilities.
- *
- * See also OPA Vol. 1, section 9.7.6, and table 9-17.
- */
- wc.sl = qp->remote_ah_attr.sl;
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
-rdma_first:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE))) {
- goto drop;
- }
- reth = &ohdr->u.rc.reth;
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey */
- ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
- vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto drop;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_ONLY)) {
- goto rdma_last;
- } else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- goto rdma_last_imm;
- }
- /* FALLTHROUGH */
- case OP(RDMA_WRITE_MIDDLE):
- /* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto drop;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto drop;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- wc.ex.imm_data = ohdr->u.imm_data;
-rdma_last_imm:
- wc.wc_flags = IB_WC_WITH_IMM;
-
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
- goto drop;
- if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
- rvt_put_ss(&qp->s_rdma_read_sge);
- } else {
- ret = hfi1_rvt_get_rwqe(qp, 1);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto drop;
- }
- wc.byte_len = qp->r_len;
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
- rvt_put_ss(&qp->r_sge);
- goto last_imm;
-
- case OP(RDMA_WRITE_LAST):
-rdma_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
- goto drop;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
- rvt_put_ss(&qp->r_sge);
- break;
-
- default:
- /* Drop packet for unknown opcodes. */
- goto drop;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- return;
-
-rewind:
- set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
- qp->r_sge.num_sge = 0;
-drop:
- ibp->rvp.n_pkt_drops++;
- return;
-
-op_err:
- hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-}
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
deleted file mode 100644
index ae8a70f70..000000000
--- a/drivers/staging/rdma/hfi1/ud.c
+++ /dev/null
@@ -1,911 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/net.h>
-#include <rdma/ib_smi.h>
-
-#include "hfi.h"
-#include "mad.h"
-#include "verbs_txreq.h"
-#include "qp.h"
-
-/**
- * ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from hfi1_make_ud_req() to forward a WQE addressed
- * to the same HFI.
- * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
- * while this is being called.
- */
-static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
-{
- struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct hfi1_pportdata *ppd;
- struct rvt_qp *qp;
- struct ib_ah_attr *ah_attr;
- unsigned long flags;
- struct rvt_sge_state ssge;
- struct rvt_sge *sge;
- struct ib_wc wc;
- u32 length;
- enum ib_qp_type sqptype, dqptype;
-
- rcu_read_lock();
-
- qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
- swqe->ud_wr.remote_qpn);
- if (!qp) {
- ibp->rvp.n_pkt_drops++;
- rcu_read_unlock();
- return;
- }
-
- sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : sqp->ibqp.qp_type;
- dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : qp->ibqp.qp_type;
-
- if (dqptype != sqptype ||
- !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto drop;
- }
-
- ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
- ppd = ppd_from_ibp(ibp);
-
- if (qp->ibqp.qp_num > 1) {
- u16 pkey;
- u16 slid;
- u8 sc5 = ibp->sl_to_sc[ah_attr->sl];
-
- pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
- slid = ppd->lid | (ah_attr->src_path_bits &
- ((1 << ppd->lmc) - 1));
- if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
- qp->s_pkey_index, slid))) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
- ah_attr->sl,
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- slid, ah_attr->dlid);
- goto drop;
- }
- }
-
- /*
- * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
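- * The (int) cast below implements that test: a qkey with bit 31
- * set is negative as an int, so sqp->qkey is used instead.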
- */
- if (qp->ibqp.qp_num) {
- u32 qkey;
-
- qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
- sqp->qkey : swqe->ud_wr.remote_qkey;
- if (unlikely(qkey != qp->qkey)) {
- u16 lid;
-
- lid = ppd->lid | (ah_attr->src_path_bits &
- ((1 << ppd->lmc) - 1));
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
- ah_attr->sl,
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- lid,
- ah_attr->dlid);
- goto drop;
- }
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- length = swqe->length;
- memset(&wc, 0, sizeof(wc));
- wc.byte_len = length + sizeof(struct ib_grh);
-
- if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = swqe->wr.ex.imm_data;
- }
-
- spin_lock_irqsave(&qp->r_lock, flags);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & RVT_R_REUSE_SGE) {
- qp->r_flags &= ~RVT_R_REUSE_SGE;
- } else {
- int ret;
-
- ret = hfi1_rvt_get_rwqe(qp, 0);
- if (ret < 0) {
- hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto bail_unlock;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->rvp.n_vl15_dropped++;
- goto bail_unlock;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= RVT_R_REUSE_SGE;
- ibp->rvp.n_pkt_drops++;
- goto bail_unlock;
- }
-
- if (ah_attr->ah_flags & IB_AH_GRH) {
- hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1, 0);
- wc.wc_flags |= IB_WC_GRH;
- } else {
- hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
- }
- ssge.sg_list = swqe->sg_list + 1;
- ssge.sge = *swqe->sg_list;
- ssge.num_sge = swqe->wr.num_sge;
- sge = &ssge.sge;
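- /*
- * Walk the sender's SG list, copying each fragment into the
- * receiver's posted buffer and advancing to the next SGE as each
- * one is exhausted.
- */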
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1, 0);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ssge.num_sge)
- *sge = *ssge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
- rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto bail_unlock;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = sqp->ibqp.qp_num;
- if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
- if (sqp->ibqp.qp_type == IB_QPT_GSI ||
- sqp->ibqp.qp_type == IB_QPT_SMI)
- wc.pkey_index = swqe->ud_wr.pkey_index;
- else
- wc.pkey_index = sqp->s_pkey_index;
- } else {
- wc.pkey_index = 0;
- }
- wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
- /* Check for loopback when the port lid is not set */
- if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
- wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
- wc.sl = ah_attr->sl;
- wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->rvp.n_loop_pkts++;
-bail_unlock:
- spin_unlock_irqrestore(&qp->r_lock, flags);
-drop:
- rcu_read_unlock();
-}
-
-/**
- * hfi1_make_ud_req - construct a UD request packet
- * @qp: the QP
- * @ps: the current packet state
- *
- * Assume s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
- struct ib_ah_attr *ah_attr;
- struct hfi1_pportdata *ppd;
- struct hfi1_ibport *ibp;
- struct rvt_swqe *wqe;
- u32 nwords;
- u32 extra_bytes;
- u32 bth0;
- u16 lrh0;
- u16 lid;
- int next_cur;
- u8 sc5;
-
- ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
- goto bail_no_tx;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done_free_tx;
- }
-
- /* see post_one_send() */
- smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
- goto bail;
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- next_cur = qp->s_cur + 1;
- if (next_cur >= qp->s_size)
- next_cur = 0;
-
- /* Construct the header. */
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ppd = ppd_from_ibp(ibp);
- ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
- ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
- lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
- if (unlikely(!loopback &&
- (lid == ppd->lid ||
- (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
- qp->ibqp.qp_type == IB_QPT_GSI)))) {
- unsigned long flags;
- /*
- * If DMAs are in progress, we can't generate
- * a completion for the loopback packet since
- * it would be out of order.
- * Instead of waiting, we could queue a
- * zero length descriptor so we get a callback.
- */
- if (iowait_sdma_pending(&priv->s_iowait)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- qp->s_cur = next_cur;
- local_irq_save(flags);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
- goto done_free_tx;
- }
- }
-
- qp->s_cur = next_cur;
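- /*
- * Pad the payload to a 4-byte boundary: -len & 3 is the pad byte
- * count (e.g. a 5-byte length needs 3 extra bytes).
- */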
- extra_bytes = -wqe->length & 3;
- nwords = (wqe->length + extra_bytes) >> 2;
-
- /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_srate = ah_attr->static_rate;
- qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
- qp->s_wqe = wqe;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
-
- if (ah_attr->ah_flags & IB_AH_GRH) {
- /* Header size in 32-bit words. */
- qp->s_hdrwords += hfi1_make_grh(ibp,
- &ps->s_txreq->phdr.hdr.u.l.grh,
- &ah_attr->grh,
- qp->s_hdrwords, nwords);
- lrh0 = HFI1_LRH_GRH;
- ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
- /*
- * Don't worry about sending to locally attached multicast
- * QPs; the spec leaves what happens unspecified.
- */
- } else {
- /* Header size in 32-bit words. */
- lrh0 = HFI1_LRH_BTH;
- ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- }
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- qp->s_hdrwords++;
- ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
- bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else {
- bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- }
- sc5 = ibp->sl_to_sc[ah_attr->sl];
- lrh0 |= (ah_attr->sl & 0xf) << 4;
- if (qp->ibqp.qp_type == IB_QPT_SMI) {
- lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- priv->s_sc = 0xf;
- } else {
- lrh0 |= (sc5 & 0xf) << 12;
- priv->s_sc = sc5;
- }
- priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
- ps->s_txreq->sde = priv->s_sde;
- priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
- ps->s_txreq->psc = priv->s_sendcontext;
- ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
- ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);
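- /*
- * The LRH packet length is in 32-bit words and covers the
- * header, the padded payload, and the CRC.
- */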
- ps->s_txreq->phdr.hdr.lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
- ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
- } else {
- lid = ppd->lid;
- if (lid) {
- lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
- ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid);
- } else {
- ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
- }
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth0 |= extra_bytes << 20;
- if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
- bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
- else
- bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
- /*
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
- qp->qkey : wqe->ud_wr.remote_qkey);
- ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
- /* disarm any ahg */
- priv->s_hdr->ahgcount = 0;
- priv->s_hdr->ahgidx = 0;
- priv->s_hdr->tx_flags = 0;
- priv->s_hdr->sde = NULL;
- /* pbc */
- ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
-
- return 1;
-
-done_free_tx:
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- return 1;
-
-bail:
- hfi1_put_txreq(ps->s_txreq);
-
-bail_no_tx:
- ps->s_txreq = NULL;
- qp->s_flags &= ~RVT_S_BUSY;
- qp->s_hdrwords = 0;
- return 0;
-}
-
-/*
- * Hardware can't check this so we do it here.
- *
- * This is a slightly different algorithm than the standard pkey check. It
- * special cases the management keys and allows for 0x7fff and 0xffff to be in
- * the table at the same time.
- *
- * @returns the index found or -1 if not found
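- *
- * For example, with a pkey table of {0x8001, 0x7fff}, looking up
- * 0xffff returns index 1 (falling back to the limited key), and
- * looking up 0x0001 returns index 0 (membership bit ignored).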
- */
-int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned i;
-
- if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
- unsigned lim_idx = -1;
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
- /* here we look for an exact match */
- if (ppd->pkeys[i] == pkey)
- return i;
- if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
- lim_idx = i;
- }
-
- /* did not find 0xffff; return the 0x7fff index if one was found */
- if (pkey == FULL_MGMT_P_KEY)
- return lim_idx;
-
- /* no match... */
- return -1;
- }
-
- pkey &= 0x7fff; /* remove limited/full membership bit */
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
- if ((ppd->pkeys[i] & 0x7fff) == pkey)
- return i;
-
- /*
- * Should not get here, this means hardware failed to validate pkeys.
- */
- return -1;
-}
-
-void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
- u32 pkey, u32 slid, u32 dlid, u8 sc5,
- const struct ib_grh *old_grh)
-{
- u64 pbc, pbc_flags = 0;
- u32 bth0, plen, vl, hwords = 5;
- u16 lrh0;
- u8 sl = ibp->sc_to_sl[sc5];
- struct hfi1_ib_header hdr;
- struct hfi1_other_headers *ohdr;
- struct pio_buf *pbuf;
- struct send_context *ctxt = qp_to_send_context(qp, sc5);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- if (old_grh) {
- struct ib_grh *grh = &hdr.u.l.grh;
-
- grh->version_tclass_flow = old_grh->version_tclass_flow;
- grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
- grh->hop_limit = 0xff;
- grh->sgid = old_grh->dgid;
- grh->dgid = old_grh->sgid;
- ohdr = &hdr.u.l.oth;
- lrh0 = HFI1_LRH_GRH;
- hwords += sizeof(struct ib_grh) / sizeof(u32);
- } else {
- ohdr = &hdr.u.oth;
- lrh0 = HFI1_LRH_BTH;
- }
-
- lrh0 |= (sc5 & 0xf) << 12 | sl << 4;
-
- bth0 = pkey | (IB_OPCODE_CNP << 24);
- ohdr->bth[0] = cpu_to_be32(bth0);
-
- ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << HFI1_BECN_SHIFT));
- ohdr->bth[2] = 0; /* PSN 0 */
-
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(dlid);
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(slid);
-
- plen = 2 /* PBC */ + hwords;
- pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
- vl = sc_to_vlt(ppd->dd, sc5);
- pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
- if (ctxt) {
- pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
- if (pbuf)
- ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
- &hdr, hwords);
- }
-}
-
-/*
- * opa_smp_check() - Do the regular pkey checking, and the additional
- * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
- * ("SMA Packet Checks").
- *
- * Note that:
- * - Checks are done using the pkey directly from the packet's BTH,
- * and specifically _not_ the pkey that we attach to the completion,
- * which may be different.
- * - These checks are specifically for "non-local" SMPs (i.e., SMPs
- * which originated on another node). SMPs which are sent from, and
- * destined to this node are checked in opa_local_smp_check().
- *
- * At the point where opa_smp_check() is called, we know:
- * - destination QP is QP0
- *
- * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
- */
-static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
- struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
-{
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- /*
- * I don't think it's possible for us to get here with sc != 0xf,
- * but check it to be certain.
- */
- if (sc5 != 0xf)
- return 1;
-
- if (rcv_pkey_check(ppd, pkey, sc5, slid))
- return 1;
-
- /*
- * At this point we know (and so don't need to check again) that
- * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
- * (see ingress_pkey_check).
- */
- if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
- }
-
- /*
- * SMPs fall into one of four (disjoint) categories:
- * SMA request, SMA response, trap, or trap repress.
- * Our response depends, in part, on which type of
- * SMP we're processing.
- *
- * If this is not an SMA request, or trap repress:
- * - accept MAD if the port is running an SM
- * - pkey == FULL_MGMT_P_KEY =>
- * reply with unsupported method (i.e., just mark
- * the smp's status field here, and let it be
- * processed normally)
- * - pkey != LIM_MGMT_P_KEY =>
- * increment port recv constraint errors, drop MAD
- * If this is an SMA request or trap repress:
- * - pkey != FULL_MGMT_P_KEY =>
- * increment port recv constraint errors, drop MAD
- */
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- case IB_MGMT_METHOD_SET:
- case IB_MGMT_METHOD_REPORT:
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (pkey != FULL_MGMT_P_KEY) {
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
- }
- break;
- case IB_MGMT_METHOD_SEND:
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- case IB_MGMT_METHOD_REPORT_RESP:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM)
- return 0;
- if (pkey == FULL_MGMT_P_KEY) {
- smp->status |= IB_SMP_UNSUP_METHOD;
- return 0;
- }
- if (pkey != LIM_MGMT_P_KEY) {
- ingress_pkey_table_fail(ppd, pkey, slid);
- return 1;
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-/**
- * hfi1_ud_rcv - receive an incoming UD packet
- * @packet: the incoming packet, carrying the port, header, receive
- *          flags, data, length, and QP
- *
- * This is called from qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void hfi1_ud_rcv(struct hfi1_packet *packet)
-{
- struct hfi1_other_headers *ohdr = packet->ohdr;
- int opcode;
- u32 hdrsize = packet->hlen;
- u32 pad;
- struct ib_wc wc;
- u32 qkey;
- u32 src_qp;
- u16 dlid, pkey;
- int mgmt_pkey_idx = -1;
- struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
- struct hfi1_ib_header *hdr = packet->hdr;
- u32 rcv_flags = packet->rcv_flags;
- void *data = packet->ebuf;
- u32 tlen = packet->tlen;
- struct rvt_qp *qp = packet->qp;
- bool has_grh = rcv_flags & HFI1_HAS_GRH;
- bool sc4_bit = has_sc4_bit(packet);
- u8 sc;
- u32 bth1;
- int is_mcast;
- struct ib_grh *grh = NULL;
-
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
- dlid = be16_to_cpu(hdr->lrh[1]);
- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & HFI1_BECN_SMASK)) {
- /*
- * In pre-B0 h/w the CNP_OPCODE is handled via an
- * error path.
- */
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl, sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
- sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
- }
-
- /*
- * The opcode is in the low byte when it's in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- opcode &= 0xff;
-
- pkey = (u16)be32_to_cpu(ohdr->bth[0]);
-
- if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
- u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
-
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
- }
- /*
- * Get the number of bytes the message was padded by
- * and drop incomplete packets.
- */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
-
- tlen -= hdrsize + pad + 4;
-
- /*
- * Check that the permissive LID is only used on QP0
- * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
- */
- if (qp->ibqp.qp_num) {
- if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE))
- goto drop;
- if (qp->ibqp.qp_num > 1) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid;
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
-
- slid = be16_to_cpu(hdr->lrh[3]);
- if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
- /*
- * Traps will not be sent for packets dropped
- * by the HW. This is fine, as sending trap
- * for invalid pkeys is optional according to
- * IB spec (release 1.3, section 10.9.4)
- */
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- pkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
- 0xF,
- src_qp, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
- return;
- }
- } else {
- /* GSI packet */
- mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
- if (mgmt_pkey_idx < 0)
- goto drop;
- }
- if (unlikely(qkey != qp->qkey)) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- src_qp, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
- return;
- }
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (unlikely(qp->ibqp.qp_num == 1 &&
- (tlen > 2048 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
- goto drop;
- } else {
- /* Received on QP0, and so by definition, this is an SMP */
- struct opa_smp *smp = (struct opa_smp *)data;
- u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
-
- if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
- goto drop;
-
- if (tlen > 2048)
- goto drop;
- if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE) &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- goto drop;
-
- /* look up SMI pkey */
- mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
- if (mgmt_pkey_idx < 0)
- goto drop;
- }
-
- if (qp->ibqp.qp_num > 1 &&
- opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- wc.ex.imm_data = ohdr->u.ud.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
- } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
- } else {
- goto drop;
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- wc.byte_len = tlen + sizeof(struct ib_grh);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & RVT_R_REUSE_SGE) {
- qp->r_flags &= ~RVT_R_REUSE_SGE;
- } else {
- int ret;
-
- ret = hfi1_rvt_get_rwqe(qp, 0);
- if (ret < 0) {
- hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->rvp.n_vl15_dropped++;
- return;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= RVT_R_REUSE_SGE;
- goto drop;
- }
- if (has_grh) {
- hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), 1, 0);
- wc.wc_flags |= IB_WC_GRH;
- } else {
- hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
- }
- hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
- 1, 0);
- rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- return;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.qp = &qp->ibqp;
- wc.src_qp = src_qp;
-
- if (qp->ibqp.qp_type == IB_QPT_GSI ||
- qp->ibqp.qp_type == IB_QPT_SMI) {
- if (mgmt_pkey_idx < 0) {
- if (net_ratelimit()) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = ppd->dd;
-
- dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
- qp->ibqp.qp_type);
- mgmt_pkey_idx = 0;
- }
- }
- wc.pkey_index = (unsigned)mgmt_pkey_idx;
- } else {
- wc.pkey_index = 0;
- }
-
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc |= sc4_bit;
- wc.sl = ibp->sc_to_sl[sc];
-
- /*
- * Save the LMC lower bits if the destination LID is a unicast LID.
- */
- wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
- dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
- return;
-
-drop:
- ibp->rvp.n_pkt_drops++;
-}
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
deleted file mode 100644
index 8bd56d5c7..000000000
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <asm/page.h>
-
-#include "user_exp_rcv.h"
-#include "trace.h"
-#include "mmu_rb.h"
-
-struct tid_group {
- struct list_head list;
- unsigned base;
- u8 size;
- u8 used;
- u8 map;
-};
-
-struct tid_rb_node {
- struct mmu_rb_node mmu;
- unsigned long phys;
- struct tid_group *grp;
- u32 rcventry;
- dma_addr_t dma_addr;
- bool freed;
- unsigned npages;
- struct page *pages[0];
-};
-
-struct tid_pageset {
- u16 idx;
- u16 count;
-};
-
-#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
-
-#define num_user_pages(vaddr, len) \
- (1 + (((((unsigned long)(vaddr) + \
- (unsigned long)(len) - 1) & PAGE_MASK) - \
- ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
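-/*
- * For example, with 4 KiB pages, vaddr = 0x1ffc and len = 8 straddle
- * a page boundary: (0x2000 - 0x1000) >> PAGE_SHIFT = 1, plus 1 for
- * the first page, gives 2 pages.
- */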
-
-static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
- struct rb_root *);
-static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
-static int set_rcvarray_entry(struct file *, unsigned long, u32,
- struct tid_group *, struct page **, unsigned);
-static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
-static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
-static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
- struct tid_pageset *, unsigned, u16, struct page **,
- u32 *, unsigned *, unsigned *);
-static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
-static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *);
-
-static struct mmu_rb_ops tid_rb_ops = {
- .insert = mmu_rb_insert,
- .remove = mmu_rb_remove,
- .invalidate = mmu_rb_invalidate
-};
-
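-/*
- * RcvArray entries are paired for TID encoding purposes; e.g.
- * rcventry 5 encodes as IDX 2 with CTRL 2 (the odd half of the
- * pair) and rcventry 4 as IDX 2 with CTRL 1 (the even half).
- */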
-static inline u32 rcventry2tidinfo(u32 rcventry)
-{
- u32 pair = rcventry & ~0x1;
-
- return EXP_TID_SET(IDX, pair >> 1) |
- EXP_TID_SET(CTRL, 1 << (rcventry - pair));
-}
-
-static inline void exp_tid_group_init(struct exp_tid_set *set)
-{
- INIT_LIST_HEAD(&set->list);
- set->count = 0;
-}
-
-static inline void tid_group_remove(struct tid_group *grp,
- struct exp_tid_set *set)
-{
- list_del_init(&grp->list);
- set->count--;
-}
-
-static inline void tid_group_add_tail(struct tid_group *grp,
- struct exp_tid_set *set)
-{
- list_add_tail(&grp->list, &set->list);
- set->count++;
-}
-
-static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
-{
- struct tid_group *grp =
- list_first_entry(&set->list, struct tid_group, list);
- list_del_init(&grp->list);
- set->count--;
- return grp;
-}
-
-static inline void tid_group_move(struct tid_group *group,
- struct exp_tid_set *s1,
- struct exp_tid_set *s2)
-{
- tid_group_remove(group, s1);
- tid_group_add_tail(group, s2);
-}
-
-/*
- * Initialize context and file private data needed for Expected
- * receive caching. This needs to be done after the context has
- * been configured with the eager/expected RcvEntry counts.
- */
-int hfi1_user_exp_rcv_init(struct file *fp)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned tidbase;
- int i, ret = 0;
-
- spin_lock_init(&fd->tid_lock);
- spin_lock_init(&fd->invalid_lock);
- fd->tid_rb_root = RB_ROOT;
-
- if (!uctxt->subctxt_cnt || !fd->subctxt) {
- exp_tid_group_init(&uctxt->tid_group_list);
- exp_tid_group_init(&uctxt->tid_used_list);
- exp_tid_group_init(&uctxt->tid_full_list);
-
- tidbase = uctxt->expected_base;
- for (i = 0; i < uctxt->expected_count /
- dd->rcv_entries.group_size; i++) {
- struct tid_group *grp;
-
- grp = kzalloc(sizeof(*grp), GFP_KERNEL);
- if (!grp) {
- /*
- * If we fail here, the groups already
- * allocated will be freed by the close
- * call.
- */
- ret = -ENOMEM;
- goto done;
- }
- grp->size = dd->rcv_entries.group_size;
- grp->base = tidbase;
- tid_group_add_tail(grp, &uctxt->tid_group_list);
- tidbase += dd->rcv_entries.group_size;
- }
- }
-
- fd->entry_to_rb = kcalloc(uctxt->expected_count,
- sizeof(struct rb_node *),
- GFP_KERNEL);
- if (!fd->entry_to_rb)
- return -ENOMEM;
-
- if (!HFI1_CAP_IS_USET(TID_UNMAP)) {
- fd->invalid_tid_idx = 0;
- fd->invalid_tids = kzalloc(uctxt->expected_count *
- sizeof(u32), GFP_KERNEL);
- if (!fd->invalid_tids) {
- ret = -ENOMEM;
- goto done;
- }
-
- /*
- * Register MMU notifier callbacks. If the registration
- * fails, continue but turn off the TID caching for
- * all user contexts.
- */
- ret = hfi1_mmu_rb_register(&fd->tid_rb_root, &tid_rb_ops);
- if (ret) {
- dd_dev_info(dd,
- "Failed MMU notifier registration %d\n",
- ret);
- HFI1_CAP_USET(TID_UNMAP);
- ret = 0;
- }
- }
-
- /*
- * PSM does not have a good way to separate, count, and
- * effectively enforce a limit on RcvArray entries used by
- * subctxts (when context sharing is used) when TID caching
- * is enabled. To help with that, we calculate a per-process
- * RcvArray entry share and enforce that.
- * If TID caching is not in use, PSM deals with usage on its
- * own. In that case, we allow any subctxt to take all of the
- * entries.
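- *
- * For example, 64 expected entries shared by 3 subcontexts yield
- * limits of 22, 21 and 21; the remainder is handed to the
- * lowest-numbered subcontexts.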
- *
- * Make sure that we set the tid counts only after successful
- * init.
- */
- spin_lock(&fd->tid_lock);
- if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) {
- u16 remainder;
-
- fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
- remainder = uctxt->expected_count % uctxt->subctxt_cnt;
- if (remainder && fd->subctxt < remainder)
- fd->tid_limit++;
- } else {
- fd->tid_limit = uctxt->expected_count;
- }
- spin_unlock(&fd->tid_lock);
-done:
- return ret;
-}
-
-int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
-{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct tid_group *grp, *gptr;
-
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
- return 0;
- /*
- * The notifier would have been removed when the process's mm
- * was freed.
- */
- if (!HFI1_CAP_IS_USET(TID_UNMAP))
- hfi1_mmu_rb_unregister(&fd->tid_rb_root);
-
- kfree(fd->invalid_tids);
-
- if (!uctxt->cnt) {
- if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
- unlock_exp_tids(uctxt, &uctxt->tid_full_list,
- &fd->tid_rb_root);
- if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
- unlock_exp_tids(uctxt, &uctxt->tid_used_list,
- &fd->tid_rb_root);
- list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
- list) {
- list_del_init(&grp->list);
- kfree(grp);
- }
- hfi1_clear_tids(uctxt);
- }
-
- kfree(fd->entry_to_rb);
- return 0;
-}
-
-/*
- * Write an "empty" RcvArray entry.
- * This function exists so the TID registration code can use it
- * to write to unused/unneeded entries and still take advantage
- * of the WC performance improvements. The HFI will ignore this
- * write to the RcvArray entry.
- */
-static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
-{
- /*
- * Doing the WC fill writes only makes sense if the device is
- * present and the RcvArray has been mapped as WC memory.
- */
- if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc)
- writeq(0, dd->rcvarray_wc + (index * 8));
-}
-
-/*
- * RcvArray entry allocation for Expected Receives is done by the
- * following algorithm:
- *
- * The context keeps 3 lists of groups of RcvArray entries:
- * 1. List of empty groups - tid_group_list
- * This list is created during user context creation and
- * contains elements which describe sets (of 8) of empty
- * RcvArray entries.
- * 2. List of partially used groups - tid_used_list
- * This list contains sets of RcvArray entries which are
- * not completely used up. Another mapping request could
- * use some or all of the remaining entries.
- * 3. List of full groups - tid_full_list
- * This is the list where sets that are completely used
- * up go.
- *
- * An attempt to optimize the usage of RcvArray entries is
- * made by finding all sets of physically contiguous pages in a
- * user's buffer.
- * These physically contiguous sets are further split into
- * sizes supported by the receive engine of the HFI. The
- * resulting sets of pages are stored in struct tid_pageset,
- * which describes the sets as:
- * * .count - number of pages in this set
- * * .idx - starting index into struct page ** array
- * of this set
- *
- * From this point on, the algorithm deals with the page sets
- * described above. The number of pagesets is divided by the
- * RcvArray group size to produce the number of full groups
- * needed.
- *
- * Groups from the 3 lists are manipulated using the following
- * rules:
- * 1. For each set of 8 pagesets, a complete group from
- * tid_group_list is taken, programmed, and moved to
- * the tid_full_list list.
- * 2. For all remaining pagesets:
- * 2.1 If the tid_used_list is empty and the tid_group_list
- * is empty, stop processing pagesets and return only
- * what has been programmed up to this point.
- * 2.2 If the tid_used_list is empty and the tid_group_list
- * is not empty, move a group from tid_group_list to
- * tid_used_list.
- * 2.3 For each group in tid_used_list, program as much as
- * can fit into the group. If the group becomes fully
- * used, move it to tid_full_list.
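- *
- * For example, with the usual group size of 8, a request that
- * produces 19 pagesets takes two complete groups from
- * tid_group_list under rule 1 and programs the remaining 3
- * pagesets into a group moved to tid_used_list under rules 2.2
- * and 2.3.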
- */
-int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
-{
- int ret = 0, need_group = 0, pinned;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
- tididx = 0, mapped, mapped_pages = 0;
- unsigned long vaddr = tinfo->vaddr;
- struct page **pages = NULL;
- u32 *tidlist = NULL;
- struct tid_pageset *pagesets = NULL;
-
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tinfo->length);
- if (!npages)
- return -EINVAL;
-
- if (npages > uctxt->expected_count) {
- dd_dev_err(dd, "Expected buffer too big\n");
- return -EINVAL;
- }
-
- /* Verify that access is OK for the user buffer */
- if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
- npages * PAGE_SIZE)) {
- dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
- (void *)vaddr, npages);
- return -EFAULT;
- }
-
- pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
- GFP_KERNEL);
- if (!pagesets)
- return -ENOMEM;
-
- /* Allocate the array of struct page pointers needed for pinning */
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /*
- * Pin all the pages of the user buffer. If we can't pin all the
- * pages, accept the amount pinned so far and program only that.
- * User space knows how to deal with partially programmed buffers.
- */
- if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) {
- ret = -ENOMEM;
- goto bail;
- }
- pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages);
- if (pinned <= 0) {
- ret = pinned;
- goto bail;
- }
- fd->tid_n_pinned += npages;
-
- /* Find sets of physically contiguous pages */
- npagesets = find_phys_blocks(pages, pinned, pagesets);
-
- /*
- * We don't need to access this under a lock since tid_used is per
- * process and the same process cannot be in hfi1_user_exp_rcv_clear()
- * and hfi1_user_exp_rcv_setup() at the same time.
- */
- spin_lock(&fd->tid_lock);
- if (fd->tid_used + npagesets > fd->tid_limit)
- pageset_count = fd->tid_limit - fd->tid_used;
- else
- pageset_count = npagesets;
- spin_unlock(&fd->tid_lock);
-
- if (!pageset_count)
- goto bail;
-
- ngroups = pageset_count / dd->rcv_entries.group_size;
- tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
- if (!tidlist) {
- ret = -ENOMEM;
- goto nomem;
- }
-
- tididx = 0;
-
- /*
- * From this point on, we are going to be using shared (between master
- * and subcontexts) context resources. We need to take the lock.
- */
- mutex_lock(&uctxt->exp_lock);
- /*
- * The first step is to program the RcvArray entries which are complete
- * groups.
- */
- while (ngroups && uctxt->tid_group_list.count) {
- struct tid_group *grp =
- tid_group_pop(&uctxt->tid_group_list);
-
- ret = program_rcvarray(fp, vaddr, grp, pagesets,
- pageidx, dd->rcv_entries.group_size,
- pages, tidlist, &tididx, &mapped);
- /*
- * If there was a failure to program the RcvArray
- * entries for the entire group, reset the grp fields
- * and add the grp back to the free group list.
- */
- if (ret <= 0) {
- tid_group_add_tail(grp, &uctxt->tid_group_list);
- hfi1_cdbg(TID,
- "Failed to program RcvArray group %d", ret);
- goto unlock;
- }
-
- tid_group_add_tail(grp, &uctxt->tid_full_list);
- ngroups--;
- pageidx += ret;
- mapped_pages += mapped;
- }
-
- while (pageidx < pageset_count) {
- struct tid_group *grp, *ptr;
- /*
- * If we don't have any partially used tid groups, check
- * if we have empty groups. If so, take one from there and
- * put in the partially used list.
- */
- if (!uctxt->tid_used_list.count || need_group) {
- if (!uctxt->tid_group_list.count)
- goto unlock;
-
- grp = tid_group_pop(&uctxt->tid_group_list);
- tid_group_add_tail(grp, &uctxt->tid_used_list);
- need_group = 0;
- }
- /*
- * There is an optimization opportunity here - instead of
- * fitting as many page sets as we can, check for a group
- * later on in the list that could fit all of them.
- */
- list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
- list) {
- unsigned use = min_t(unsigned, pageset_count - pageidx,
- grp->size - grp->used);
-
- ret = program_rcvarray(fp, vaddr, grp, pagesets,
- pageidx, use, pages, tidlist,
- &tididx, &mapped);
- if (ret < 0) {
- hfi1_cdbg(TID,
- "Failed to program RcvArray entries %d",
- ret);
- ret = -EFAULT;
- goto unlock;
- } else if (ret > 0) {
- if (grp->used == grp->size)
- tid_group_move(grp,
- &uctxt->tid_used_list,
- &uctxt->tid_full_list);
- pageidx += ret;
- mapped_pages += mapped;
- need_group = 0;
- /* Check if we are done so we break out early */
- if (pageidx >= pageset_count)
- break;
- } else if (WARN_ON(ret == 0)) {
- /*
- * If ret is 0, we did not program any entries
- * into this group, which can only happen if
- * we've screwed up the accounting somewhere.
- * Warn and try to continue.
- */
- need_group = 1;
- }
- }
- }
-unlock:
- mutex_unlock(&uctxt->exp_lock);
-nomem:
- hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
- mapped_pages, ret);
- if (tididx) {
- spin_lock(&fd->tid_lock);
- fd->tid_used += tididx;
- spin_unlock(&fd->tid_lock);
- tinfo->tidcnt = tididx;
- tinfo->length = mapped_pages * PAGE_SIZE;
-
- if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
- tidlist, sizeof(tidlist[0]) * tididx)) {
- /*
- * On failure to copy to the user level, we need to undo
- * everything done so far so we don't leak resources.
- */
- tinfo->tidlist = (unsigned long)&tidlist;
- hfi1_user_exp_rcv_clear(fp, tinfo);
- tinfo->tidlist = 0;
- ret = -EFAULT;
- goto bail;
- }
- }
-
- /*
- * If not everything was mapped (due to insufficient RcvArray entries,
- * for example), unpin all unmapped pages so we can pin them next time.
- */
- if (mapped_pages != pinned) {
- hfi1_release_user_pages(current->mm, &pages[mapped_pages],
- pinned - mapped_pages,
- false);
- fd->tid_n_pinned -= pinned - mapped_pages;
- }
-bail:
- kfree(pagesets);
- kfree(pages);
- kfree(tidlist);
- return ret > 0 ? 0 : ret;
-}
-
-int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
-{
- int ret = 0;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- u32 *tidinfo;
- unsigned tididx;
-
- tidinfo = kcalloc(tinfo->tidcnt, sizeof(*tidinfo), GFP_KERNEL);
- if (!tidinfo)
- return -ENOMEM;
-
- if (copy_from_user(tidinfo, (void __user *)(unsigned long)
- tinfo->tidlist, sizeof(tidinfo[0]) *
- tinfo->tidcnt)) {
- ret = -EFAULT;
- goto done;
- }
-
- mutex_lock(&uctxt->exp_lock);
- for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
- ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL);
- if (ret) {
- hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
- ret);
- break;
- }
- }
- spin_lock(&fd->tid_lock);
- fd->tid_used -= tididx;
- spin_unlock(&fd->tid_lock);
- tinfo->tidcnt = tididx;
- mutex_unlock(&uctxt->exp_lock);
-done:
- kfree(tidinfo);
- return ret;
-}
-
-int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- unsigned long *ev = uctxt->dd->events +
- (((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
- u32 *array;
- int ret = 0;
-
- if (!fd->invalid_tids)
- return -EINVAL;
-
- /*
- * copy_to_user() can sleep, which will leave the invalid_lock
- * locked and cause the MMU notifier to be blocked on the lock
- * for a long time.
- * Copy the data to a local buffer so we can release the lock.
- */
- array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
- if (!array)
- return -ENOMEM;
-
- spin_lock(&fd->invalid_lock);
- if (fd->invalid_tid_idx) {
- memcpy(array, fd->invalid_tids, sizeof(*array) *
- fd->invalid_tid_idx);
- memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
- fd->invalid_tid_idx);
- tinfo->tidcnt = fd->invalid_tid_idx;
- fd->invalid_tid_idx = 0;
- /*
- * Reset the user flag while still holding the lock.
- * Otherwise, PSM can miss events.
- */
- clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
- } else {
- tinfo->tidcnt = 0;
- }
- spin_unlock(&fd->invalid_lock);
-
- if (tinfo->tidcnt) {
- if (copy_to_user((void __user *)tinfo->tidlist,
- array, sizeof(*array) * tinfo->tidcnt))
- ret = -EFAULT;
- }
- kfree(array);
-
- return ret;
-}
-
-static u32 find_phys_blocks(struct page **pages, unsigned npages,
- struct tid_pageset *list)
-{
- unsigned pagecount, pageidx, setcount = 0, i;
- unsigned long pfn, this_pfn;
-
- if (!npages)
- return 0;
-
- /*
- * Look for sets of physically contiguous pages in the user buffer.
- * This will allow us to optimize Expected RcvArray entry usage by
- * using the bigger supported sizes.
- */
- pfn = page_to_pfn(pages[0]);
- for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
- this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;
-
- /*
- * If the PFNs are not sequential, the pages are not physically
- * contiguous.
- */
- if (this_pfn != ++pfn) {
- /*
- * At this point we have to loop over the set of
- * physically contiguous pages and break them down into
- * sizes supported by the HW.
- * There are two main constraints:
- * 1. The max buffer size is MAX_EXPECTED_BUFFER.
- * If the total set size is bigger than that,
- * program only a MAX_EXPECTED_BUFFER chunk.
- * 2. The buffer size has to be a power of two. If
- * it is not, round down to the closest power of
- * 2 and program that size.
- */
- while (pagecount) {
- int maxpages = pagecount;
- u32 bufsize = pagecount * PAGE_SIZE;
-
- if (bufsize > MAX_EXPECTED_BUFFER)
- maxpages =
- MAX_EXPECTED_BUFFER >>
- PAGE_SHIFT;
- else if (!is_power_of_2(bufsize))
- maxpages =
- rounddown_pow_of_two(bufsize) >>
- PAGE_SHIFT;
-
- list[setcount].idx = pageidx;
- list[setcount].count = maxpages;
- pagecount -= maxpages;
- pageidx += maxpages;
- setcount++;
- }
- pageidx = i;
- pagecount = 1;
- pfn = this_pfn;
- } else {
- pagecount++;
- }
- }
- return setcount;
-}
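
As a standalone illustration of the chunking rule above: the loop carves each physically contiguous run into power-of-two chunks no larger than MAX_EXPECTED_BUFFER. The sketch below models only that arithmetic; it is not driver code, and the PAGE_SIZE and MAX_EXPECTED_BUFFER values (4 KiB and 2 MiB) are assumptions for the demo.

/*
 * Standalone model of the find_phys_blocks() chunking arithmetic.
 * Assumed values: 4 KiB pages, MAX_EXPECTED_BUFFER = 2 MiB.
 */
#include <stdio.h>

#define PAGE_SHIFT_X 12
#define PAGE_SIZE_X (1u << PAGE_SHIFT_X)
#define MAX_EXPECTED_BUFFER_X (2048u * 1024u)

/* Largest power of two that is <= v (v must be non-zero). */
static unsigned rounddown_pow_of_two_x(unsigned v)
{
	unsigned r = 1;

	while (r * 2 <= v)
		r *= 2;
	return r;
}

int main(void)
{
	unsigned pagecount = 700;	/* one contiguous 700-page run */
	unsigned pageidx = 0;

	while (pagecount) {
		unsigned maxpages = pagecount;
		unsigned bufsize = pagecount * PAGE_SIZE_X;

		if (bufsize > MAX_EXPECTED_BUFFER_X)
			maxpages = MAX_EXPECTED_BUFFER_X >> PAGE_SHIFT_X;
		else if (bufsize & (bufsize - 1))	/* not a power of 2 */
			maxpages = rounddown_pow_of_two_x(bufsize) >>
				   PAGE_SHIFT_X;

		printf("chunk at page %u: %u pages\n", pageidx, maxpages);
		pageidx += maxpages;
		pagecount -= maxpages;
	}
	return 0;
}

With these assumptions, a 700-page run is carved into chunks of 512 + 128 + 32 + 16 + 8 + 4 pages, each a power of two that a single RcvArray entry can describe.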
-
-/**
- * program_rcvarray() - program an RcvArray group with receive buffers
- * @fp: file pointer
- * @vaddr: starting user virtual address
- * @grp: RcvArray group
- * @sets: array of struct tid_pageset holding information on physically
- * contiguous chunks from the user buffer
- * @start: starting index into sets array
- * @count: number of struct tid_pageset's to program
- * @pages: an array of struct page * for the user buffer
- * @tidlist: the array of u32 elements where the information about the
- * programmed RcvArray entries is to be encoded.
- * @tididx: starting offset into tidlist
- * @pmapped: (output parameter) number of pages programmed into the RcvArray
- * entries.
- *
- * This function will program up to 'count' number of RcvArray entries from the
- * group 'grp'. To make best use of write-combining writes, the function will
- * perform writes to the unused RcvArray entries which will be ignored by the
- * HW. Each RcvArray entry will be programmed with a physically contiguous
- * buffer chunk from the user's virtual buffer.
- *
- * Return:
- * -EINVAL if the requested count is larger than the size of the group,
- * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
- * number of RcvArray entries programmed.
- */
-static int program_rcvarray(struct file *fp, unsigned long vaddr,
- struct tid_group *grp,
- struct tid_pageset *sets,
- unsigned start, u16 count, struct page **pages,
- u32 *tidlist, unsigned *tididx, unsigned *pmapped)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- u16 idx;
- u32 tidinfo = 0, rcventry, useidx = 0;
- int mapped = 0;
-
- /* Count should never be larger than the group size */
- if (count > grp->size)
- return -EINVAL;
-
- /* Find the first unused entry in the group */
- for (idx = 0; idx < grp->size; idx++) {
- if (!(grp->map & (1 << idx))) {
- useidx = idx;
- break;
- }
- rcv_array_wc_fill(dd, grp->base + idx);
- }
-
- idx = 0;
- while (idx < count) {
- u16 npages, pageidx, setidx = start + idx;
- int ret = 0;
-
- /*
- * If this entry in the group is used, move to the next one.
- * If we go past the end of the group, exit the loop.
- */
- if (useidx >= grp->size) {
- break;
- } else if (grp->map & (1 << useidx)) {
- rcv_array_wc_fill(dd, grp->base + useidx);
- useidx++;
- continue;
- }
-
- rcventry = grp->base + useidx;
- npages = sets[setidx].count;
- pageidx = sets[setidx].idx;
-
- ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE),
- rcventry, grp, pages + pageidx,
- npages);
- if (ret)
- return ret;
- mapped += npages;
-
- tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
- EXP_TID_SET(LEN, npages);
- tidlist[(*tididx)++] = tidinfo;
- grp->used++;
- grp->map |= 1 << useidx++;
- idx++;
- }
-
- /* Fill the rest of the group with "blank" writes */
- for (; useidx < grp->size; useidx++)
- rcv_array_wc_fill(dd, grp->base + useidx);
- *pmapped = mapped;
- return idx;
-}
-
-static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
- u32 rcventry, struct tid_group *grp,
- struct page **pages, unsigned npages)
-{
- int ret;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct tid_rb_node *node;
- struct hfi1_devdata *dd = uctxt->dd;
- struct rb_root *root = &fd->tid_rb_root;
- dma_addr_t phys;
-
- /*
- * Allocate the node first so we can handle a potential
- * failure before we've programmed anything.
- */
- node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
- GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- phys = pci_map_single(dd->pcidev,
- __va(page_to_phys(pages[0])),
- npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, phys)) {
- dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
- phys);
- kfree(node);
- return -EFAULT;
- }
-
- node->mmu.addr = vaddr;
- node->mmu.len = npages * PAGE_SIZE;
- node->phys = page_to_phys(pages[0]);
- node->npages = npages;
- node->rcventry = rcventry;
- node->dma_addr = phys;
- node->grp = grp;
- node->freed = false;
- memcpy(node->pages, pages, sizeof(struct page *) * npages);
-
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- ret = mmu_rb_insert(root, &node->mmu);
- else
- ret = hfi1_mmu_rb_insert(root, &node->mmu);
-
- if (ret) {
- hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
- node->rcventry, node->mmu.addr, node->phys, ret);
- pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- kfree(node);
- return -EFAULT;
- }
- hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
- trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
- node->mmu.addr, node->phys, phys);
- return 0;
-}
-
-static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
- struct tid_group **grp)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- struct tid_rb_node *node;
- u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
- u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;
-
- if (tididx >= uctxt->expected_count) {
- dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
- tididx, uctxt->ctxt);
- return -EINVAL;
- }
-
- if (tidctrl == 0x3)
- return -EINVAL;
-
- rcventry = tididx + (tidctrl - 1);
-
- node = fd->entry_to_rb[rcventry];
- if (!node || node->rcventry != (uctxt->expected_base + rcventry))
- return -EBADF;
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
- else
- hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
-
- if (grp)
- *grp = node->grp;
- clear_tid_node(fd, fd->subctxt, node);
- return 0;
-}
-
-static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
- struct tid_rb_node *node)
-{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
-
- trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
- node->npages, node->mmu.addr, node->phys,
- node->dma_addr);
-
- hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0);
- /*
- * Make sure the device has seen the write before we unpin the
- * pages.
- */
- flush_wc();
-
- pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
- PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
- fd->tid_n_pinned -= node->npages;
-
- node->grp->used--;
- node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
-
- if (node->grp->used == node->grp->size - 1)
- tid_group_move(node->grp, &uctxt->tid_full_list,
- &uctxt->tid_used_list);
- else if (!node->grp->used)
- tid_group_move(node->grp, &uctxt->tid_used_list,
- &uctxt->tid_group_list);
- kfree(node);
-}
-
-static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
- struct exp_tid_set *set, struct rb_root *root)
-{
- struct tid_group *grp, *ptr;
- struct hfi1_filedata *fd = container_of(root, struct hfi1_filedata,
- tid_rb_root);
- int i;
-
- list_for_each_entry_safe(grp, ptr, &set->list, list) {
- list_del_init(&grp->list);
-
- for (i = 0; i < grp->size; i++) {
- if (grp->map & (1 << i)) {
- u16 rcventry = grp->base + i;
- struct tid_rb_node *node;
-
- node = fd->entry_to_rb[rcventry -
- uctxt->expected_base];
- if (!node || node->rcventry != rcventry)
- continue;
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, NULL);
- else
- hfi1_mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu);
- clear_tid_node(fd, -1, node);
- }
- }
- }
-}
-
-static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
-{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
- struct hfi1_ctxtdata *uctxt = fdata->uctxt;
- struct tid_rb_node *node =
- container_of(mnode, struct tid_rb_node, mmu);
-
- if (node->freed)
- return 0;
-
- trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
- node->rcventry, node->npages, node->dma_addr);
- node->freed = true;
-
- spin_lock(&fdata->invalid_lock);
- if (fdata->invalid_tid_idx < uctxt->expected_count) {
- fdata->invalid_tids[fdata->invalid_tid_idx] =
- rcventry2tidinfo(node->rcventry - uctxt->expected_base);
- fdata->invalid_tids[fdata->invalid_tid_idx] |=
- EXP_TID_SET(LEN, node->npages);
- if (!fdata->invalid_tid_idx) {
- unsigned long *ev;
-
- /*
- * hfi1_set_uevent_bits() sets a user event flag
- * for all processes. Because calling into the
- * driver to process TID cache invalidations is
- * expensive and TID cache invalidations are
- * handled on a per-process basis, we can
- * optimize this to set the flag only for the
- * process in question.
- */
- ev = uctxt->dd->events +
- (((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
- set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
- }
- fdata->invalid_tid_idx++;
- }
- spin_unlock(&fdata->invalid_lock);
- return 0;
-}
-
-static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
-{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
- u32 base = fdata->uctxt->expected_base;
-
- fdata->entry_to_rb[tnode->rcventry - base] = tnode;
- return 0;
-}
-
-static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- struct mm_struct *mm)
-{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
- u32 base = fdata->uctxt->expected_base;
-
- fdata->entry_to_rb[tnode->rcventry - base] = NULL;
-}
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/staging/rdma/hfi1/user_exp_rcv.h
deleted file mode 100644
index 9bc8d9fba..000000000
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _HFI1_USER_EXP_RCV_H
-#define _HFI1_USER_EXP_RCV_H
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "hfi.h"
-
-#define EXP_TID_TIDLEN_MASK 0x7FFULL
-#define EXP_TID_TIDLEN_SHIFT 0
-#define EXP_TID_TIDCTRL_MASK 0x3ULL
-#define EXP_TID_TIDCTRL_SHIFT 20
-#define EXP_TID_TIDIDX_MASK 0x3FFULL
-#define EXP_TID_TIDIDX_SHIFT 22
-#define EXP_TID_GET(tid, field) \
- (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
-
-#define EXP_TID_SET(field, value) \
- (((value) & EXP_TID_TID##field##_MASK) << \
- EXP_TID_TID##field##_SHIFT)
-#define EXP_TID_CLEAR(tid, field) ({ \
- (tid) &= ~(EXP_TID_TID##field##_MASK << \
- EXP_TID_TID##field##_SHIFT); \
- })
-#define EXP_TID_RESET(tid, field, value) do { \
- EXP_TID_CLEAR(tid, field); \
- (tid) |= EXP_TID_SET(field, (value)); \
- } while (0)
-
-int hfi1_user_exp_rcv_init(struct file *);
-int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
-int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
-int hfi1_user_exp_rcv_clear(struct file *, struct hfi1_tid_info *);
-int hfi1_user_exp_rcv_invalid(struct file *, struct hfi1_tid_info *);
-
-#endif /* _HFI1_USER_EXP_RCV_H */
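
The EXP_TID_* macros above pack a TID's control bits (CTRL), RcvArray index (IDX) and length in pages (LEN) into a single u32 that is handed back to user space. Below is a minimal self-contained round-trip check; the mask/shift values are duplicated from the header so it builds on its own, and the example field values are arbitrary.

/* Standalone round-trip check of the EXP_TID_GET/SET packing above. */
#include <stdio.h>
#include <stdint.h>

#define EXP_TID_TIDLEN_MASK 0x7FFULL
#define EXP_TID_TIDLEN_SHIFT 0
#define EXP_TID_TIDCTRL_MASK 0x3ULL
#define EXP_TID_TIDCTRL_SHIFT 20
#define EXP_TID_TIDIDX_MASK 0x3FFULL
#define EXP_TID_TIDIDX_SHIFT 22
#define EXP_TID_GET(tid, field) \
	(((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
#define EXP_TID_SET(field, value) \
	(((value) & EXP_TID_TID##field##_MASK) << EXP_TID_TID##field##_SHIFT)

int main(void)
{
	/* Encode RcvArray index 5, TIDCtrl 1, 8 pages. */
	uint32_t tid = EXP_TID_SET(IDX, 5) | EXP_TID_SET(CTRL, 1) |
		       EXP_TID_SET(LEN, 8);

	printf("idx=%llu ctrl=%llu len=%llu pages\n",
	       EXP_TID_GET(tid, IDX), EXP_TID_GET(tid, CTRL),
	       EXP_TID_GET(tid, LEN));
	return 0;
}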
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c
deleted file mode 100644
index 88e10b5f5..000000000
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/module.h>
-
-#include "hfi.h"
-
-static unsigned long cache_size = 256;
-module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
-
-/*
- * Determine whether the caller can pin pages.
- *
- * This function should be used in the implementation of buffer caches.
- * The cache implementation should call this function prior to attempting
- * to pin buffer pages in order to determine whether it should do so.
- * The function computes cache limits based on the configured ulimit and
- * cache size. Use of this function is especially important for caches
- * which are not limited in any other way (e.g. by HW resources) and, thus,
- * could keep caching buffers indefinitely.
- *
- */
-bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
-{
- unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
- size = (cache_size * (1UL << 20)); /* convert to bytes */
- unsigned usr_ctxts = dd->num_rcv_contexts - dd->first_user_ctxt;
- bool can_lock = capable(CAP_IPC_LOCK);
-
- /*
- * Calculate per-cache size. The calculation below uses only a quarter
- * of the available per-context limit. This leaves space for other
- * pinning. Should we worry about shared ctxts?
- */
- cache_limit = (ulimit / usr_ctxts) / 4;
-
- /* If ulimit isn't "unlimited", cap the cache size at the per-context limit. */
- if (ulimit != (-1UL) && size > cache_limit)
- size = cache_limit;
-
- /* Convert to number of pages */
- size = DIV_ROUND_UP(size, PAGE_SIZE);
-
- down_read(&current->mm->mmap_sem);
- pinned = current->mm->pinned_vm;
- up_read(&current->mm->mmap_sem);
-
- /* First, check the absolute limit against all pinned pages. */
- if (pinned + npages >= ulimit && !can_lock)
- return false;
-
- return ((nlocked + npages) <= size) || can_lock;
-}
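
To make the sizing above concrete, here is the same arithmetic as a standalone sketch with assumed inputs (a 64 MiB RLIMIT_MEMLOCK, 16 user contexts, the default cache_size of 256 MB, 4 KiB pages); the per-cache budget works out to a quarter of the per-context share of the ulimit.

/* Standalone model of the hfi1_can_pin_pages() limit computation. */
#include <stdio.h>

int main(void)
{
	unsigned long ulimit = 64ul << 20;	/* RLIMIT_MEMLOCK in bytes */
	unsigned long usr_ctxts = 16;		/* user receive contexts */
	unsigned long size = 256ul << 20;	/* cache_size in bytes */
	unsigned long page_size = 4096;
	unsigned long cache_limit = (ulimit / usr_ctxts) / 4;

	/* Clamp cache_size to the per-context budget unless unlimited. */
	if (ulimit != -1ul && size > cache_limit)
		size = cache_limit;
	size = (size + page_size - 1) / page_size;	/* DIV_ROUND_UP */
	printf("per-cache pin limit: %lu pages\n", size);	/* 256 */
	return 0;
}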
-
-int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
- struct page **pages)
-{
- int ret;
-
- ret = get_user_pages_fast(vaddr, npages, writable, pages);
- if (ret < 0)
- return ret;
-
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm += ret;
- up_write(&current->mm->mmap_sem);
-
- return ret;
-}
-
-void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
- size_t npages, bool dirty)
-{
- size_t i;
-
- for (i = 0; i < npages; i++) {
- if (dirty)
- set_page_dirty_lock(p[i]);
- put_page(p[i]);
- }
-
- if (mm) { /* during close after signal, mm can be NULL */
- down_write(&mm->mmap_sem);
- mm->pinned_vm -= npages;
- up_write(&mm->mmap_sem);
- }
-}
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
deleted file mode 100644
index d53a65954..000000000
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ /dev/null
@@ -1,1590 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/mmu_context.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-
-#include "hfi.h"
-#include "sdma.h"
-#include "user_sdma.h"
-#include "verbs.h" /* for the headers */
-#include "common.h" /* for struct hfi1_tid_info */
-#include "trace.h"
-#include "mmu_rb.h"
-
-static uint hfi1_sdma_comp_ring_size = 128;
-module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
-MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
-
-/* The maximum number of Data io vectors per message/request */
-#define MAX_VECTORS_PER_REQ 8
-/*
- * Maximum number of packets to send from each message/request
- * before moving to the next one.
- */
-#define MAX_PKTS_PER_QUEUE 16
-
-#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
-
-#define req_opcode(x) \
- (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
-#define req_version(x) \
- (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_VERSION_MASK)
-#define req_iovcnt(x) \
- (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
-
-/* Number of BTH.PSN bits used for sequence number in expected rcvs */
-#define BTH_SEQ_MASK 0x7ffull
-
-/*
- * Define fields in the KDETH header so we can update the header
- * template.
- */
-#define KDETH_OFFSET_SHIFT 0
-#define KDETH_OFFSET_MASK 0x7fff
-#define KDETH_OM_SHIFT 15
-#define KDETH_OM_MASK 0x1
-#define KDETH_TID_SHIFT 16
-#define KDETH_TID_MASK 0x3ff
-#define KDETH_TIDCTRL_SHIFT 26
-#define KDETH_TIDCTRL_MASK 0x3
-#define KDETH_INTR_SHIFT 28
-#define KDETH_INTR_MASK 0x1
-#define KDETH_SH_SHIFT 29
-#define KDETH_SH_MASK 0x1
-#define KDETH_HCRC_UPPER_SHIFT 16
-#define KDETH_HCRC_UPPER_MASK 0xff
-#define KDETH_HCRC_LOWER_SHIFT 24
-#define KDETH_HCRC_LOWER_MASK 0xff
-
-#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
-#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
-
-#define KDETH_GET(val, field) \
- (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
-#define KDETH_SET(dw, field, val) do { \
- u32 dwval = le32_to_cpu(dw); \
- dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
- dwval |= (((val) & KDETH_##field##_MASK) << \
- KDETH_##field##_SHIFT); \
- dw = cpu_to_le32(dwval); \
- } while (0)
-
-#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
- do { \
- if ((idx) < ARRAY_SIZE((arr))) \
- (arr)[(idx++)] = sdma_build_ahg_descriptor( \
- (__force u16)(value), (dw), (bit), \
- (width)); \
- else \
- return -ERANGE; \
- } while (0)
-
-/* KDETH OM multipliers and switch over point */
-#define KDETH_OM_SMALL 4
-#define KDETH_OM_LARGE 64
-#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
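
The 15-bit KDETH OFFSET field counts units of KDETH_OM_SMALL (4 bytes) or KDETH_OM_LARGE (64 bytes), with the OM bit recording which multiplier applies. The sketch below models only that encoding; for illustration it picks OM from whether the offset overflows the small units, whereas the driver actually derives OM from the TID pair size (see set_txreq_header() further down), and a little-endian host is assumed so the le32 conversions are dropped.

/* Standalone sketch of the KDETH OFFSET/OM encoding. */
#include <stdio.h>
#include <stdint.h>

#define KDETH_OFFSET_SHIFT 0
#define KDETH_OFFSET_MASK 0x7fff
#define KDETH_OM_SHIFT 15
#define KDETH_OM_SMALL 4
#define KDETH_OM_LARGE 64

int main(void)
{
	uint32_t tidoffset = 1024 * 1024;	/* 1 MiB into the TID buffer */
	/* Illustrative OM choice: large units once small units overflow. */
	int om_large = tidoffset / KDETH_OM_SMALL > KDETH_OFFSET_MASK;
	uint32_t om = om_large ? KDETH_OM_LARGE : KDETH_OM_SMALL;
	uint32_t dw = 0;

	dw |= (uint32_t)om_large << KDETH_OM_SHIFT;
	dw |= ((tidoffset / om) & KDETH_OFFSET_MASK) << KDETH_OFFSET_SHIFT;
	printf("ver_tid_offset bits: 0x%08x (OM=%d, %u units)\n",
	       (unsigned)dw, om_large, (unsigned)(tidoffset / om));
	return 0;
}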
-
-/* Last packet in the request */
-#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
-
-#define SDMA_REQ_IN_USE 0
-#define SDMA_REQ_FOR_THREAD 1
-#define SDMA_REQ_SEND_DONE 2
-#define SDMA_REQ_HAVE_AHG 3
-#define SDMA_REQ_HAS_ERROR 4
-#define SDMA_REQ_DONE_ERROR 5
-
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
-
-/*
- * Maximum retry attempts to submit a TX request
- * before putting the process to sleep.
- */
-#define MAX_DEFER_RETRY_COUNT 1
-
-static unsigned initial_pkt_count = 8;
-
-#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
-
-struct user_sdma_iovec {
- struct list_head list;
- struct iovec iov;
- /* number of pages in this vector */
- unsigned npages;
- /* array of pinned pages for this vector */
- struct page **pages;
- /*
- * offset into the virtual address space of the vector at
- * which we last left off.
- */
- u64 offset;
-};
-
-struct sdma_mmu_node {
- struct mmu_rb_node rb;
- struct list_head list;
- struct hfi1_user_sdma_pkt_q *pq;
- atomic_t refcount;
- struct page **pages;
- unsigned npages;
-};
-
-struct user_sdma_request {
- struct sdma_req_info info;
- struct hfi1_user_sdma_pkt_q *pq;
- struct hfi1_user_sdma_comp_q *cq;
- /* This is the original header from user space */
- struct hfi1_pkt_header hdr;
- /*
- * Pointer to the SDMA engine for this request.
- * Since different requests could be on different VLs,
- * each request needs its own engine pointer.
- */
- struct sdma_engine *sde;
- u8 ahg_idx;
- u32 ahg[9];
- /*
- * KDETH.Offset (Eager) field
- * We need to remember the initial value so the headers
- * can be updated properly.
- */
- u32 koffset;
- /*
- * KDETH.OFFSET (TID) field
- * The offset can cover multiple packets, depending on the
- * size of the TID entry.
- */
- u32 tidoffset;
- /*
- * KDETH.OM
- * Remember this because the header template always sets it
- * to 0.
- */
- u8 omfactor;
- /*
- * We copy the iovs for this request (based on
- * info.iovcnt). These are only the data vectors
- */
- unsigned data_iovs;
- /* total length of the data in the request */
- u32 data_len;
- /* progress index moving along the iovs array */
- unsigned iov_idx;
- struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
- /* number of elements copied to the tids array */
- u16 n_tids;
- /* TID array values copied from the tid_iov vector */
- u32 *tids;
- u16 tididx;
- u32 sent;
- u64 seqnum;
- u64 seqcomp;
- u64 seqsubmitted;
- struct list_head txps;
- unsigned long flags;
- /* status of the last txreq completed */
- int status;
-};
-
-/*
- * A single txreq could span up to 3 physical pages when the MTU
- * is sufficiently large (> 4K). Each of the IOV pointers also
- * needs its own set of flags so the vectors can be handled
- * independently of one another.
- */
-struct user_sdma_txreq {
- /* Packet header for the txreq */
- struct hfi1_pkt_header hdr;
- struct sdma_txreq txreq;
- struct list_head list;
- struct user_sdma_request *req;
- u16 flags;
- unsigned busycount;
- u64 seqnum;
-};
-
-#define SDMA_DBG(req, fmt, ...) \
- hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
- (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
- ##__VA_ARGS__)
-#define SDMA_Q_DBG(pq, fmt, ...) \
- hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
- (pq)->subctxt, ##__VA_ARGS__)
-
-static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
-static int num_user_pages(const struct iovec *);
-static void user_sdma_txreq_cb(struct sdma_txreq *, int);
-static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
-static void user_sdma_free_request(struct user_sdma_request *, bool);
-static int pin_vector_pages(struct user_sdma_request *,
- struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
- unsigned);
-static int check_header_template(struct user_sdma_request *,
- struct hfi1_pkt_header *, u32, u32);
-static int set_txreq_header(struct user_sdma_request *,
- struct user_sdma_txreq *, u32);
-static int set_txreq_header_ahg(struct user_sdma_request *,
- struct user_sdma_txreq *, u32);
-static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
- struct hfi1_user_sdma_comp_q *,
- u16, enum hfi1_sdma_comp_state, int);
-static inline u32 set_pkt_bth_psn(__be32, u8, u32);
-static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
-
-static int defer_packet_queue(
- struct sdma_engine *,
- struct iowait *,
- struct sdma_txreq *,
- unsigned seq);
-static void activate_packet_queue(struct iowait *, int);
-static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
-static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
-static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
-
-static struct mmu_rb_ops sdma_rb_ops = {
- .filter = sdma_rb_filter,
- .insert = sdma_rb_insert,
- .remove = sdma_rb_remove,
- .invalidate = sdma_rb_invalidate
-};
-
-static int defer_packet_queue(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *txreq,
- unsigned seq)
-{
- struct hfi1_user_sdma_pkt_q *pq =
- container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
- struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
- struct user_sdma_txreq *tx =
- container_of(txreq, struct user_sdma_txreq, txreq);
-
- if (sdma_progress(sde, seq, txreq)) {
- if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
- goto eagain;
- }
- /*
- * We are assuming that if the list is enqueued somewhere, it
- * is to the dmawait list since that is the only place where
- * it is supposed to be enqueued.
- */
- xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
- write_seqlock(&dev->iowait_lock);
- if (list_empty(&pq->busy.list))
- list_add_tail(&pq->busy.list, &sde->dmawait);
- write_sequnlock(&dev->iowait_lock);
- return -EBUSY;
-eagain:
- return -EAGAIN;
-}
-
-static void activate_packet_queue(struct iowait *wait, int reason)
-{
- struct hfi1_user_sdma_pkt_q *pq =
- container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
- xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
- wake_up(&wait->wait_dma);
-}
-
-static void sdma_kmem_cache_ctor(void *obj)
-{
- struct user_sdma_txreq *tx = obj;
-
- memset(tx, 0, sizeof(*tx));
-}
-
-int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
-{
- struct hfi1_filedata *fd;
- int ret = 0;
- unsigned memsize;
- char buf[64];
- struct hfi1_devdata *dd;
- struct hfi1_user_sdma_comp_q *cq;
- struct hfi1_user_sdma_pkt_q *pq;
- unsigned long flags;
-
- if (!uctxt || !fp) {
- ret = -EBADF;
- goto done;
- }
-
- fd = fp->private_data;
-
- if (!hfi1_sdma_comp_ring_size) {
- ret = -EINVAL;
- goto done;
- }
-
- dd = uctxt->dd;
-
- pq = kzalloc(sizeof(*pq), GFP_KERNEL);
- if (!pq)
- goto pq_nomem;
-
- memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
- pq->reqs = kzalloc(memsize, GFP_KERNEL);
- if (!pq->reqs)
- goto pq_reqs_nomem;
-
- INIT_LIST_HEAD(&pq->list);
- pq->dd = dd;
- pq->ctxt = uctxt->ctxt;
- pq->subctxt = fd->subctxt;
- pq->n_max_reqs = hfi1_sdma_comp_ring_size;
- pq->state = SDMA_PKT_Q_INACTIVE;
- atomic_set(&pq->n_reqs, 0);
- init_waitqueue_head(&pq->wait);
- pq->sdma_rb_root = RB_ROOT;
- INIT_LIST_HEAD(&pq->evict);
- spin_lock_init(&pq->evict_lock);
-
- iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
- activate_packet_queue, NULL);
- pq->reqidx = 0;
- snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
- fd->subctxt);
- pq->txreq_cache = kmem_cache_create(buf,
- sizeof(struct user_sdma_txreq),
- L1_CACHE_BYTES,
- SLAB_HWCACHE_ALIGN,
- sdma_kmem_cache_ctor);
- if (!pq->txreq_cache) {
- dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
- uctxt->ctxt);
- goto pq_txreq_nomem;
- }
- fd->pq = pq;
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq)
- goto cq_nomem;
-
- memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size);
- cq->comps = vmalloc_user(memsize);
- if (!cq->comps)
- goto cq_comps_nomem;
-
- cq->nentries = hfi1_sdma_comp_ring_size;
- fd->cq = cq;
-
- ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops);
- if (ret) {
- dd_dev_err(dd, "Failed to register with MMU %d", ret);
- goto done;
- }
-
- spin_lock_irqsave(&uctxt->sdma_qlock, flags);
- list_add(&pq->list, &uctxt->sdma_queues);
- spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
- goto done;
-
-cq_comps_nomem:
- kfree(cq);
-cq_nomem:
- kmem_cache_destroy(pq->txreq_cache);
-pq_txreq_nomem:
- kfree(pq->reqs);
-pq_reqs_nomem:
- kfree(pq);
- fd->pq = NULL;
-pq_nomem:
- ret = -ENOMEM;
-done:
- return ret;
-}
-
-int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
-{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_user_sdma_pkt_q *pq;
- unsigned long flags;
-
- hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
- uctxt->ctxt, fd->subctxt);
- pq = fd->pq;
- if (pq) {
- hfi1_mmu_rb_unregister(&pq->sdma_rb_root);
- spin_lock_irqsave(&uctxt->sdma_qlock, flags);
- if (!list_empty(&pq->list))
- list_del_init(&pq->list);
- spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
- iowait_sdma_drain(&pq->busy);
- /* Wait until all requests have been freed. */
- wait_event_interruptible(
- pq->wait,
- (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
- kfree(pq->reqs);
- kmem_cache_destroy(pq->txreq_cache);
- kfree(pq);
- fd->pq = NULL;
- }
- if (fd->cq) {
- vfree(fd->cq->comps);
- kfree(fd->cq);
- fd->cq = NULL;
- }
- return 0;
-}
-
-int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
- unsigned long dim, unsigned long *count)
-{
- int ret = 0, i = 0;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
- struct hfi1_user_sdma_comp_q *cq = fd->cq;
- struct hfi1_devdata *dd = pq->dd;
- unsigned long idx = 0;
- u8 pcount = initial_pkt_count;
- struct sdma_req_info info;
- struct user_sdma_request *req;
- u8 opcode, sc, vl;
-
- if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
- hfi1_cdbg(
- SDMA,
- "[%u:%u:%u] First vector not big enough for header %lu/%lu",
- dd->unit, uctxt->ctxt, fd->subctxt,
- iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
- return -EINVAL;
- }
- ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
- if (ret) {
- hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
- dd->unit, uctxt->ctxt, fd->subctxt, ret);
- return -EFAULT;
- }
-
- trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
- (u16 *)&info);
- if (cq->comps[info.comp_idx].status == QUEUED ||
- test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) {
- hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
- dd->unit, uctxt->ctxt, fd->subctxt,
- info.comp_idx);
- return -EBADSLT;
- }
- if (!info.fragsize) {
- hfi1_cdbg(SDMA,
- "[%u:%u:%u:%u] Request does not specify fragsize",
- dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
- return -EINVAL;
- }
- /*
- * We've done all the safety checks that we can up to this point, so
- * "allocate" the request entry.
- */
- hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
- uctxt->ctxt, fd->subctxt, info.comp_idx);
- req = pq->reqs + info.comp_idx;
- memset(req, 0, sizeof(*req));
- /* Mark the request as IN_USE before we start filling it in. */
- set_bit(SDMA_REQ_IN_USE, &req->flags);
- req->data_iovs = req_iovcnt(info.ctrl) - 1;
- req->pq = pq;
- req->cq = cq;
- req->status = -1;
- INIT_LIST_HEAD(&req->txps);
-
- memcpy(&req->info, &info, sizeof(info));
-
- if (req_opcode(info.ctrl) == EXPECTED)
- req->data_iovs--;
-
- if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
- SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
- MAX_VECTORS_PER_REQ);
- return -EINVAL;
- }
- /* Copy the header from the user buffer */
- ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
- sizeof(req->hdr));
- if (ret) {
- SDMA_DBG(req, "Failed to copy header template (%d)", ret);
- ret = -EFAULT;
- goto free_req;
- }
-
- /* If Static rate control is not enabled, sanitize the header. */
- if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
- req->hdr.pbc[2] = 0;
-
- /* Validate the opcode. Do not trust packets from user space blindly. */
- opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
- if ((opcode & USER_OPCODE_CHECK_MASK) !=
- USER_OPCODE_CHECK_VAL) {
- SDMA_DBG(req, "Invalid opcode (%d)", opcode);
- ret = -EINVAL;
- goto free_req;
- }
- /*
- * Validate the vl. Do not trust packets from user space blindly.
- * VL comes from PBC, SC comes from LRH, and the VL needs to
- * match the SC lookup.
- */
- vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
- sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
- (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
- if (vl >= dd->pport->vls_operational ||
- vl != sc_to_vlt(dd, sc)) {
- SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
- ret = -EINVAL;
- goto free_req;
- }
-
- /*
- * We should also check BTH.lnh: if it says the next header is a GRH, then
- * the RXE parsing will be off and will land in the middle of the KDETH
- * or miss it entirely.
- */
- if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
- SDMA_DBG(req, "User tried to pass in a GRH");
- ret = -EINVAL;
- goto free_req;
- }
-
- req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
- /*
- * Calculate the initial TID offset based on the values of
- * KDETH.OFFSET and KDETH.OM that are passed in.
- */
- req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
- (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
- KDETH_OM_LARGE : KDETH_OM_SMALL);
- SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
- idx++;
-
- /* Save all the IO vector structures */
- while (i < req->data_iovs) {
- INIT_LIST_HEAD(&req->iovs[i].list);
- memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
- ret = pin_vector_pages(req, &req->iovs[i]);
- if (ret) {
- req->status = ret;
- goto free_req;
- }
- req->data_len += req->iovs[i++].iov.iov_len;
- }
- SDMA_DBG(req, "total data length %u", req->data_len);
-
- if (pcount > req->info.npkts)
- pcount = req->info.npkts;
- /*
- * Copy any TID info
- * User space will provide the TID info only when the
- * request type is EXPECTED. This is true even if there is
- * only one packet in the request and the header is already
- * setup. The reason for the singular TID case is that the
- * driver needs to perform safety checks.
- */
- if (req_opcode(req->info.ctrl) == EXPECTED) {
- u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
-
- if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
- ret = -EINVAL;
- goto free_req;
- }
- req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);
- if (!req->tids) {
- ret = -ENOMEM;
- goto free_req;
- }
- /*
- * We have to copy all of the tids because they may vary
- * in size and, therefore, the TID count might not be
- * equal to the pkt count. However, there is no way to
- * tell at this point.
- */
- ret = copy_from_user(req->tids, iovec[idx].iov_base,
- ntids * sizeof(*req->tids));
- if (ret) {
- SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
- ntids, ret);
- ret = -EFAULT;
- goto free_req;
- }
- req->n_tids = ntids;
- idx++;
- }
-
- /* Have to select the engine */
- req->sde = sdma_select_engine_vl(dd,
- (u32)(uctxt->ctxt + fd->subctxt),
- vl);
- if (!req->sde || !sdma_running(req->sde)) {
- ret = -ECOMM;
- goto free_req;
- }
-
- /* We don't need an AHG entry if the request contains only one packet */
- if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
- int ahg = sdma_ahg_alloc(req->sde);
-
- if (likely(ahg >= 0)) {
- req->ahg_idx = (u8)ahg;
- set_bit(SDMA_REQ_HAVE_AHG, &req->flags);
- }
- }
-
- set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
- atomic_inc(&pq->n_reqs);
- /* Send the first N packets in the request to buy us some time */
- ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY)) {
- req->status = ret;
- goto free_req;
- }
-
- /*
- * It is possible that the SDMA engine would have processed all the
- * submitted packets by the time we get here. Therefore, only set
- * packet queue state to ACTIVE if there are still uncompleted
- * requests.
- */
- if (atomic_read(&pq->n_reqs))
- xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
-
- /*
- * This is a somewhat blocking send implementation.
- * The driver will block the caller until all packets of the
- * request have been submitted to the SDMA engine. However, it
- * will not wait for send completions.
- */
- while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
- ret = user_sdma_send_pkts(req, pcount);
- if (ret < 0) {
- if (ret != -EBUSY) {
- req->status = ret;
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
- if (ACCESS_ONCE(req->seqcomp) ==
- req->seqsubmitted - 1)
- goto free_req;
- return ret;
- }
- wait_event_interruptible_timeout(
- pq->busy.wait_dma,
- (pq->state == SDMA_PKT_Q_ACTIVE),
- msecs_to_jiffies(
- SDMA_IOWAIT_TIMEOUT));
- }
- }
- *count += idx;
- return 0;
-free_req:
- user_sdma_free_request(req, true);
- pq_update(pq);
- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
- return ret;
-}
-
-static inline u32 compute_data_length(struct user_sdma_request *req,
- struct user_sdma_txreq *tx)
-{
- /*
- * Determine the proper size of the packet data.
- * The size of the data of the first packet is in the header
- * template. However, it includes the header and ICRC, which need
- * to be subtracted.
- * The size of the remaining packets is the minimum of the frag
- * size (MTU) or remaining data in the request.
- */
- u32 len;
-
- if (!req->seqnum) {
- len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
- (sizeof(tx->hdr) - 4));
- } else if (req_opcode(req->info.ctrl) == EXPECTED) {
- u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
- PAGE_SIZE;
- /*
- * Get the data length based on the remaining space in the
- * TID pair.
- */
- len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
- /* If we've filled up the TID pair, move to the next one. */
- if (unlikely(!len) && ++req->tididx < req->n_tids &&
- req->tids[req->tididx]) {
- tidlen = EXP_TID_GET(req->tids[req->tididx],
- LEN) * PAGE_SIZE;
- req->tidoffset = 0;
- len = min_t(u32, tidlen, req->info.fragsize);
- }
- /*
- * Since the TID pairs map entire pages, make sure that we
- * are not going to try to send more data than we have
- * remaining.
- */
- len = min(len, req->data_len - req->sent);
- } else {
- len = min(req->data_len - req->sent, (u32)req->info.fragsize);
- }
- SDMA_DBG(req, "Data Length = %u", len);
- return len;
-}
-
-static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
-{
- /* (Size of complete header - size of PBC) + 4B ICRC + data length */
- return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
-}
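
A worked check of the length plumbing: get_lrh_len() returns the byte count covered by LRH.PktLen (the header without the PBC, plus the 4-byte ICRC, plus payload), and PBC2LRH()/LRH2PBC() convert between that and PBC.PbcLengthDWs. The header geometry below is an assumption for the demo (a 64-byte struct hfi1_pkt_header containing an 8-byte PBC).

/* Standalone round-trip of the PBC <-> LRH length conversions. */
#include <stdio.h>
#include <stdint.h>

#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)

int main(void)
{
	uint32_t hdr_size = 64;		/* assumed sizeof(hfi1_pkt_header) */
	uint32_t pbc_size = 8;		/* assumed sizeof(hdr.pbc) */
	uint32_t payload = 4096;	/* one 4 KiB fragment */
	uint32_t lrhlen = (hdr_size - pbc_size) + 4 + payload; /* get_lrh_len */
	uint32_t pbc_dws = LRH2PBC(lrhlen);

	printf("lrhlen=%u bytes, PbcLengthDWs=%u, back=%u bytes\n",
	       lrhlen, pbc_dws, (unsigned)PBC2LRH(pbc_dws));
	return 0;
}

With these numbers lrhlen is 4156 bytes, PbcLengthDWs is 1040, and converting back recovers 4156; that round-trip consistency is what set_txreq_header() relies on when it patches both fields together.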
-
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
-{
- int ret = 0;
- unsigned npkts = 0;
- struct user_sdma_txreq *tx = NULL;
- struct hfi1_user_sdma_pkt_q *pq = NULL;
- struct user_sdma_iovec *iovec = NULL;
-
- if (!req->pq)
- return -EINVAL;
-
- pq = req->pq;
-
- /* If tx completion has reported an error, we are done. */
- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
- return -EFAULT;
- }
-
- /*
- * Check if we might have sent the entire request already
- */
- if (unlikely(req->seqnum == req->info.npkts)) {
- if (!list_empty(&req->txps))
- goto dosend;
- return ret;
- }
-
- if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
- maxpkts = req->info.npkts - req->seqnum;
-
- while (npkts < maxpkts) {
- u32 datalen = 0, queued = 0, data_sent = 0;
- u64 iov_offset = 0;
-
- /*
- * Check whether any of the completions have come back
- * with errors. If so, we are not going to process any
- * more packets from this request.
- */
- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
- return -EFAULT;
- }
-
- tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
-
- tx->flags = 0;
- tx->req = req;
- tx->busycount = 0;
- INIT_LIST_HEAD(&tx->list);
-
- if (req->seqnum == req->info.npkts - 1)
- tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
-
- /*
- * Calculate the payload size - this is min of the fragment
- * (MTU) size or the remaining bytes in the request but only
- * if we have payload data.
- */
- if (req->data_len) {
- iovec = &req->iovs[req->iov_idx];
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
- if (++req->iov_idx == req->data_iovs) {
- ret = -EFAULT;
- goto free_txreq;
- }
- iovec = &req->iovs[req->iov_idx];
- WARN_ON(iovec->offset);
- }
-
- datalen = compute_data_length(req, tx);
- if (!datalen) {
- SDMA_DBG(req,
- "Request has data but pkt len is 0");
- ret = -EFAULT;
- goto free_tx;
- }
- }
-
- if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
- if (!req->seqnum) {
- u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
- u32 lrhlen = get_lrh_len(req->hdr, datalen);
- /*
- * Copy the request header into the tx header
- * because the HW needs a cacheline-aligned
- * address.
- * This copy can be optimized out if the hdr
- * member of user_sdma_request were also
- * cacheline aligned.
- */
- memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
- if (PBC2LRH(pbclen) != lrhlen) {
- pbclen = (pbclen & 0xf000) |
- LRH2PBC(lrhlen);
- tx->hdr.pbc[0] = cpu_to_le16(pbclen);
- }
- ret = sdma_txinit_ahg(&tx->txreq,
- SDMA_TXREQ_F_AHG_COPY,
- sizeof(tx->hdr) + datalen,
- req->ahg_idx, 0, NULL, 0,
- user_sdma_txreq_cb);
- if (ret)
- goto free_tx;
- ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
- &tx->hdr,
- sizeof(tx->hdr));
- if (ret)
- goto free_txreq;
- } else {
- int changes;
-
- changes = set_txreq_header_ahg(req, tx,
- datalen);
- if (changes < 0)
- goto free_tx;
- sdma_txinit_ahg(&tx->txreq,
- SDMA_TXREQ_F_USE_AHG,
- datalen, req->ahg_idx, changes,
- req->ahg, sizeof(req->hdr),
- user_sdma_txreq_cb);
- }
- } else {
- ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
- datalen, user_sdma_txreq_cb);
- if (ret)
- goto free_tx;
- /*
- * Modify the header for this packet. This only needs
- * to be done if we are not going to use AHG. Otherwise,
- * the HW will do it based on the changes we gave it
- * during sdma_txinit_ahg().
- */
- ret = set_txreq_header(req, tx, datalen);
- if (ret)
- goto free_txreq;
- }
-
- /*
- * If the request contains any data vectors, add up to
- * fragsize bytes to the descriptor.
- */
- while (queued < datalen &&
- (req->sent + data_sent) < req->data_len) {
- unsigned long base, offset;
- unsigned pageidx, len;
-
- base = (unsigned long)iovec->iov.iov_base;
- offset = offset_in_page(base + iovec->offset +
- iov_offset);
- pageidx = (((iovec->offset + iov_offset +
- base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
- len = offset + req->info.fragsize > PAGE_SIZE ?
- PAGE_SIZE - offset : req->info.fragsize;
- len = min((datalen - queued), len);
- ret = sdma_txadd_page(pq->dd, &tx->txreq,
- iovec->pages[pageidx],
- offset, len);
- if (ret) {
- SDMA_DBG(req, "SDMA txreq add page failed %d\n",
- ret);
- goto free_txreq;
- }
- iov_offset += len;
- queued += len;
- data_sent += len;
- if (unlikely(queued < datalen &&
- pageidx == iovec->npages &&
- req->iov_idx < req->data_iovs - 1)) {
- iovec->offset += iov_offset;
- iovec = &req->iovs[++req->iov_idx];
- iov_offset = 0;
- }
- }
- /*
- * The txreq was submitted successfully so we can update
- * the counters.
- */
- req->koffset += datalen;
- if (req_opcode(req->info.ctrl) == EXPECTED)
- req->tidoffset += datalen;
- req->sent += data_sent;
- if (req->data_len)
- iovec->offset += iov_offset;
- list_add_tail(&tx->txreq.list, &req->txps);
- /*
- * It is important to increment this here as it is used to
- * generate the BTH.PSN and, therefore, can't be bulk-updated
- * outside of the loop.
- */
- tx->seqnum = req->seqnum++;
- npkts++;
- }
-dosend:
- ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
- if (list_empty(&req->txps)) {
- req->seqsubmitted = req->seqnum;
- if (req->seqnum == req->info.npkts) {
- set_bit(SDMA_REQ_SEND_DONE, &req->flags);
- /*
- * The txreq has already been submitted to the HW queue
- * so we can free the AHG entry now. Corruption will not
- * happen due to the sequential manner in which
- * descriptors are processed.
- */
- if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
- sdma_ahg_free(req->sde, req->ahg_idx);
- }
- } else if (ret > 0) {
- req->seqsubmitted += ret;
- ret = 0;
- }
- return ret;
-
-free_txreq:
- sdma_txclean(pq->dd, &tx->txreq);
-free_tx:
- kmem_cache_free(pq->txreq_cache, tx);
- return ret;
-}
-
-/*
- * How many pages in this iovec element?
- */
-static inline int num_user_pages(const struct iovec *iov)
-{
- const unsigned long addr = (unsigned long)iov->iov_base;
- const unsigned long len = iov->iov_len;
- const unsigned long spage = addr & PAGE_MASK;
- const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
- return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
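
num_user_pages() counts how many pages a byte range touches, not its length in pages: a 100-byte vector that straddles a page boundary occupies two pages. A quick standalone check (4 KiB pages assumed):

/* Standalone check of the num_user_pages() span arithmetic. */
#include <stdio.h>

#define PAGE_SHIFT_X 12
#define PAGE_MASK_X (~((1ul << PAGE_SHIFT_X) - 1))

static int num_user_pages_x(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & PAGE_MASK_X;	/* first page */
	unsigned long epage = (addr + len - 1) & PAGE_MASK_X; /* last page */

	return 1 + ((epage - spage) >> PAGE_SHIFT_X);
}

int main(void)
{
	printf("%d\n", num_user_pages_x(0x1fce, 100));	/* 2: crosses 0x2000 */
	printf("%d\n", num_user_pages_x(0x2000, 4096));	/* 1: exactly a page */
	return 0;
}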
-
-/* Caller must hold pq->evict_lock */
-static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
-{
- u32 cleared = 0;
- struct sdma_mmu_node *node, *ptr;
-
- list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) {
- /* Make sure that no one is still using the node. */
- if (!atomic_read(&node->refcount)) {
- /*
- * Need to use the page count now as the remove callback
- * will free the node.
- */
- cleared += node->npages;
- spin_unlock(&pq->evict_lock);
- hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);
- spin_lock(&pq->evict_lock);
- if (cleared >= npages)
- break;
- }
- }
- return cleared;
-}
-
-static int pin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec)
-{
- int ret = 0, pinned, npages, cleared;
- struct page **pages;
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct sdma_mmu_node *node = NULL;
- struct mmu_rb_node *rb_node;
-
- rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
- (unsigned long)iovec->iov.iov_base,
- iovec->iov.iov_len);
- if (rb_node && !IS_ERR(rb_node))
- node = container_of(rb_node, struct sdma_mmu_node, rb);
- else
- rb_node = NULL;
-
- if (!node) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- node->rb.addr = (unsigned long)iovec->iov.iov_base;
- node->rb.len = iovec->iov.iov_len;
- node->pq = pq;
- atomic_set(&node->refcount, 0);
- INIT_LIST_HEAD(&node->list);
- }
-
- npages = num_user_pages(&iovec->iov);
- if (node->npages < npages) {
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- SDMA_DBG(req, "Failed page array alloc");
- ret = -ENOMEM;
- goto bail;
- }
- memcpy(pages, node->pages, node->npages * sizeof(*pages));
-
- npages -= node->npages;
-retry:
- if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) {
- spin_lock(&pq->evict_lock);
- cleared = sdma_cache_evict(pq, npages);
- spin_unlock(&pq->evict_lock);
- if (cleared >= npages)
- goto retry;
- }
- pinned = hfi1_acquire_user_pages(
- ((unsigned long)iovec->iov.iov_base +
- (node->npages * PAGE_SIZE)), npages, 0,
- pages + node->npages);
- if (pinned < 0) {
- kfree(pages);
- ret = pinned;
- goto bail;
- }
- if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, node->npages,
- pinned);
- ret = -EFAULT;
- goto bail;
- }
- kfree(node->pages);
- node->pages = pages;
- node->npages += pinned;
- npages = node->npages;
- spin_lock(&pq->evict_lock);
- if (!rb_node)
- list_add(&node->list, &pq->evict);
- else
- list_move(&node->list, &pq->evict);
- pq->n_locked += pinned;
- spin_unlock(&pq->evict_lock);
- }
- iovec->pages = node->pages;
- iovec->npages = npages;
-
- if (!rb_node) {
- ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
- if (ret) {
- spin_lock(&pq->evict_lock);
- list_del(&node->list);
- pq->n_locked -= node->npages;
- spin_unlock(&pq->evict_lock);
- ret = 0;
- goto bail;
- }
- } else {
- atomic_inc(&node->refcount);
- }
- return 0;
-bail:
- if (!rb_node)
- kfree(node);
- return ret;
-}
-
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned start, unsigned npages)
-{
- hfi1_release_user_pages(mm, pages + start, npages, 0);
- kfree(pages);
-}
-
-static int check_header_template(struct user_sdma_request *req,
- struct hfi1_pkt_header *hdr, u32 lrhlen,
- u32 datalen)
-{
- /*
- * Perform safety checks for any type of packet:
- * - transfer size is a multiple of 64 bytes
- * - packet length is a multiple of 4 bytes
- * - entire request length is a multiple of 4 bytes
- * - packet length is not larger than the MTU size
- *
- * These checks are only done for the first packet of the
- * transfer since the header is "given" to us by user space.
- * For the remainder of the packets we compute the values.
- */
- if (req->info.fragsize % PIO_BLOCK_SIZE ||
- lrhlen & 0x3 || req->data_len & 0x3 ||
- lrhlen > get_lrh_len(*hdr, req->info.fragsize))
- return -EINVAL;
-
- if (req_opcode(req->info.ctrl) == EXPECTED) {
- /*
- * The header is checked only on the first packet. Furthermore,
- * we ensure that at least one TID entry is copied when the
- * request is submitted. Therefore, we don't have to verify that
- * tididx points to something sane.
- */
- u32 tidval = req->tids[req->tididx],
- tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
- tididx = EXP_TID_GET(tidval, IDX),
- tidctrl = EXP_TID_GET(tidval, CTRL),
- tidoff;
- __le32 kval = hdr->kdeth.ver_tid_offset;
-
- tidoff = KDETH_GET(kval, OFFSET) *
- (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
- KDETH_OM_LARGE : KDETH_OM_SMALL);
- /*
- * Expected receive packets have the following
- * additional checks:
- * - offset is not larger than the TID size
- * - TIDCtrl values match between header and TID array
- * - TID indexes match between header and TID array
- */
- if ((tidoff + datalen > tidlen) ||
- KDETH_GET(kval, TIDCTRL) != tidctrl ||
- KDETH_GET(kval, TID) != tididx)
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * Correctly set the BTH.PSN field based on type of
- * transfer - eager packets can just increment the PSN but
- * expected packets encode generation and sequence in the
- * BTH.PSN field so just incrementing will result in errors.
- */
-static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
-{
- u32 val = be32_to_cpu(bthpsn),
- mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
- 0xffffffull),
- psn = val & mask;
- if (expct)
- psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
- else
- psn = psn + frags;
- return psn & mask;
-}
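
A standalone model of the two update rules above, using the 24-bit PSN mask (EXTENDED_PSN off, an assumption for the demo) and the BTH_SEQ_MASK defined earlier in this file: for expected packets the low 11 sequence bits wrap in place while the generation bits stay untouched.

/* Standalone model of set_pkt_bth_psn(); 24-bit PSN assumed. */
#include <stdio.h>
#include <stdint.h>

#define BTH_SEQ_MASK 0x7ffull
#define PSN_MASK 0xffffffull

static uint32_t set_pkt_bth_psn_x(uint32_t psn, int expct, uint32_t frags)
{
	if (expct)	/* advance only the 11 sequence bits */
		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
	else		/* eager: plain increment */
		psn += frags;
	return psn & PSN_MASK;
}

int main(void)
{
	uint32_t psn = 0x123800;	/* generation 0x247, sequence 0 */

	printf("eager +5:        0x%06x\n",
	       (unsigned)set_pkt_bth_psn_x(psn, 0, 5));
	printf("expected +5:     0x%06x\n",
	       (unsigned)set_pkt_bth_psn_x(psn, 1, 5));
	/* sequence wraps within BTH_SEQ_MASK, generation is preserved */
	printf("expected +0x800: 0x%06x\n",
	       (unsigned)set_pkt_bth_psn_x(psn, 1, 0x800));
	return 0;
}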
-
-static int set_txreq_header(struct user_sdma_request *req,
- struct user_sdma_txreq *tx, u32 datalen)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct hfi1_pkt_header *hdr = &tx->hdr;
- u16 pbclen;
- int ret;
- u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);
-
- /* Copy the header template to the request before modification */
- memcpy(hdr, &req->hdr, sizeof(*hdr));
-
- /*
- * Check if the PBC and LRH lengths are mismatched. If so,
- * adjust both in the header.
- */
- pbclen = le16_to_cpu(hdr->pbc[0]);
- if (PBC2LRH(pbclen) != lrhlen) {
- pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
- hdr->pbc[0] = cpu_to_le16(pbclen);
- hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
- /*
- * Third packet
- * This is the first packet in the sequence that has
- * a "static" size that can be used for the rest of
- * the packets (besides the last one).
- */
- if (unlikely(req->seqnum == 2)) {
- /*
- * From this point on the lengths in both the
- * PBC and LRH are the same until the last
- * packet.
- * Adjust the template so we don't have to update
- * every packet
- */
- req->hdr.pbc[0] = hdr->pbc[0];
- req->hdr.lrh[2] = hdr->lrh[2];
- }
- }
- /*
- * We only have to modify the header if this is not the
- * first packet in the request. Otherwise, we use the
- * header given to us.
- */
- if (unlikely(!req->seqnum)) {
- ret = check_header_template(req, hdr, lrhlen, datalen);
- if (ret)
- return ret;
- goto done;
- }
-
- hdr->bth[2] = cpu_to_be32(
- set_pkt_bth_psn(hdr->bth[2],
- (req_opcode(req->info.ctrl) == EXPECTED),
- req->seqnum));
-
- /* Set ACK request on last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
- hdr->bth[2] |= cpu_to_be32(1UL << 31);
-
- /* Set the new offset */
- hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
- /* Expected packets have to fill in the new TID information */
- if (req_opcode(req->info.ctrl) == EXPECTED) {
- tidval = req->tids[req->tididx];
- /*
- * If the offset puts us at the end of the current TID,
- * advance everything.
- */
- if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
- PAGE_SIZE)) {
- req->tidoffset = 0;
- /*
-			 * Since we don't copy all the TIDs all at once,
- * we have to check again.
- */
- if (++req->tididx > req->n_tids - 1 ||
- !req->tids[req->tididx]) {
- return -EINVAL;
- }
- tidval = req->tids[req->tididx];
- }
- req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
- KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
- /* Set KDETH.TIDCtrl based on value for this TID. */
- KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
- EXP_TID_GET(tidval, CTRL));
- /* Set KDETH.TID based on value for this TID */
- KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
- EXP_TID_GET(tidval, IDX));
- /* Clear KDETH.SH only on the last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
- KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
- /*
- * Set the KDETH.OFFSET and KDETH.OM based on size of
- * transfer.
- */
- SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
- req->tidoffset, req->tidoffset / req->omfactor,
- !!(req->omfactor - KDETH_OM_SMALL));
- KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
- req->tidoffset / req->omfactor);
- KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
- !!(req->omfactor - KDETH_OM_SMALL));
- }
-done:
- trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
- req->info.comp_idx, hdr, tidval);
- return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
-}
-
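-/*
- * Sketch of the KDETH offset-multiplier selection used above
- * (illustrative; the constants are assumptions -- the driver defines
- * KDETH_OM_SMALL, KDETH_OM_LARGE and KDETH_OM_MAX_SIZE itself). The
- * point is that the 15-bit KDETH.OFFSET field cannot address a large
- * TID buffer at fine granularity, so a coarser multiplier is chosen
- * and signalled through the one-bit OM flag:
- */
-static inline void example_kdeth_offset(u32 tidlen_bytes, u32 tidoffset,
-					u32 *om_flag, u32 *offset_units)
-{
-	const u32 om_small = 4;                  /* assumed multiplier */
-	const u32 om_large = 64;                 /* assumed multiplier */
-	const u32 om_max = (1 << 15) * om_small; /* assumed cutoff */
-	u32 omfactor = (tidlen_bytes >= om_max) ? om_large : om_small;
-
-	*om_flag = (omfactor != om_small);    /* 0 = small, 1 = large */
-	*offset_units = tidoffset / omfactor; /* value for KDETH.OFFSET */
-}
-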
-static int set_txreq_header_ahg(struct user_sdma_request *req,
- struct user_sdma_txreq *tx, u32 len)
-{
- int diff = 0;
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct hfi1_pkt_header *hdr = &req->hdr;
- u16 pbclen = le16_to_cpu(hdr->pbc[0]);
- u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);
-
- if (PBC2LRH(pbclen) != lrhlen) {
- /* PBC.PbcLengthDWs */
- AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
- cpu_to_le16(LRH2PBC(lrhlen)));
- /* LRH.PktLen (we need the full 16 bits due to byte swap) */
- AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
- cpu_to_be16(lrhlen >> 2));
- }
-
- /*
- * Do the common updates
- */
- /* BTH.PSN and BTH.A */
- val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
- (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
- val32 |= 1UL << 31;
- AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
- AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
- /* KDETH.Offset */
- AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
- cpu_to_le16(req->koffset & 0xffff));
- AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
- cpu_to_le16(req->koffset >> 16));
- if (req_opcode(req->info.ctrl) == EXPECTED) {
- __le16 val;
-
- tidval = req->tids[req->tididx];
-
- /*
- * If the offset puts us at the end of the current TID,
- * advance everything.
- */
- if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
- PAGE_SIZE)) {
- req->tidoffset = 0;
- /*
-			 * Since we don't copy all the TIDs all at once,
- * we have to check again.
- */
- if (++req->tididx > req->n_tids - 1 ||
- !req->tids[req->tididx]) {
- return -EINVAL;
- }
- tidval = req->tids[req->tididx];
- }
- req->omfactor = ((EXP_TID_GET(tidval, LEN) *
- PAGE_SIZE) >=
- KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
- KDETH_OM_SMALL;
- /* KDETH.OM and KDETH.OFFSET (TID) */
- AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
- ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
- ((req->tidoffset / req->omfactor) & 0x7fff)));
- /* KDETH.TIDCtrl, KDETH.TID */
- val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
- (EXP_TID_GET(tidval, IDX) & 0x3ff));
- /* Clear KDETH.SH on last packet */
- if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
- val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
- INTR) >> 16);
- val &= cpu_to_le16(~(1U << 13));
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
- } else {
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
- }
- }
-
- trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
- req->info.comp_idx, req->sde->this_idx,
- req->ahg_idx, req->ahg, diff, tidval);
- return diff;
-}
-
-/*
- * SDMA tx request completion callback. Called when the SDMA progress
- * state machine gets notification that the SDMA descriptors for this
- * tx request have been processed by the DMA engine. Called in
- * interrupt context.
- */
-static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
-{
- struct user_sdma_txreq *tx =
- container_of(txreq, struct user_sdma_txreq, txreq);
- struct user_sdma_request *req;
- struct hfi1_user_sdma_pkt_q *pq;
- struct hfi1_user_sdma_comp_q *cq;
- u16 idx;
-
- if (!tx->req)
- return;
-
- req = tx->req;
- pq = req->pq;
- cq = req->cq;
-
- if (status != SDMA_TXREQ_S_OK) {
- SDMA_DBG(req, "SDMA completion with error %d",
- status);
- set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
- }
-
- req->seqcomp = tx->seqnum;
- kmem_cache_free(pq->txreq_cache, tx);
- tx = NULL;
-
- idx = req->info.comp_idx;
- if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
- if (req->seqcomp == req->info.npkts - 1) {
- req->status = 0;
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, COMPLETE, 0);
- }
- } else {
- if (status != SDMA_TXREQ_S_OK)
- req->status = status;
- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
- (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
- test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, ERROR, req->status);
- }
- }
-}
-
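-/*
- * Ordering note for the callback above (added commentary): completions
- * can finish out of order with respect to submission errors.
- * req->seqcomp records the last completed tx; the request is torn down
- * either when the final packet (npkts - 1) completes cleanly, or, on
- * the error path, only once every submitted tx has drained
- * (seqcomp == seqsubmitted - 1) and submission itself has finished, so
- * no in-flight tx can still reference the request being freed.
- */
-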
-static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
-{
- if (atomic_dec_and_test(&pq->n_reqs)) {
- xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
- wake_up(&pq->wait);
- }
-}
-
-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
-{
- if (!list_empty(&req->txps)) {
- struct sdma_txreq *t, *p;
-
- list_for_each_entry_safe(t, p, &req->txps, list) {
- struct user_sdma_txreq *tx =
- container_of(t, struct user_sdma_txreq, txreq);
- list_del_init(&t->list);
- sdma_txclean(req->pq->dd, t);
- kmem_cache_free(req->pq->txreq_cache, tx);
- }
- }
- if (req->data_iovs) {
- struct sdma_mmu_node *node;
- struct mmu_rb_node *mnode;
- int i;
-
- for (i = 0; i < req->data_iovs; i++) {
- mnode = hfi1_mmu_rb_search(
- &req->pq->sdma_rb_root,
- (unsigned long)req->iovs[i].iov.iov_base,
- req->iovs[i].iov.iov_len);
- if (!mnode || IS_ERR(mnode))
- continue;
-
- node = container_of(mnode, struct sdma_mmu_node, rb);
- if (unpin)
- hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
- &node->rb);
- else
- atomic_dec(&node->refcount);
- }
- }
- kfree(req->tids);
- clear_bit(SDMA_REQ_IN_USE, &req->flags);
-}
-
-static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
- struct hfi1_user_sdma_comp_q *cq,
- u16 idx, enum hfi1_sdma_comp_state state,
- int ret)
-{
- hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
- pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
- cq->comps[idx].status = state;
- if (state == ERROR)
- cq->comps[idx].errcode = -ret;
- trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
- idx, state, ret);
-}
-
-static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
- unsigned long len)
-{
- return (bool)(node->addr == addr);
-}
-
-static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
-
- atomic_inc(&node->refcount);
- return 0;
-}
-
-static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- struct mm_struct *mm)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
-
- spin_lock(&node->pq->evict_lock);
- list_del(&node->list);
- node->pq->n_locked -= node->npages;
- spin_unlock(&node->pq->evict_lock);
-
- /*
- * If mm is set, we are being called by the MMU notifier and we
- * should not pass a mm_struct to unpin_vector_page(). This is to
- * prevent a deadlock when hfi1_release_user_pages() attempts to
- * take the mmap_sem, which the MMU notifier has already taken.
- */
- unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
- node->npages);
- /*
- * If called by the MMU notifier, we have to adjust the pinned
- * page count ourselves.
- */
- if (mm)
- mm->pinned_vm -= node->npages;
- kfree(node);
-}
-
-static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
-
- if (!atomic_read(&node->refcount))
- return 1;
- return 0;
-}
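-
-/*
- * Sketch (assumption, not from this file): the four callbacks above
- * plausibly form the per-context operations table handed to the
- * driver's MMU-notifier rb-tree code; the struct shape shown here is
- * assumed for illustration only.
- */
-static struct mmu_rb_ops example_sdma_rb_ops = {
-	.filter = sdma_rb_filter,         /* match a node by start address */
-	.insert = sdma_rb_insert,         /* take a reference on reuse */
-	.remove = sdma_rb_remove,         /* unpin pages and free the node */
-	.invalidate = sdma_rb_invalidate, /* evictable only when unreferenced */
-};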
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h
deleted file mode 100644
index b9240e351..000000000
--- a/drivers/staging/rdma/hfi1/user_sdma.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/device.h>
-#include <linux/wait.h>
-
-#include "common.h"
-#include "iowait.h"
-#include "user_exp_rcv.h"
-
-extern uint extended_psn;
-
-struct hfi1_user_sdma_pkt_q {
- struct list_head list;
- unsigned ctxt;
- unsigned subctxt;
- u16 n_max_reqs;
- atomic_t n_reqs;
- u16 reqidx;
- struct hfi1_devdata *dd;
- struct kmem_cache *txreq_cache;
- struct user_sdma_request *reqs;
- struct iowait busy;
- unsigned state;
- wait_queue_head_t wait;
- unsigned long unpinned;
- struct rb_root sdma_rb_root;
- u32 n_locked;
- struct list_head evict;
- spinlock_t evict_lock; /* protect evict and n_locked */
-};
-
-struct hfi1_user_sdma_comp_q {
- u16 nentries;
- struct hfi1_sdma_comp_entry *comps;
-};
-
-int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *, struct file *);
-int hfi1_user_sdma_free_queues(struct hfi1_filedata *);
-int hfi1_user_sdma_process_request(struct file *, struct iovec *, unsigned long,
- unsigned long *);
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
deleted file mode 100644
index 89f2aad45..000000000
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ /dev/null
@@ -1,1740 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
-
-#include "hfi.h"
-#include "common.h"
-#include "device.h"
-#include "trace.h"
-#include "qp.h"
-#include "verbs_txreq.h"
-
-static unsigned int hfi1_lkey_table_size = 16;
-module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
- S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
- "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int hfi1_max_pds = 0xFFFF;
-module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
-MODULE_PARM_DESC(max_pds,
- "Maximum number of protection domains to support");
-
-static unsigned int hfi1_max_ahs = 0xFFFF;
-module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int hfi1_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
- "Maximum number of completion queue entries to support");
-
-unsigned int hfi1_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int hfi1_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int hfi1_max_qps = 16384;
-module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int hfi1_max_sges = 0x60;
-module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int hfi1_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
- "Maximum number of multicast groups to support");
-
-unsigned int hfi1_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
- uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
- "Maximum number of attached QPs to support");
-
-unsigned int hfi1_max_srqs = 1024;
-module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int hfi1_max_srq_sges = 128;
-module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int hfi1_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-
-unsigned short piothreshold = 256;
-module_param(piothreshold, ushort, S_IRUGO);
-MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
-
-#define COPY_CACHELESS 1
-#define COPY_ADAPTIVE 2
-static unsigned int sge_copy_mode;
-module_param(sge_copy_mode, uint, S_IRUGO);
-MODULE_PARM_DESC(sge_copy_mode,
- "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
-
-static void verbs_sdma_complete(
- struct sdma_txreq *cookie,
- int status);
-
-static int pio_wait(struct rvt_qp *qp,
- struct send_context *sc,
- struct hfi1_pkt_state *ps,
- u32 flag);
-
-/* Length of buffer to create verbs txreq cache name */
-#define TXREQ_NAME_LEN 24
-
-static uint wss_threshold;
-module_param(wss_threshold, uint, S_IRUGO);
-MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
-static uint wss_clean_period = 256;
-module_param(wss_clean_period, uint, S_IRUGO);
-MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
-
-/* memory working set size */
-struct hfi1_wss {
- unsigned long *entries;
- atomic_t total_count;
- atomic_t clean_counter;
- atomic_t clean_entry;
-
- int threshold;
- int num_entries;
- long pages_mask;
-};
-
-static struct hfi1_wss wss;
-
-int hfi1_wss_init(void)
-{
- long llc_size;
- long llc_bits;
- long table_size;
- long table_bits;
-
- /* check for a valid percent range - default to 80 if none or invalid */
- if (wss_threshold < 1 || wss_threshold > 100)
- wss_threshold = 80;
- /* reject a wildly large period */
- if (wss_clean_period > 1000000)
- wss_clean_period = 256;
- /* reject a zero period */
- if (wss_clean_period == 0)
- wss_clean_period = 1;
-
- /*
- * Calculate the table size - the next power of 2 larger than the
- * LLC size. LLC size is in KiB.
- */
- llc_size = wss_llc_size() * 1024;
- table_size = roundup_pow_of_two(llc_size);
-
- /* one bit per page in rounded up table */
- llc_bits = llc_size / PAGE_SIZE;
- table_bits = table_size / PAGE_SIZE;
- wss.pages_mask = table_bits - 1;
- wss.num_entries = table_bits / BITS_PER_LONG;
-
- wss.threshold = (llc_bits * wss_threshold) / 100;
- if (wss.threshold == 0)
- wss.threshold = 1;
-
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
- GFP_KERNEL);
- if (!wss.entries) {
- hfi1_wss_exit();
- return -ENOMEM;
- }
-
- return 0;
-}
-
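-/*
- * Worked example for the sizing above (illustrative, assuming a
- * 30 MiB LLC and 4 KiB pages): llc_size = 31457280, so
- * llc_bits = 7680 and the table rounds up to 32 MiB, i.e.
- * table_bits = 8192 one-bit entries. With 64-bit longs that gives
- * num_entries = 128 words and pages_mask = 0x1fff, and the default
- * 80% threshold works out to 7680 * 80 / 100 = 6144 pages.
- */
-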
-void hfi1_wss_exit(void)
-{
- /* coded to handle partially initialized and repeat callers */
- kfree(wss.entries);
- wss.entries = NULL;
-}
-
-/*
- * Advance the clean counter. When the clean period has expired,
- * clean an entry.
- *
- * This is implemented in atomics to avoid locking. Because multiple
- * variables are involved, it can be racy which can lead to slightly
- * inaccurate information. Since this is only a heuristic, this is
- * OK. Any inaccuracies will clean themselves out as the counter
- * advances. That said, it is unlikely the entry clean operation will
- * race - the next possible racer will not start until the next clean
- * period.
- *
- * The clean counter is implemented as a decrement to zero. When zero
- * is reached an entry is cleaned.
- */
-static void wss_advance_clean_counter(void)
-{
- int entry;
- int weight;
- unsigned long bits;
-
- /* become the cleaner if we decrement the counter to zero */
- if (atomic_dec_and_test(&wss.clean_counter)) {
- /*
- * Set, not add, the clean period. This avoids an issue
- * where the counter could decrement below the clean period.
- * Doing a set can result in lost decrements, slowing the
- * clean advance. Since this a heuristic, this possible
- * slowdown is OK.
- *
- * An alternative is to loop, advancing the counter by a
- * clean period until the result is > 0. However, this could
- * lead to several threads keeping another in the clean loop.
- * This could be mitigated by limiting the number of times
- * we stay in the loop.
- */
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- /*
- * Uniquely grab the entry to clean and move to next.
- * The current entry is always the lower bits of
- * wss.clean_entry. The table size, wss.num_entries,
- * is always a power-of-2.
- */
- entry = (atomic_inc_return(&wss.clean_entry) - 1)
- & (wss.num_entries - 1);
-
- /* clear the entry and count the bits */
- bits = xchg(&wss.entries[entry], 0);
- weight = hweight64((u64)bits);
- /* only adjust the contended total count if needed */
- if (weight)
- atomic_sub(weight, &wss.total_count);
- }
-}
-
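-/*
- * Concrete interleaving for the "set, not add" note above (added
- * commentary): with wss_clean_period = 256, suppose two CPUs
- * decrement almost simultaneously and the counter goes 1 -> 0 -> -1.
- * Only the CPU that observed zero becomes the cleaner and does
- * atomic_set(..., 256); the -1 from the other CPU is overwritten
- * (a lost decrement), which slightly delays the next clean but can
- * never wedge the counter -- exactly the trade-off described above.
- */
-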
-/*
- * Insert the given address into the working set array.
- */
-static void wss_insert(void *address)
-{
- u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
- u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
- u32 nr = page & (BITS_PER_LONG - 1);
-
- if (!test_and_set_bit(nr, &wss.entries[entry]))
- atomic_inc(&wss.total_count);
-
- wss_advance_clean_counter();
-}
-
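-/*
- * Worked example for the bit math above (illustrative, assuming
- * 4 KiB pages, 64-bit longs and the 8192-entry table from the init
- * example): for address 0x7f1234567000, page = (addr >> 12) & 0x1fff
- * = 0x567 = 1383, entry = 1383 / 64 = 21 and nr = 1383 % 64 = 39,
- * so bit 39 of wss.entries[21] marks this page as recently touched.
- */
-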
-/*
- * Is the working set larger than the threshold?
- */
-static inline int wss_exceeds_threshold(void)
-{
- return atomic_read(&wss.total_count) >= wss.threshold;
-}
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * Length of header by opcode, 0 --> not supported
- */
-const u8 hdr_len_by_opcode[256] = {
- /* RC */
- [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
- [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
- [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
- [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
- [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
- [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
- [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
- [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
- [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
- [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
- [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
- [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
- [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
- [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
- [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
- [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
- [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
- /* UC */
- [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
- [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
- [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
- [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
- [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
- [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
- [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
- [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
- [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
- [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
- [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
- [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
- /* UD */
- [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
- [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
-};
-
-static const opcode_handler opcode_handler_tbl[256] = {
- /* RC */
- [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
- [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
- /* UC */
- [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
- [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
- /* UD */
- [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
- [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
- /* CNP */
- [IB_OPCODE_CNP] = &hfi1_cnp_rcv
-};
-
-/*
- * System image GUID.
- */
-__be64 ib_hfi1_sys_image_guid;
-
-/**
- * hfi1_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- * @release: if true, drop the MR reference when an SGE is consumed
- * @copy_last: do a separate copy of the last 8 bytes
- */
-void hfi1_copy_sge(
- struct rvt_sge_state *ss,
- void *data, u32 length,
- int release,
- int copy_last)
-{
- struct rvt_sge *sge = &ss->sge;
- int in_last = 0;
- int i;
- int cacheless_copy = 0;
-
- if (sge_copy_mode == COPY_CACHELESS) {
- cacheless_copy = length >= PAGE_SIZE;
- } else if (sge_copy_mode == COPY_ADAPTIVE) {
- if (length >= PAGE_SIZE) {
- /*
- * NOTE: this *assumes*:
- * o The first vaddr is the dest.
- * o If multiple pages, then vaddr is sequential.
- */
- wss_insert(sge->vaddr);
- if (length >= (2 * PAGE_SIZE))
- wss_insert(sge->vaddr + PAGE_SIZE);
-
- cacheless_copy = wss_exceeds_threshold();
- } else {
- wss_advance_clean_counter();
- }
- }
- if (copy_last) {
- if (length > 8) {
- length -= 8;
- } else {
- copy_last = 0;
- in_last = 1;
- }
- }
-
-again:
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- if (unlikely(in_last)) {
- /* enforce byte transfer ordering */
- for (i = 0; i < len; i++)
- ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
- } else if (cacheless_copy) {
- cacheless_memcpy(sge->vaddr, data, len);
- } else {
- memcpy(sge->vaddr, data, len);
- }
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (release)
- rvt_put_mr(sge->mr);
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-
- if (copy_last) {
- copy_last = 0;
- in_last = 1;
- length = 8;
- goto again;
- }
-}
-
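-/*
- * Minimal standalone sketch of the copy_last handling above
- * (illustrative, not from the original source): copy the body with
- * the fast path, then copy the final 8 bytes one at a time so the
- * last bytes the receiver can observe are written in order.
- */
-static void example_copy_with_ordered_tail(u8 *dst, const u8 *src,
-					   u32 length)
-{
-	u32 i, body = (length > 8) ? length - 8 : 0;
-
-	memcpy(dst, src, body);         /* fast path for the body */
-	for (i = body; i < length; i++) /* ordered tail, byte by byte */
-		dst[i] = src[i];
-}
-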
-/**
- * hfi1_skip_sge - skip over SGE memory
- * @ss: the SGE state
- * @length: the number of bytes to skip
- * @release: if true, drop the MR reference when an SGE is consumed
- */
-void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
-{
- struct rvt_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = sge->length;
-
- if (len > length)
- len = length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (release)
- rvt_put_mr(sge->mr);
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
-}
-
-/*
- * Make sure the QP is ready and able to accept the given opcode.
- */
-static inline int qp_ok(int opcode, struct hfi1_packet *packet)
-{
- struct hfi1_ibport *ibp;
-
- if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
- goto dropit;
- if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
- (opcode == IB_OPCODE_CNP))
- return 1;
-dropit:
- ibp = &packet->rcd->ppd->ibport_data;
- ibp->rvp.n_pkt_drops++;
- return 0;
-}
-
-/**
- * hfi1_ib_rcv - process an incoming packet
- * @packet: data packet information
- *
- * This is called to process an incoming packet at interrupt level.
- *
- * Tlen is the length of the header + data + CRC in bytes.
- */
-void hfi1_ib_rcv(struct hfi1_packet *packet)
-{
- struct hfi1_ctxtdata *rcd = packet->rcd;
- struct hfi1_ib_header *hdr = packet->hdr;
- u32 tlen = packet->tlen;
- struct hfi1_pportdata *ppd = rcd->ppd;
- struct hfi1_ibport *ibp = &ppd->ibport_data;
- struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
- unsigned long flags;
- u32 qp_num;
- int lnh;
- u8 opcode;
- u16 lid;
-
- /* Check for GRH */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH) {
- packet->ohdr = &hdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
- u32 vtf;
-
- packet->ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- packet->rcv_flags |= HFI1_HAS_GRH;
- } else {
- goto drop;
- }
-
- trace_input_ibhdr(rcd->dd, hdr);
-
- opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
- inc_opstats(tlen, &rcd->opstats->stats[opcode]);
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
- lid = be16_to_cpu(hdr->lrh[1]);
- if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
- struct rvt_mcast *mcast;
- struct rvt_mcast_qp *p;
-
- if (lnh != HFI1_LRH_GRH)
- goto drop;
- mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
- if (!mcast)
- goto drop;
- list_for_each_entry_rcu(p, &mcast->qp_list, list) {
- packet->qp = p->qp;
- spin_lock_irqsave(&packet->qp->r_lock, flags);
- if (likely((qp_ok(opcode, packet))))
- opcode_handler_tbl[opcode](packet);
- spin_unlock_irqrestore(&packet->qp->r_lock, flags);
- }
- /*
- * Notify rvt_multicast_detach() if it is waiting for us
- * to finish.
- */
- if (atomic_dec_return(&mcast->refcount) <= 1)
- wake_up(&mcast->wait);
- } else {
- rcu_read_lock();
- packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!packet->qp) {
- rcu_read_unlock();
- goto drop;
- }
- spin_lock_irqsave(&packet->qp->r_lock, flags);
- if (likely((qp_ok(opcode, packet))))
- opcode_handler_tbl[opcode](packet);
- spin_unlock_irqrestore(&packet->qp->r_lock, flags);
- rcu_read_unlock();
- }
- return;
-
-drop:
- ibp->rvp.n_pkt_drops++;
-}
-
-/*
- * This is called from a timer to check for QPs
- * which need kernel memory in order to send a packet.
- */
-static void mem_timer(unsigned long data)
-{
- struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
- struct list_head *list = &dev->memwait;
- struct rvt_qp *qp = NULL;
- struct iowait *wait;
- unsigned long flags;
- struct hfi1_qp_priv *priv;
-
- write_seqlock_irqsave(&dev->iowait_lock, flags);
- if (!list_empty(list)) {
- wait = list_first_entry(list, struct iowait, list);
- qp = iowait_to_qp(wait);
- priv = qp->priv;
- list_del_init(&priv->s_iowait.list);
- /* refcount held until actual wake up */
- if (!list_empty(list))
- mod_timer(&dev->mem_timer, jiffies + 1);
- }
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
-
- if (qp)
- hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
-}
-
-void update_sge(struct rvt_sge_state *ss, u32 length)
-{
- struct rvt_sge *sge = &ss->sge;
-
- sge->vaddr += length;
- sge->length -= length;
- sge->sge_length -= length;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- return;
- sge->n = 0;
- }
- sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
- }
-}
-
-/*
- * This is called with progress side lock held.
- */
-/* New API */
-static void verbs_sdma_complete(
- struct sdma_txreq *cookie,
- int status)
-{
- struct verbs_txreq *tx =
- container_of(cookie, struct verbs_txreq, txreq);
- struct rvt_qp *qp = tx->qp;
-
- spin_lock(&qp->s_lock);
- if (tx->wqe) {
- hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct hfi1_ib_header *hdr;
-
- hdr = &tx->phdr.hdr;
- hfi1_rc_send_complete(qp, hdr);
- }
- spin_unlock(&qp->s_lock);
-
- hfi1_put_txreq(tx);
-}
-
-static int wait_kmem(struct hfi1_ibdev *dev,
- struct rvt_qp *qp,
- struct hfi1_pkt_state *ps)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- write_seqlock(&dev->iowait_lock);
- list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
- if (list_empty(&priv->s_iowait.list)) {
- if (list_empty(&dev->memwait))
- mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= RVT_S_WAIT_KMEM;
- list_add_tail(&priv->s_iowait.list, &dev->memwait);
- trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
- atomic_inc(&qp->refcount);
- }
- write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- return ret;
-}
-
-/*
- * This routine calls txadds for each sg entry.
- *
- * Add failures will revert the sge cursor
- */
-static noinline int build_verbs_ulp_payload(
- struct sdma_engine *sde,
- struct rvt_sge_state *ss,
- u32 length,
- struct verbs_txreq *tx)
-{
- struct rvt_sge *sg_list = ss->sg_list;
- struct rvt_sge sge = ss->sge;
- u8 num_sge = ss->num_sge;
- u32 len;
- int ret = 0;
-
- while (length) {
- len = ss->sge.length;
- if (len > length)
- len = length;
- if (len > ss->sge.sge_length)
- len = ss->sge.sge_length;
- WARN_ON_ONCE(len == 0);
- ret = sdma_txadd_kvaddr(
- sde->dd,
- &tx->txreq,
- ss->sge.vaddr,
- len);
- if (ret)
- goto bail_txadd;
- update_sge(ss, len);
- length -= len;
- }
- return ret;
-bail_txadd:
- /* unwind cursor */
- ss->sge = sge;
- ss->num_sge = num_sge;
- ss->sg_list = sg_list;
- return ret;
-}
-
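-/*
- * Design note (added commentary): the by-value copies of sge, sg_list
- * and num_sge taken above make the payload build transactional -- on
- * any sdma_txadd_kvaddr() failure the cursor is restored wholesale,
- * so the caller can retry the entire payload later without a
- * half-advanced SGE state.
- */
-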
-/*
- * Build the number of DMA descriptors needed to send length bytes of data.
- *
- * NOTE: DMA mapping is held in the tx until completed in the ring or
- * the tx desc is freed without having been submitted to the ring
- *
- * This routine ensures all the helper routine calls succeed.
- */
-/* New API */
-static int build_verbs_tx_desc(
- struct sdma_engine *sde,
- struct rvt_sge_state *ss,
- u32 length,
- struct verbs_txreq *tx,
- struct ahg_ib_header *ahdr,
- u64 pbc)
-{
- int ret = 0;
- struct hfi1_pio_header *phdr = &tx->phdr;
- u16 hdrbytes = tx->hdr_dwords << 2;
-
- if (!ahdr->ahgcount) {
- ret = sdma_txinit_ahg(
- &tx->txreq,
- ahdr->tx_flags,
- hdrbytes + length,
- ahdr->ahgidx,
- 0,
- NULL,
- 0,
- verbs_sdma_complete);
- if (ret)
- goto bail_txadd;
- phdr->pbc = cpu_to_le64(pbc);
- ret = sdma_txadd_kvaddr(
- sde->dd,
- &tx->txreq,
- phdr,
- hdrbytes);
- if (ret)
- goto bail_txadd;
- } else {
- ret = sdma_txinit_ahg(
- &tx->txreq,
- ahdr->tx_flags,
- length,
- ahdr->ahgidx,
- ahdr->ahgcount,
- ahdr->ahgdesc,
- hdrbytes,
- verbs_sdma_complete);
- if (ret)
- goto bail_txadd;
- }
-
- /* add the ulp payload - if any. ss can be NULL for acks */
- if (ss)
- ret = build_verbs_ulp_payload(sde, ss, length, tx);
-bail_txadd:
- return ret;
-}
-
-int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct ahg_ib_header *ahdr = priv->s_hdr;
- u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
- u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
- struct hfi1_ibdev *dev = ps->dev;
- struct hfi1_pportdata *ppd = ps->ppd;
- struct verbs_txreq *tx;
- u64 pbc_flags = 0;
- u8 sc5 = priv->s_sc;
-
- int ret;
-
- tx = ps->s_txreq;
- if (!sdma_txreq_built(&tx->txreq)) {
- if (likely(pbc == 0)) {
- u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
- /* No vl15 here */
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
-
- pbc = create_pbc(ppd,
- pbc_flags,
- qp->srate_mbps,
- vl,
- plen);
- }
- tx->wqe = qp->s_wqe;
- ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
- if (unlikely(ret))
- goto bail_build;
- }
- ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
- if (unlikely(ret < 0)) {
- if (ret == -ECOMM)
- goto bail_ecomm;
- return ret;
- }
- trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
- &ps->s_txreq->phdr.hdr);
- return ret;
-
-bail_ecomm:
- /* The current one got "sent" */
- return 0;
-bail_build:
- ret = wait_kmem(dev, qp, ps);
- if (!ret) {
- /* free txreq - bad state */
- hfi1_put_txreq(ps->s_txreq);
- ps->s_txreq = NULL;
- }
- return ret;
-}
-
-/*
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int pio_wait(struct rvt_qp *qp,
- struct send_context *sc,
- struct hfi1_pkt_state *ps,
- u32 flag)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_devdata *dd = sc->dd;
- struct hfi1_ibdev *dev = &dd->verbs_dev;
- unsigned long flags;
- int ret = 0;
-
- /*
- * Note that as soon as want_buffer() is called and
- * possibly before it returns, sc_piobufavail()
- * could be called. Therefore, put QP on the I/O wait list before
- * enabling the PIO avail interrupt.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- write_seqlock(&dev->iowait_lock);
- list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
- if (list_empty(&priv->s_iowait.list)) {
- struct hfi1_ibdev *dev = &dd->verbs_dev;
- int was_empty;
-
- dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
- dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
- qp->s_flags |= flag;
- was_empty = list_empty(&sc->piowait);
- list_add_tail(&priv->s_iowait.list, &sc->piowait);
- trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
- atomic_inc(&qp->refcount);
- /* counting: only call wantpiobuf_intr if first user */
- if (was_empty)
- hfi1_sc_wantpiobuf_intr(sc, 1);
- }
- write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-static void verbs_pio_complete(void *arg, int code)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- struct hfi1_qp_priv *priv = qp->priv;
-
- if (iowait_pio_dec(&priv->s_iowait))
- iowait_drain_wakeup(&priv->s_iowait);
-}
-
-int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
- u32 dwords = (len + 3) >> 2;
- u32 plen = hdrwords + dwords + 2; /* includes pbc */
- struct hfi1_pportdata *ppd = ps->ppd;
- u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
- u64 pbc_flags = 0;
- u8 sc5;
- unsigned long flags = 0;
- struct send_context *sc;
- struct pio_buf *pbuf;
- int wc_status = IB_WC_SUCCESS;
- int ret = 0;
- pio_release_cb cb = NULL;
-
- /* only RC/UC use complete */
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- case IB_QPT_UC:
- cb = verbs_pio_complete;
- break;
- default:
- break;
- }
-
- /* vl15 special case taken care of in ud.c */
- sc5 = priv->s_sc;
- sc = ps->s_txreq->psc;
-
- if (likely(pbc == 0)) {
- u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
- pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
- }
- if (cb)
- iowait_pio_inc(&priv->s_iowait);
- pbuf = sc_buffer_alloc(sc, plen, cb, qp);
- if (unlikely(!pbuf)) {
- if (cb)
- verbs_pio_complete(qp, 0);
- if (ppd->host_link_state != HLS_UP_ACTIVE) {
- /*
-			 * If we have filled the PIO buffers to capacity and are
-			 * not in an active state, this request is not going to
-			 * go out, so complete it with an error; otherwise a
-			 * ULP or the core may be stuck waiting.
- */
- hfi1_cdbg(
- PIO,
- "alloc failed. state not active, completing");
- wc_status = IB_WC_GENERAL_ERR;
- goto pio_bail;
- } else {
- /*
-			 * This is a normal occurrence. The PIO buffers are
-			 * full, but we are still happily sending, so let's
-			 * continue to queue the request.
- */
- hfi1_cdbg(PIO, "alloc failed. state active, queuing");
- ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
- if (!ret)
- /* txreq not queued - free */
- goto bail;
- /* tx consumed in wait */
- return ret;
- }
- }
-
- if (len == 0) {
- pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
- } else {
- if (ss) {
- seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
- while (len) {
- void *addr = ss->sge.vaddr;
- u32 slen = ss->sge.length;
-
- if (slen > len)
- slen = len;
- update_sge(ss, slen);
- seg_pio_copy_mid(pbuf, addr, slen);
- len -= slen;
- }
- seg_pio_copy_end(pbuf);
- }
- }
-
- trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
- &ps->s_txreq->phdr.hdr);
-
-pio_bail:
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, wc_status);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
-
- ret = 0;
-
-bail:
- hfi1_put_txreq(ps->s_txreq);
- return ret;
-}
-
-/*
- * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
- * being an entry from the ingress partition key table), return 0
- * otherwise. Use the matching criteria for egress partition keys
- * specified in the OPAv1 spec., section 9.11.7.
- */
-static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
-{
- u16 mkey = pkey & PKEY_LOW_15_MASK;
- u16 ment = ent & PKEY_LOW_15_MASK;
-
- if (mkey == ment) {
- /*
- * If pkey[15] is set (full partition member),
- * is bit 15 in the corresponding table element
- * clear (limited member)?
- */
- if (pkey & PKEY_MEMBER_MASK)
- return !!(ent & PKEY_MEMBER_MASK);
- return 1;
- }
- return 0;
-}
-
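-/*
- * Worked examples for the membership rule above (illustrative,
- * assuming the usual bit-15 member mask): pkey 0x8001 vs ent 0x8001
- * (full/full) matches; pkey 0x0001 vs ent 0x8001 (limited sender,
- * full entry) matches; pkey 0x8001 vs ent 0x0001 (full sender,
- * limited entry) does not, because a limited table entry cannot
- * satisfy a full-member pkey; and pkey 0x0001 vs ent 0x0002 fails
- * the low-15-bit compare outright.
- */
-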
-/*
- * egress_pkey_check - return 0 if hdr's pkey matches according to the
- * criteria in the OPAv1 spec., section 9.11.7.
- */
-static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
- struct hfi1_ib_header *hdr,
- struct rvt_qp *qp)
-{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
- struct hfi1_devdata *dd;
- int i = 0;
- u16 pkey;
- u8 lnh, sc5 = priv->s_sc;
-
- if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
- return 0;
-
- /* locate the pkey within the headers */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- ohdr = &hdr->u.oth;
-
- pkey = (u16)be32_to_cpu(ohdr->bth[0]);
-
- /* If SC15, pkey[0:14] must be 0x7fff */
- if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
- goto bad;
-
- /* Is the pkey = 0x0, or 0x8000? */
- if ((pkey & PKEY_LOW_15_MASK) == 0)
- goto bad;
-
- /* The most likely matching pkey has index qp->s_pkey_index */
- if (unlikely(!egress_pkey_matches_entry(pkey,
- ppd->pkeys
- [qp->s_pkey_index]))) {
- /* no match - try the entire table */
- for (; i < MAX_PKEY_VALUES; i++) {
- if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
- break;
- }
- }
-
- if (i < MAX_PKEY_VALUES)
- return 0;
-bad:
- incr_cntr64(&ppd->port_xmit_constraint_errors);
- dd = ppd->dd;
- if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
- u16 slid = be16_to_cpu(hdr->lrh[3]);
-
- dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
- dd->err_info_xmit_constraint.slid = slid;
- dd->err_info_xmit_constraint.pkey = pkey;
- }
- return 1;
-}
-
-/**
- * get_send_routine - choose an egress routine
- *
- * Choose an egress routine based on QP type
- * and size
- */
-static inline send_routine get_send_routine(struct rvt_qp *qp,
- struct verbs_txreq *tx)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_ib_header *h = &tx->phdr.hdr;
-
- if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
- return dd->process_pio_send;
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- return dd->process_pio_send;
- case IB_QPT_GSI:
- case IB_QPT_UD:
- break;
- case IB_QPT_RC:
- if (piothreshold &&
- qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
- (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
- iowait_sdma_pending(&priv->s_iowait) == 0 &&
- !sdma_txreq_built(&tx->txreq))
- return dd->process_pio_send;
- break;
- case IB_QPT_UC:
- if (piothreshold &&
- qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
- (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
- iowait_sdma_pending(&priv->s_iowait) == 0 &&
- !sdma_txreq_built(&tx->txreq))
- return dd->process_pio_send;
- break;
- default:
- break;
- }
- return dd->process_dma_send;
-}
-
-/**
- * hfi1_verbs_send - send a packet
- * @qp: the QP to send on
- * @ps: the state of the packet to send
- *
- * Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
- */
-int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct hfi1_qp_priv *priv = qp->priv;
- send_routine sr;
- int ret;
-
- sr = get_send_routine(qp, ps->s_txreq);
- ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp);
- if (unlikely(ret)) {
- /*
- * The value we are returning here does not get propagated to
- * the verbs caller. Thus we need to complete the request with
- * error otherwise the caller could be sitting waiting on the
- * completion event. Only do this for PIO. SDMA has its own
- * mechanism for handling the errors. So for SDMA we can just
- * return.
- */
- if (sr == dd->process_pio_send) {
- unsigned long flags;
-
- hfi1_cdbg(PIO, "%s() Failed. Completing with err",
- __func__);
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- return -EINVAL;
- }
- if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
- return pio_wait(qp,
- ps->s_txreq->psc,
- ps,
- RVT_S_WAIT_PIO_DRAIN);
- return sr(qp, ps, 0);
-}
-
-/**
- * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
- * @dd: the device data structure
- */
-static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
-{
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
-
- memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
-
- rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- rdi->dparms.props.page_size_cap = PAGE_SIZE;
- rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
- rdi->dparms.props.vendor_part_id = dd->pcidev->device;
- rdi->dparms.props.hw_ver = dd->minrev;
- rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
- rdi->dparms.props.max_mr_size = ~0ULL;
- rdi->dparms.props.max_qp = hfi1_max_qps;
- rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
- rdi->dparms.props.max_sge = hfi1_max_sges;
- rdi->dparms.props.max_sge_rd = hfi1_max_sges;
- rdi->dparms.props.max_cq = hfi1_max_cqs;
- rdi->dparms.props.max_ah = hfi1_max_ahs;
- rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
- rdi->dparms.props.max_map_per_fmr = 32767;
- rdi->dparms.props.max_pd = hfi1_max_pds;
- rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
- rdi->dparms.props.max_qp_init_rd_atom = 255;
- rdi->dparms.props.max_srq = hfi1_max_srqs;
- rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
- rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
- rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
- rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
- rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
- rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
- rdi->dparms.props.max_total_mcast_qp_attach =
- rdi->dparms.props.max_mcast_qp_attach *
- rdi->dparms.props.max_mcast_grp;
-}
-
-static inline u16 opa_speed_to_ib(u16 in)
-{
- u16 out = 0;
-
- if (in & OPA_LINK_SPEED_25G)
- out |= IB_SPEED_EDR;
- if (in & OPA_LINK_SPEED_12_5G)
- out |= IB_SPEED_FDR;
-
- return out;
-}
-
-/*
- * Convert a single OPA link width (no multiple flags) to an IB value.
- * A zero OPA link width means link down, which means the IB width value
- * is a don't care.
- */
-static inline u16 opa_width_to_ib(u16 in)
-{
- switch (in) {
- case OPA_LINK_WIDTH_1X:
- /* map 2x and 3x to 1x as they don't exist in IB */
- case OPA_LINK_WIDTH_2X:
- case OPA_LINK_WIDTH_3X:
- return IB_WIDTH_1X;
- default: /* link down or unknown, return our largest width */
- case OPA_LINK_WIDTH_4X:
- return IB_WIDTH_4X;
- }
-}
-
-static int query_port(struct rvt_dev_info *rdi, u8 port_num,
- struct ib_port_attr *props)
-{
- struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
- struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
- struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
- u16 lid = ppd->lid;
-
- props->lid = lid ? lid : 0;
- props->lmc = ppd->lmc;
- /* OPA logical states match IB logical states */
- props->state = driver_lstate(ppd);
- props->phys_state = hfi1_ibphys_portstate(ppd);
- props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
- props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
- /* see rate_show() in ib core/sysfs.c */
- props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
- props->max_vl_num = ppd->vls_supported;
-
-	/* Once we are a "first class" citizen and have added the OPA MTUs to
-	 * the core, we can advertise the larger MTU enum to the ULPs; for now,
-	 * advertise only 4K.
- *
- * Those applications which are either OPA aware or pass the MTU enum
- * from the Path Records to us will get the new 8k MTU. Those that
- * attempt to process the MTU enum may fail in various ways.
- */
- props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
- 4096 : hfi1_max_mtu), IB_MTU_4096);
- props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
- mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
-
- return 0;
-}
-
-static int modify_device(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(device);
- unsigned i;
- int ret;
-
- if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
- IB_DEVICE_MODIFY_NODE_DESC)) {
- ret = -EOPNOTSUPP;
- goto bail;
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc, 64);
- for (i = 0; i < dd->num_pports; i++) {
- struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
-
- hfi1_node_desc_chg(ibp);
- }
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
- ib_hfi1_sys_image_guid =
- cpu_to_be64(device_modify->sys_image_guid);
- for (i = 0; i < dd->num_pports; i++) {
- struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;
-
- hfi1_sys_guid_chg(ibp);
- }
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
-{
- struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
- struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
- struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
-
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
- OPA_LINKDOWN_REASON_UNKNOWN);
- ret = set_link_state(ppd, HLS_DN_DOWNDEF);
- return ret;
-}
-
-static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
- int guid_index, __be64 *guid)
-{
- struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- if (guid_index == 0)
- *guid = cpu_to_be64(ppd->guid);
- else if (guid_index < HFI1_GUIDS_PER_PORT)
- *guid = ibp->guids[guid_index - 1];
- else
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * convert ah port,sl to sc
- */
-u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);
-
- return ibp->sl_to_sc[ah->sl];
-}
-
-static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
-{
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd;
- u8 sc5;
-
- /* test the mapping for validity */
- ibp = to_iport(ibdev, ah_attr->port_num);
- ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[ah_attr->sl];
- dd = dd_from_ppd(ppd);
- if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
- return -EINVAL;
- return 0;
-}
-
-static void hfi1_notify_new_ah(struct ib_device *ibdev,
- struct ib_ah_attr *ah_attr,
- struct rvt_ah *ah)
-{
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
- struct hfi1_devdata *dd;
- u8 sc5;
-
- /*
- * Do not trust reading anything from rvt_ah at this point as it is not
-	 * done being set up. We can, however, modify things which we need to set.
- */
-
- ibp = to_iport(ibdev, ah_attr->port_num);
- ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[ah->attr.sl];
- dd = dd_from_ppd(ppd);
- ah->vl = sc_to_vlt(dd, sc5);
- if (ah->vl < num_vls || ah->vl == 15)
- ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
-}
-
-struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
-{
- struct ib_ah_attr attr;
- struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct rvt_qp *qp0;
-
- memset(&attr, 0, sizeof(attr));
- attr.dlid = dlid;
- attr.port_num = ppd_from_ibp(ibp)->port;
- rcu_read_lock();
- qp0 = rcu_dereference(ibp->rvp.qp[0]);
- if (qp0)
- ah = ib_create_ah(qp0->ibqp.pd, &attr);
- rcu_read_unlock();
- return ah;
-}
-
-/**
- * hfi1_get_npkeys - return the size of the PKEY table for context 0
- * @dd: the hfi1_ib device
- */
-unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
-{
- return ARRAY_SIZE(dd->pport[0].pkeys);
-}
-
-static void init_ibport(struct hfi1_pportdata *ppd)
-{
- struct hfi1_ibport *ibp = &ppd->ibport_data;
- size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
- int i;
-
- for (i = 0; i < sz; i++) {
- ibp->sl_to_sc[i] = i;
- ibp->sc_to_sl[i] = i;
- }
-
- spin_lock_init(&ibp->rvp.lock);
- /* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->rvp.sm_lid = 0;
- /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
- ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
- IB_PORT_CAP_MASK_NOTICE_SUP;
- ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
- RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
- RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
-}
-
-/**
- * hfi1_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return 0 if successful, errno if unsuccessful.
- */
-int hfi1_register_ib_device(struct hfi1_devdata *dd)
-{
- struct hfi1_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->rdi.ibdev;
- struct hfi1_pportdata *ppd = dd->pport;
- unsigned i;
- int ret;
- size_t lcpysz = IB_DEVICE_NAME_MAX;
-
- for (i = 0; i < dd->num_pports; i++)
- init_ibport(ppd + i);
-
- /* Only need to initialize non-zero fields. */
-
- setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
-
- seqlock_init(&dev->iowait_lock);
- INIT_LIST_HEAD(&dev->txwait);
- INIT_LIST_HEAD(&dev->memwait);
-
- ret = verbs_txreq_init(dev);
- if (ret)
- goto err_verbs_txreq;
-
- /*
- * The system image GUID is supposed to be the same for all
- * HFIs in a single system but since there can be other
- * device types in the system, we can't be sure this is unique.
- */
- if (!ib_hfi1_sys_image_guid)
- ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
- lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
- strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
- ibdev->owner = THIS_MODULE;
- ibdev->node_guid = cpu_to_be64(ppd->guid);
- ibdev->phys_port_cnt = dd->num_pports;
- ibdev->dma_device = &dd->pcidev->dev;
- ibdev->modify_device = modify_device;
-
- /* keep process mad in the driver */
- ibdev->process_mad = hfi1_process_mad;
-
- strncpy(ibdev->node_desc, init_utsname()->nodename,
- sizeof(ibdev->node_desc));
-
- /*
- * Fill in rvt info object.
- */
- dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
- dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
- dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
- dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
- dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
- dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
- dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
- dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
- /*
- * Fill in rvt info device attributes.
- */
- hfi1_fill_device_attr(dd);
-
- /* queue pair */
- dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
- dd->verbs_dev.rdi.dparms.qpn_start = 0;
- dd->verbs_dev.rdi.dparms.qpn_inc = 1;
- dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
- dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
- dd->verbs_dev.rdi.dparms.qpn_res_end =
- dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
- dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
- dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
- dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
- dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
- dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
- dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
-
- dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
- dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
- dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
- dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
- dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
- dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
- dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
- dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
- dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
- dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
- dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
- dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
- dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
- dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
- dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
- dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
-
-	/* completion queue */
- snprintf(dd->verbs_dev.rdi.dparms.cq_name,
- sizeof(dd->verbs_dev.rdi.dparms.cq_name),
- "hfi1_cq%d", dd->unit);
- dd->verbs_dev.rdi.dparms.node = dd->node;
-
- /* misc settings */
- dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
- dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
- dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
- dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
-
- ppd = dd->pport;
- for (i = 0; i < dd->num_pports; i++, ppd++)
- rvt_init_port(&dd->verbs_dev.rdi,
- &ppd->ibport_data.rvp,
- i,
- ppd->pkeys);
-
- ret = rvt_register_device(&dd->verbs_dev.rdi);
- if (ret)
- goto err_verbs_txreq;
-
- ret = hfi1_verbs_register_sysfs(dd);
- if (ret)
- goto err_class;
-
- return ret;
-
-err_class:
- rvt_unregister_device(&dd->verbs_dev.rdi);
-err_verbs_txreq:
- verbs_txreq_exit(dev);
- dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
- return ret;
-}
-
-void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
-{
- struct hfi1_ibdev *dev = &dd->verbs_dev;
-
- hfi1_verbs_unregister_sysfs(dd);
-
- rvt_unregister_device(&dd->verbs_dev.rdi);
-
- if (!list_empty(&dev->txwait))
- dd_dev_err(dd, "txwait list not empty!\n");
- if (!list_empty(&dev->memwait))
- dd_dev_err(dd, "memwait list not empty!\n");
-
- del_timer_sync(&dev->mem_timer);
- verbs_txreq_exit(dev);
-}
-
-void hfi1_cnp_rcv(struct hfi1_packet *packet)
-{
- struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_ib_header *hdr = packet->hdr;
- struct rvt_qp *qp = packet->qp;
- u32 lqpn, rqpn = 0;
- u16 rlid = 0;
- u8 sl, sc5, sc4_bit, svc_type;
- bool sc4_set = has_sc4_bit(packet);
-
- switch (packet->qp->ibqp.qp_type) {
- case IB_QPT_UC:
- rlid = qp->remote_ah_attr.dlid;
- rqpn = qp->remote_qpn;
- svc_type = IB_CC_SVCTYPE_UC;
- break;
- case IB_QPT_RC:
- rlid = qp->remote_ah_attr.dlid;
- rqpn = qp->remote_qpn;
- svc_type = IB_CC_SVCTYPE_RC;
- break;
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- svc_type = IB_CC_SVCTYPE_UD;
- break;
- default:
- ibp->rvp.n_pkt_drops++;
- return;
- }
-
- sc4_bit = sc4_set << 4;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
- sl = ibp->sc_to_sl[sc5];
- lqpn = qp->ibqp.qp_num;
-
- process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
-}
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
deleted file mode 100644
index 6c4670fff..000000000
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ /dev/null
@@ -1,532 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef HFI1_VERBS_H
-#define HFI1_VERBS_H
-
-#include <linux/types.h>
-#include <linux/seqlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_mad.h>
-#include <rdma/rdma_vt.h>
-#include <rdma/rdmavt_qp.h>
-#include <rdma/rdmavt_cq.h>
-
-struct hfi1_ctxtdata;
-struct hfi1_pportdata;
-struct hfi1_devdata;
-struct hfi1_packet;
-
-#include "iowait.h"
-
-#define HFI1_MAX_RDMA_ATOMIC 16
-#define HFI1_GUIDS_PER_PORT 5
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define HFI1_UVERBS_ABI_VERSION 2
-
-#define IB_SEQ_NAK (3 << 29)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK 0x20
-#define IB_NAK_PSN_ERROR 0x60
-#define IB_NAK_INVALID_REQUEST 0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR 0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST 0x64
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE 0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
-
-#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)
-
-#define IB_BTH_REQ_ACK BIT(31)
-#define IB_BTH_SOLICITED BIT(23)
-#define IB_BTH_MIG_REQ BIT(22)
-
-#define IB_GRH_VERSION 6
-#define IB_GRH_VERSION_MASK 0xF
-#define IB_GRH_VERSION_SHIFT 28
-#define IB_GRH_TCLASS_MASK 0xFF
-#define IB_GRH_TCLASS_SHIFT 20
-#define IB_GRH_FLOW_MASK 0xFFFFF
-#define IB_GRH_FLOW_SHIFT 0
-#define IB_GRH_NEXT_HDR 0x1B
-
-#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
-
-/* flags passed by hfi1_ib_rcv() */
-enum {
- HFI1_HAS_GRH = (1 << 0),
-};
-
-struct ib_reth {
- __be64 vaddr;
- __be32 rkey;
- __be32 length;
-} __packed;
-
-struct ib_atomic_eth {
- __be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
- __be32 rkey;
- __be64 swap_data;
- __be64 compare_data;
-} __packed;
-
-union ib_ehdrs {
- struct {
- __be32 deth[2];
- __be32 imm_data;
- } ud;
- struct {
- struct ib_reth reth;
- __be32 imm_data;
- } rc;
- struct {
- __be32 aeth;
- __be32 atomic_ack_eth[2];
- } at;
- __be32 imm_data;
- __be32 aeth;
- struct ib_atomic_eth atomic_eth;
-} __packed;
-
-struct hfi1_other_headers {
- __be32 bth[3];
- union ib_ehdrs u;
-} __packed;
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data). Only the first 56 bytes of the IB header
- * will be in the eager header buffer. The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct hfi1_ib_header {
- __be16 lrh[4];
- union {
- struct {
- struct ib_grh grh;
- struct hfi1_other_headers oth;
- } l;
- struct hfi1_other_headers oth;
- } u;
-} __packed;
-
-struct ahg_ib_header {
- struct sdma_engine *sde;
- u32 ahgdesc[2];
- u16 tx_flags;
- u8 ahgcount;
- u8 ahgidx;
- struct hfi1_ib_header ibh;
-};
-
-struct hfi1_pio_header {
- __le64 pbc;
- struct hfi1_ib_header hdr;
-} __packed;
-
-/*
- * hfi1 specific data structures that will be hidden from rvt after the queue
- * pair is made common
- */
-struct hfi1_qp_priv {
- struct ahg_ib_header *s_hdr; /* next header to send */
- struct sdma_engine *s_sde; /* current sde */
- struct send_context *s_sendcontext; /* current sendcontext */
- u8 s_sc; /* SC[0..4] for next packet */
-	u8 r_adefered;           /* number of ACKs deferred */
- struct iowait s_iowait;
- struct timer_list s_rnr_timer;
- struct rvt_qp *owner;
-};
-
-/*
- * This structure is used to hold commonly looked-up and computed values
- * during send engine progress.
- */
-struct hfi1_pkt_state {
- struct hfi1_ibdev *dev;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
- struct verbs_txreq *s_txreq;
-};
-
-#define HFI1_PSN_CREDIT 16
-
-struct hfi1_opcode_stats {
- u64 n_packets; /* number of packets */
- u64 n_bytes; /* total number of bytes */
-};
-
-struct hfi1_opcode_stats_perctx {
- struct hfi1_opcode_stats stats[256];
-};
-
-static inline void inc_opstats(
- u32 tlen,
- struct hfi1_opcode_stats *stats)
-{
-#ifdef CONFIG_DEBUG_FS
- stats->n_bytes += tlen;
- stats->n_packets++;
-#endif
-}
-
-struct hfi1_ibport {
- struct rvt_qp __rcu *qp[2];
- struct rvt_ibport rvp;
-
- __be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */
-
- /* the first 16 entries are sl_to_vl for !OPA */
- u8 sl_to_sc[32];
- u8 sc_to_sl[32];
-};
-
-struct hfi1_ibdev {
- struct rvt_dev_info rdi; /* Must be first */
-
- /* QP numbers are shared by all IB ports */
- /* protect wait lists */
- seqlock_t iowait_lock;
- struct list_head txwait; /* list for wait verbs_txreq */
- struct list_head memwait; /* list for wait kernel memory */
- struct list_head txreq_free;
- struct kmem_cache *verbs_txreq_cache;
- struct timer_list mem_timer;
-
- u64 n_piowait;
- u64 n_piodrain;
- u64 n_txwait;
- u64 n_kmem_wait;
-
-#ifdef CONFIG_DEBUG_FS
- /* per HFI debugfs */
- struct dentry *hfi1_ibdev_dbg;
- /* per HFI symlinks to above */
- struct dentry *hfi1_ibdev_link;
-#endif
-};
-
-static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
-{
- struct rvt_dev_info *rdi;
-
- rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
- return container_of(rdi, struct hfi1_ibdev, rdi);
-}
-
-static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
-{
- struct hfi1_qp_priv *priv;
-
- priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait);
- return priv->owner;
-}
-
-/*
- * Send if not busy or waiting for I/O and either
- * an RC response is pending or we can process send work requests.
- */
-static inline int hfi1_send_ok(struct rvt_qp *qp)
-{
- return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
- !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
-}
-
-/*
- * This must be called with s_lock held.
- */
-void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, u16 lid1, u16 lid2);
-void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
-void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
-void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
-int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
-
-/*
- * The PSN_MASK and PSN_SHIFT allow for
- * 1) comparing two PSNs
- * 2) returning the PSN with any upper bits masked
- * 3) returning the difference between two PSNs
- *
- * The number of significant bits in the PSN must
- * be at least one less than the width of the
- * container holding the PSN.
- */
-#ifndef CONFIG_HFI1_VERBS_31BIT_PSN
-#define PSN_MASK 0xFFFFFF
-#define PSN_SHIFT 8
-#else
-#define PSN_MASK 0x7FFFFFFF
-#define PSN_SHIFT 1
-#endif
-#define PSN_MODIFY_MASK 0xFFFFFF
-
-/* Number of bits to pay attention to in the opcode for checking qp type */
-#define OPCODE_QP_MASK 0xE0
-
-/*
- * Compare the lower 24 bits of the msn values.
- * Returns an integer less than, equal to, or greater than zero.
- */
-static inline int cmp_msn(u32 a, u32 b)
-{
- return (((int)a) - ((int)b)) << 8;
-}
-
-/*
- * Compare two PSNs.
- * Returns an integer less than, equal to, or greater than zero.
- */
-static inline int cmp_psn(u32 a, u32 b)
-{
- return (((int)a) - ((int)b)) << PSN_SHIFT;
-}
-
-/*
- * Return masked PSN
- */
-static inline u32 mask_psn(u32 a)
-{
- return a & PSN_MASK;
-}
-
-/*
- * Return delta between two PSNs
- */
-static inline u32 delta_psn(u32 a, u32 b)
-{
- return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
-}
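
The left-shift in cmp_psn() and delta_psn() discards the bits above the PSN, so the subtraction is evaluated modulo 2^24 (or 2^31 with the 31-bit PSN config) and the sign of the result follows the shorter arc around the sequence space. A minimal standalone illustration of the 24-bit case (plain userspace C, not driver code; it relies on the two's-complement shift behaviour the kernel assumes):

#include <assert.h>
#include <stdint.h>

#define PSN_SHIFT 8	/* 24 significant PSN bits in a 32-bit container */

/* Negative, zero, or positive depending on whether a is behind,
 * equal to, or ahead of b modulo 2^24. */
static int cmp_psn(uint32_t a, uint32_t b)
{
	return (((int)a) - ((int)b)) << PSN_SHIFT;
}

int main(void)
{
	assert(cmp_psn(5, 3) > 0);			/* ordinary case */
	assert(cmp_psn(0x000001, 0xFFFFFE) > 0);	/* 1 is "ahead" across the wrap */
	assert(cmp_psn(0xFFFFFE, 0x000001) < 0);
	return 0;
}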
-
-struct verbs_txreq;
-void hfi1_put_txreq(struct verbs_txreq *tx);
-
-int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-
-void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- int release, int copy_last);
-
-void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
-
-void hfi1_cnp_rcv(struct hfi1_packet *packet);
-
-void hfi1_uc_rcv(struct hfi1_packet *packet);
-
-void hfi1_rc_rcv(struct hfi1_packet *packet);
-
-void hfi1_rc_hdrerr(
- struct hfi1_ctxtdata *rcd,
- struct hfi1_ib_header *hdr,
- u32 rcv_flags,
- struct rvt_qp *qp);
-
-u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
-
-struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid);
-
-void hfi1_rc_rnr_retry(unsigned long arg);
-void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to);
-void hfi1_rc_timeout(unsigned long arg);
-void hfi1_del_timers_sync(struct rvt_qp *qp);
-void hfi1_stop_rc_timers(struct rvt_qp *qp);
-
-void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr);
-
-void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
-
-void hfi1_ud_rcv(struct hfi1_packet *packet);
-
-int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
-
-int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-
-void hfi1_migrate_qp(struct rvt_qp *qp);
-
-int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
-
-extern const u32 rc_only_opcode;
-extern const u32 uc_only_opcode;
-
-static inline u8 get_opcode(struct hfi1_ib_header *h)
-{
- u16 lnh = be16_to_cpu(h->lrh[0]) & 3;
-
- if (lnh == IB_LNH_IBA_LOCAL)
- return be32_to_cpu(h->u.oth.bth[0]) >> 24;
- else
- return be32_to_cpu(h->u.l.oth.bth[0]) >> 24;
-}
-
-int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0);
-
-u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
- struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps);
-
-void _hfi1_do_send(struct work_struct *work);
-
-void hfi1_do_send(struct rvt_qp *qp);
-
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
-void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct rvt_qp *qp, int is_fecn);
-
-int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-
-int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-
-int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-
-int hfi1_register_ib_device(struct hfi1_devdata *);
-
-void hfi1_unregister_ib_device(struct hfi1_devdata *);
-
-void hfi1_ib_rcv(struct hfi1_packet *packet);
-
-unsigned hfi1_get_npkeys(struct hfi1_devdata *);
-
-int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-
-int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
-
-int hfi1_wss_init(void);
-void hfi1_wss_exit(void);
-
-/* platform specific: return the lowest level cache (llc) size, in KiB */
-static inline int wss_llc_size(void)
-{
- /* assume that the boot CPU value is universal for all CPUs */
- return boot_cpu_data.x86_cache_size;
-}
-
-/* platform specific: cacheless copy */
-static inline void cacheless_memcpy(void *dst, void *src, size_t n)
-{
- /*
- * Use the only available X64 cacheless copy. Add a __user cast
-	 * to quiet sparse. The src argument is already in the kernel so
- * there are no security issues. The extra fault recovery machinery
- * is not invoked.
- */
- __copy_user_nocache(dst, (void __user *)src, n, 0);
-}
-
-extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
-
-extern const u8 hdr_len_by_opcode[];
-
-extern const int ib_rvt_state_ops[];
-
-extern __be64 ib_hfi1_sys_image_guid; /* in network order */
-
-extern unsigned int hfi1_max_cqes;
-
-extern unsigned int hfi1_max_cqs;
-
-extern unsigned int hfi1_max_qp_wrs;
-
-extern unsigned int hfi1_max_qps;
-
-extern unsigned int hfi1_max_sges;
-
-extern unsigned int hfi1_max_mcast_grps;
-
-extern unsigned int hfi1_max_mcast_qp_attached;
-
-extern unsigned int hfi1_max_srqs;
-
-extern unsigned int hfi1_max_srq_sges;
-
-extern unsigned int hfi1_max_srq_wrs;
-
-extern unsigned short piothreshold;
-
-extern const u32 ib_hfi1_rnr_table[];
-
-#endif /* HFI1_VERBS_H */
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/staging/rdma/hfi1/verbs_txreq.c
deleted file mode 100644
index bc95c4112..000000000
--- a/drivers/staging/rdma/hfi1/verbs_txreq.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "hfi.h"
-#include "verbs_txreq.h"
-#include "qp.h"
-#include "trace.h"
-
-#define TXREQ_LEN 24
-
-void hfi1_put_txreq(struct verbs_txreq *tx)
-{
- struct hfi1_ibdev *dev;
- struct rvt_qp *qp;
- unsigned long flags;
- unsigned int seq;
- struct hfi1_qp_priv *priv;
-
- qp = tx->qp;
- dev = to_idev(qp->ibqp.device);
-
- if (tx->mr)
- rvt_put_mr(tx->mr);
-
- sdma_txclean(dd_from_dev(dev), &tx->txreq);
-
- /* Free verbs_txreq and return to slab cache */
- kmem_cache_free(dev->verbs_txreq_cache, tx);
-
- do {
- seq = read_seqbegin(&dev->iowait_lock);
- if (!list_empty(&dev->txwait)) {
- struct iowait *wait;
-
- write_seqlock_irqsave(&dev->iowait_lock, flags);
- wait = list_first_entry(&dev->txwait, struct iowait,
- list);
- qp = iowait_to_qp(wait);
- priv = qp->priv;
- list_del_init(&priv->s_iowait.list);
- /* refcount held until actual wake up */
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
- hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
- break;
- }
- } while (read_seqretry(&dev->iowait_lock, seq));
-}
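
The loop above is a common seqlock idiom: peek at the wait list under read_seqbegin() without taking the lock, grab the write side only when there is actually a waiter to dequeue, and let read_seqretry() restart the scan if a writer raced with the lock-free check. A stripped-down sketch of the same pattern (hypothetical names, kernel C, assuming <linux/seqlock.h> and <linux/list.h>; the re-check under the lock is a defensive addition, not a copy of the driver code):

static void wake_first_waiter(seqlock_t *lock, struct list_head *waitq)
{
	unsigned long flags;
	unsigned int seq;

	do {
		seq = read_seqbegin(lock);	/* lock-free snapshot */
		if (!list_empty(waitq)) {
			write_seqlock_irqsave(lock, flags);
			if (!list_empty(waitq))	/* re-check under the lock */
				list_del_init(waitq->next);
			write_sequnlock_irqrestore(lock, flags);
			break;
		}
		/* retry if a writer changed the list while we peeked */
	} while (read_seqretry(lock, seq));
}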
-
-struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
- struct rvt_qp *qp)
-{
- struct verbs_txreq *tx = ERR_PTR(-EBUSY);
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- write_seqlock(&dev->iowait_lock);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- struct hfi1_qp_priv *priv;
-
- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
- if (tx)
- goto out;
- priv = qp->priv;
- if (list_empty(&priv->s_iowait.list)) {
- dev->n_txwait++;
- qp->s_flags |= RVT_S_WAIT_TX;
- list_add_tail(&priv->s_iowait.list, &dev->txwait);
- trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
- atomic_inc(&qp->refcount);
- }
- qp->s_flags &= ~RVT_S_BUSY;
- }
-out:
- write_sequnlock(&dev->iowait_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return tx;
-}
-
-static void verbs_txreq_kmem_cache_ctor(void *obj)
-{
- struct verbs_txreq *tx = (struct verbs_txreq *)obj;
-
- memset(tx, 0, sizeof(*tx));
-}
-
-int verbs_txreq_init(struct hfi1_ibdev *dev)
-{
- char buf[TXREQ_LEN];
- struct hfi1_devdata *dd = dd_from_dev(dev);
-
- snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
- dev->verbs_txreq_cache = kmem_cache_create(buf,
- sizeof(struct verbs_txreq),
- 0, SLAB_HWCACHE_ALIGN,
- verbs_txreq_kmem_cache_ctor);
- if (!dev->verbs_txreq_cache)
- return -ENOMEM;
- return 0;
-}
-
-void verbs_txreq_exit(struct hfi1_ibdev *dev)
-{
- kmem_cache_destroy(dev->verbs_txreq_cache);
- dev->verbs_txreq_cache = NULL;
-}
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/staging/rdma/hfi1/verbs_txreq.h
deleted file mode 100644
index 1cf69b2fe..000000000
--- a/drivers/staging/rdma/hfi1/verbs_txreq.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright(c) 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef HFI1_VERBS_TXREQ_H
-#define HFI1_VERBS_TXREQ_H
-
-#include <linux/types.h>
-#include <linux/slab.h>
-
-#include "verbs.h"
-#include "sdma_txreq.h"
-#include "iowait.h"
-
-struct verbs_txreq {
- struct hfi1_pio_header phdr;
- struct sdma_txreq txreq;
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_mregion *mr;
- struct rvt_sge_state *ss;
- struct sdma_engine *sde;
- struct send_context *psc;
- u16 hdr_dwords;
-};
-
-struct hfi1_ibdev;
-struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
- struct rvt_qp *qp);
-
-static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
- struct rvt_qp *qp)
-{
- struct verbs_txreq *tx;
- struct hfi1_qp_priv *priv = qp->priv;
-
- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
- if (unlikely(!tx)) {
- /* call slow path to get the lock */
- tx = __get_txreq(dev, qp);
- if (IS_ERR(tx))
- return tx;
- }
- tx->qp = qp;
- tx->mr = NULL;
- tx->sde = priv->s_sde;
- tx->psc = priv->s_sendcontext;
-	/* so that we can test if the sdma descriptors are there */
- tx->txreq.num_desc = 0;
- return tx;
-}
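
A caller pairs the fast-path allocator above with hfi1_put_txreq(); on allocation failure the slow path parks the QP on dev->txwait and returns an error pointer. A hypothetical caller sketch (kernel C, illustration only, not driver code; send_one and the placeholder body are invented for this example):

static int send_one(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
	struct verbs_txreq *tx;

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		return PTR_ERR(tx);	/* -EBUSY: QP queued on dev->txwait */

	/* ... fill tx->phdr and submit tx->txreq to the SDMA engine ... */

	hfi1_put_txreq(tx);	/* releases tx and wakes the next waiter */
	return 0;
}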
-
-static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
-{
- return &tx->txreq;
-}
-
-static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp)
-{
- struct sdma_txreq *stx;
- struct hfi1_qp_priv *priv = qp->priv;
-
- stx = iowait_get_txhead(&priv->s_iowait);
- if (stx)
- return container_of(stx, struct verbs_txreq, txreq);
- return NULL;
-}
-
-void hfi1_put_txreq(struct verbs_txreq *tx);
-int verbs_txreq_init(struct hfi1_ibdev *dev);
-void verbs_txreq_exit(struct hfi1_ibdev *dev);
-
-#endif /* HFI1_VERBS_TXREQ_H */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 012860b34..a5755358c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_AP_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index e5a6b7a70..77485235c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_CMD_C_
@@ -263,11 +258,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -350,7 +345,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
@@ -521,7 +516,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
/* prepare cmd parameter */
param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (param == NULL) {
+ if (!param) {
res = _FAIL;
goto exit;
}
@@ -530,7 +525,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
- if (cmdobj == NULL) {
+ if (!cmdobj) {
res = _FAIL;
kfree(param);
goto exit;
@@ -629,20 +624,20 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
clear_cam_entry(padapter, entry);
} else {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_ATOMIC);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_ATOMIC);
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
res = _FAIL;
@@ -676,13 +671,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -713,13 +708,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -757,7 +752,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
/* prepare cmd parameter */
setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param), GFP_KERNEL);
- if (setChannelPlan_param == NULL) {
+ if (!setChannelPlan_param) {
res = _FAIL;
goto exit;
}
@@ -766,7 +761,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmdobj == NULL) {
+ if (!pcmdobj) {
kfree(setChannelPlan_param);
res = _FAIL;
goto exit;
@@ -925,13 +920,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
if (enqueue) {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -968,13 +963,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1010,13 +1005,13 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
if (enqueue) {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1108,13 +1103,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 93e898d59..db5c952ac 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_DEBUG_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 19f11d04d..fbce1f7e6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_EFUSE_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index f4e4baf60..0b0d78fe8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _IEEE80211_C
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index cf60717a6..f85a6abec 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_IOCTL_SET_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index a645a620e..1456499b8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_MLME_C_
@@ -1584,13 +1579,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
int res = _SUCCESS;
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
- if (psetauthparm == NULL) {
+ if (!psetauthparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
@@ -1621,11 +1616,11 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
int res = _SUCCESS;
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL; /* try again */
psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
res = _FAIL;
goto err_free_cmd;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 591a9127b..7f32b39e5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_MLME_EXT_C_
@@ -606,8 +601,6 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack)
@@ -888,8 +881,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
DBG_88E("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
@@ -1212,8 +1203,6 @@ exit:
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */
@@ -2105,7 +2094,6 @@ static void site_survey(struct adapter *padapter)
issue_action_BSSCoexistPacket(padapter);
issue_action_BSSCoexistPacket(padapter);
}
- return;
}
/* collect bss info from Beacon and Probe request/response frames. */
@@ -4295,12 +4283,12 @@ void report_survey_event(struct adapter *padapter,
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4332,8 +4320,6 @@ void report_survey_event(struct adapter *padapter,
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
pmlmeext->sitesurvey_res.bss_cnt++;
-
- return;
}
void report_surveydone_event(struct adapter *padapter)
@@ -4347,12 +4333,12 @@ void report_surveydone_event(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4377,8 +4363,6 @@ void report_surveydone_event(struct adapter *padapter)
DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_join_res(struct adapter *padapter, int res)
@@ -4393,12 +4377,12 @@ void report_join_res(struct adapter *padapter, int res)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4429,8 +4413,6 @@ void report_join_res(struct adapter *padapter, int res)
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
@@ -4446,12 +4428,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4486,8 +4468,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -4501,12 +4481,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4532,8 +4512,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
DBG_88E("report_add_sta_event: add STA\n");
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
@@ -4917,11 +4895,11 @@ void survey_timer_hdl(unsigned long data)
}
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
goto exit_survey_timer_hdl;
psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
goto exit_survey_timer_hdl;
}
@@ -4969,7 +4947,6 @@ void link_timer_hdl(unsigned long data)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
- return;
}
void addba_timer_hdl(unsigned long data)
@@ -5485,7 +5462,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 5e1ef9fdc..59c6d8ab6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_PWRCTRL_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 5f53aa1cf..977bb2532 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_RECV_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index 4ad2d8f63..3fc1a8fd3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_RF_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index b781ccf45..442a614a3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_SECURITY_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index e725a4708..13a5bf473 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <rtw_sreset.h>
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 78a9b9bf3..a71e25294 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_STA_MGT_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 83096696c..4410fe8d7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index f2dd7a60f..e0a5567f5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_XMIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index a108e8032..201c15b07 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -557,7 +557,7 @@ int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
u8 WirelessMode = 0xFF; /* invalid value */
u8 max_rate_idx = 0x13; /* MCS7 */
- if (dm_odm->pWirelessMode != NULL)
+ if (dm_odm->pWirelessMode)
WirelessMode = *(dm_odm->pWirelessMode);
if (WirelessMode != 0xFF) {
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index c2ad6a3b9..cce1ea259 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 8e904bd8e..e1964d65b 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 3871cda2e..960cc406d 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 85c17ef94..085f0fbd0 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HAL_INTF_C_
@@ -186,7 +181,7 @@ s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
- if (adapt->HalFunc.init_xmit_priv != NULL)
+ if (adapt->HalFunc.init_xmit_priv)
return adapt->HalFunc.init_xmit_priv(adapt);
return _FAIL;
}
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index 0bc1b2152..6ed5e15ce 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 8d2316b9e..57a127501 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 28b9f7f59..0555e42a3 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index c0242a095..dd9b902c8 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index ae42b4492..a83bbea9b 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_PHYCFG_C_
diff --git a/drivers/staging/rtl8188eu/hal/pwrseq.c b/drivers/staging/rtl8188eu/hal/pwrseq.c
index 20dce42ce..d92a34ea8 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseq.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseq.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include "pwrseq.h"
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index b76b0f5d6..2867864bb 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#include <pwrseqcmd.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 38845d17d..1596274ee 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 44945427c..453f9e729 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 580876313..2422c0297 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_CMD_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index f9919a94a..81f293187 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* */
/* Description: */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 2592bc298..0b444fd3e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HAL_INIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 53cf3baf4..f110c961d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_REDESC_C_
@@ -45,7 +40,7 @@ static void process_link_qual(struct adapter *padapter,
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
- if (prframe == NULL || padapter == NULL)
+ if (!prframe || !padapter)
return;
pattrib = &prframe->attrib;
@@ -64,7 +59,7 @@ static void process_link_qual(struct adapter *padapter,
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
{
- struct recv_frame *precvframe = (struct recv_frame *)prframe;
+ struct recv_frame *precvframe = prframe;
/* Check RSSI */
process_rssi(padapter, precvframe);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index a6ba53b48..460a20558 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 564cf53bf..d9e677ef8 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d6d009aaf..255d6f215 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188EU_RECV_C_
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index c96d80487..ec21d8c82 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 07a61b827..363f3a34d 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HCI_HAL_INIT_C_
@@ -62,8 +57,8 @@ static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPip
_ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
/* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (1 == haldata->OutEpNumber) {
- if (1 != NumInPipe)
+ if (haldata->OutEpNumber == 1) {
+ if (NumInPipe != 1)
return result;
}
@@ -179,7 +174,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
if (haldata->OutEpQueueSel & TX_SELE_LQ)
numLQ = 0x1C;
- /* NOTE: This step shall be proceed before writting REG_RQPN. */
+ /* NOTE: This step shall be proceed before writing REG_RQPN. */
if (haldata->OutEpQueueSel & TX_SELE_NQ)
numNQ = 0x1C;
value8 = (u8)_NPQ(numNQ);
@@ -457,7 +452,8 @@ static void _InitRetryFunction(struct adapter *Adapter)
* When Who Remark
* 12/10/2010 MHC Separate to smaller function.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
@@ -489,7 +485,8 @@ static void usb_AggSettingTxUpdate(struct adapter *Adapter)
* When Who Remark
* 12/10/2010 MHC Separate to smaller function.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void
usb_AggSettingRxUpdate(
struct adapter *Adapter
@@ -655,7 +652,8 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
* Revised History:
* When Who Remark
* 08/23/2010 MHC HW suspend mode switch test..
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
{
u8 val8;
@@ -687,11 +685,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
if (Adapter->pwrctrlpriv.bkeepfwalive) {
-
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
rtl88eu_phy_iq_calibrate(Adapter, true);
} else {
@@ -715,9 +711,8 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Save target channel */
haldata->CurrentChannel = 6;/* default set to 6 */
- if (pwrctrlpriv->reg_rfoff) {
+ if (pwrctrlpriv->reg_rfoff)
pwrctrlpriv->rf_pwrstate = rf_off;
- }
/* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
/* HW GPIO pin. Before PHY_RFConfig8192C. */
@@ -749,10 +744,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
DBG_88E("%s: Download Firmware failed!!\n", __func__);
Adapter->bFWReady = false;
return status;
- } else {
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
- Adapter->bFWReady = true;
}
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
+ Adapter->bFWReady = true;
}
rtl8188e_InitializeFirmwareVars(Adapter);
@@ -878,7 +872,7 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
- rtl88eu_phy_iq_calibrate(Adapter, true);
+ rtl88eu_phy_iq_calibrate(Adapter, true);
} else {
rtl88eu_phy_iq_calibrate(Adapter, false);
haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
@@ -905,7 +899,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
DBG_88E("%s in %dms\n", __func__,
jiffies_to_msecs(jiffies - init_start_time));
-
return status;
}
@@ -968,6 +961,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
haldata->bMacPwrCtrlOn = false;
Adapter->bFWReady = false;
}
+
static void rtl8192cu_hw_power_down(struct adapter *adapt)
{
/* 2010/-8/09 MH For power down module, we need to enable register block contrl reg at 0x1c. */
@@ -980,7 +974,6 @@ static void rtl8192cu_hw_power_down(struct adapter *adapt)
static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
{
-
DBG_88E("==> %s\n", __func__);
usb_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
@@ -999,14 +992,14 @@ static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
}
}
return _SUCCESS;
- }
+}
static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
{
u8 i;
struct recv_buf *precvbuf;
uint status;
- struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
status = _SUCCESS;
@@ -1116,7 +1109,6 @@ readAdapterInfo_8188EU(
Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
}
static void _ReadPROMContent(
@@ -1212,7 +1204,7 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
StopTxBeacon(Adapter);
usb_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
- } else if ((mode == _HW_STATE_ADHOC_)) {
+ } else if (mode == _HW_STATE_ADHOC_) {
ResumeTxBeacon(Adapter);
usb_write8(Adapter, REG_BCN_CTRL, 0x1a);
} else if (mode == _HW_STATE_AP_) {
@@ -1363,7 +1355,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
u64 tsf;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
@@ -1420,7 +1412,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(4));
} else { /* sitesurvey done */
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if ((is_client_associated_to_ap(Adapter)) ||
((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
@@ -1490,7 +1482,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
u8 u1bAIFS, aSifsTime;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
usb_write8(Adapter, REG_SLOT, val[0]);
@@ -1790,7 +1782,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
break;
case HW_VAR_H2C_MEDIA_STATUS_RPT:
- rtl8188e_set_FwMediaStatus_cmd(Adapter , (*(__le16 *)val));
+ rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
break;
case HW_VAR_BCN_VALID:
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
@@ -1855,7 +1847,6 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
default:
break;
}
-
}
/* */
@@ -1904,19 +1895,19 @@ GetHalDefVar8188EUsb(
case HAL_DEF_RA_DECISION_RATE:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_RA_SGI:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_PT_PWR_STATUS:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, MacID);
}
break;
case HW_VAR_MAX_RX_AMPDU_FACTOR:
@@ -1939,7 +1930,7 @@ GetHalDefVar8188EUsb(
break;
case HW_DEF_ODM_DBG_FLAG:
{
- struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ struct odm_dm_struct *dm_ocm = &haldata->odmpriv;
pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
}
break;
@@ -1967,8 +1958,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
struct sta_info *psta;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
if (mac_id >= NUM_STA) /* CAM_SIZE */
return;
@@ -1981,8 +1972,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
raid = networktype_to_raid(networkType);
mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
- mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&(pmlmeinfo->HT_caps)) : 0;
- if (support_short_GI(adapt, &(pmlmeinfo->HT_caps)))
+ mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&pmlmeinfo->HT_caps) : 0;
+ if (support_short_GI(adapt, &pmlmeinfo->HT_caps))
shortGIrate = true;
break;
case 1:/* for broadcast/multicast */
@@ -2023,8 +2014,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
{
u32 value32;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u32 bcn_ctrl_reg = REG_BCN_CTRL;
/* reset TSF, enable update TSF, correcting TSF On Beacon */
@@ -2083,7 +2074,7 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (adapt->HalData == NULL)
+ if (!adapt->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
halfunc->hal_power_on = rtl8188eu_InitPowerOn;
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 2670d6b6a..8990748a1 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYCFG_H__
#define __INC_HAL8188EPHYCFG_H__
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 9f2969bf8..344c73d10 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYREG_H__
#define __INC_HAL8188EPHYREG_H__
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
index 1bf9bc70a..dbb55247b 100644
--- a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#ifndef __INC_FW_8188E_HW_IMG_H
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index 6f2b2a436..d244efff3 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 3fb691daa..2c1676d2a 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index dcb032b6c..55506a7da 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8188eu/include/fw.h b/drivers/staging/rtl8188eu/include/fw.h
index 7884d8f65..b016f32a8 100644
--- a/drivers/staging/rtl8188eu/include/fw.h
+++ b/drivers/staging/rtl8188eu/include/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
index 47715d949..aaf444733 100644
--- a/drivers/staging/rtl8188eu/include/hal_com.h
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_COMMON_H__
#define __HAL_COMMON_H__
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 1b1c10292..eaf939bd4 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_INTF_H__
#define __HAL_INTF_H__
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index f8f5eb6b7..d8284c84f 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __IEEE80211_H
#define __IEEE80211_H
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
index ae1722c67..5a35b0866 100644
--- a/drivers/staging/rtl8188eu/include/mlme_osdep.h
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __MLME_OSDEP_H_
#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
index 6fa52cf99..1a06ee6ad 100644
--- a/drivers/staging/rtl8188eu/include/mp_custom_oid.h
+++ b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __CUSTOM_OID_H
#define __CUSTOM_OID_H
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index af781c7cd..dbebf17f3 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index ef792bfd5..da7325d59 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
index 14dce6c4b..72b4db67a 100644
--- a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_RTL8188E_H__
#define __ODM_RTL8188E_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
index 5a61f902b..c82c09013 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_REGDEFINE11N_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index e9390963d..52e51f19f 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 0f236da09..9e5fe1777 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_PRECOMP_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
index 7f10b695c..3405a44a1 100644
--- a/drivers/staging/rtl8188eu/include/odm_reg.h
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* */
/* File Name: odm_reg.h */
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
index c1355b959..3474a9c72 100644
--- a/drivers/staging/rtl8188eu/include/odm_types.h
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_TYPES_H__
#define __ODM_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 1521744d6..54fca7982 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 22de53d65..5475956c5 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __OSDEP_SERVICE_H_
#define __OSDEP_SERVICE_H_
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index 9dbf8435f..afd61cf4c 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -12,11 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL8188EPWRSEQ_H__
diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
index 468a3fb28..c4a919ea1 100644
--- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h
+++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HALPWRSEQCMD_H__
#define __HALPWRSEQCMD_H__
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index fdeb603b6..cad31587c 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RECV_OSDEP_H_
#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index f813ce056..4d7d80465 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_CMD_H__
#define __RTL8188E_CMD_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 5e0ac31ef..4190112a5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_DM_H__
#define __RTL8188E_DM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index e96584a3e..ed3d56538 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_HAL_H__
#define __RTL8188E_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
index c0147e73c..fca6d8c81 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_led.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_LED_H__
#define __RTL8188E_LED_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 5fed30d38..54048bc82 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_RECV_H__
#define __RTL8188E_RECV_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index beeee4a6b..fb82f663b 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*******************************************************************************/
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 0b96d42e2..65a63df20 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_XMIT_H__
#define __RTL8188E_XMIT_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index e85bf1ff0..e81ee92b0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_ANDROID_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
index 6128ccce9..b820684bc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ap.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_AP_H_
#define __RTW_AP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 9e9f5f4af..08ca59217 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_CMD_H_
#define __RTW_CMD_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 971bf457f..7ed4cada7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_DEBUG_H__
#define __RTW_DEBUG_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 904fea1fa..5dd73841d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_EEPROM_H__
#define __RTW_EEPROM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 5660eed71..9bfb10c30 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 52151dc44..5c34e567d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_EVENT_H_
#define _RTW_EVENT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
index beb210b37..b45483fd0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ht.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_HT_H_
#define _RTW_HT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index ee2cb54a7..3a652df4b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_IOCTL_H_
#define _RTW_IOCTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
index 8fa3858cb..da4949f94 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_IOCTL_RTL_H_
#define _RTW_IOCTL_RTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index fa9d655ea..b6e14a8b7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_IOCTL_SET_H_
#define __RTW_IOCTL_SET_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 68aae7f0b..1f324e68d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_IOL_H_
#define __RTW_IOL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 4c992573e..5d8bce0f5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_MLME_H_
#define __RTW_MLME_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 44711332b..27382ff24 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_MLME_EXT_H_
#define __RTW_MLME_EXT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 30fd17f23..02b300217 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/*****************************************************************************
*
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index a493d4c37..9680e2eab 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_PWRCTRL_H_
#define __RTW_PWRCTRL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index bbee1ddc0..45a77f6f8 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_QOS_H_
#define _RTW_QOS_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index eb1ac3d03..b0373b621 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_RECV_H_
#define _RTW_RECV_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 35f61be12..66896af02 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_RF_H_
#define __RTW_RF_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index a1aebe6c8..ca1247bce 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_SECURITY_H_
#define __RTW_SECURITY_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
index 3a62ed010..ce027dfde 100644
--- a/drivers/staging/rtl8188eu/include/rtw_sreset.h
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_SRESET_C_
#define _RTW_SRESET_C_
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index b7c20883d..a0853bab3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_XMIT_H_
#define _RTW_XMIT_H_
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index d4e78326f..42a035123 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __STA_INFO_H_
#define __STA_INFO_H_
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
index 8a65995d5..b1bf07a90 100644
--- a/drivers/staging/rtl8188eu/include/usb_hal.h
+++ b/drivers/staging/rtl8188eu/include/usb_hal.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __USB_HAL_H__
#define __USB_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 4fdc536cb..220733314 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __USB_OPS_LINUX_H__
#define __USB_OPS_LINUX_H__
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 6cb5beca1..e7c512183 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _WIFI_H_
#define _WIFI_H_
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 85b99da49..560966cd7 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __WLAN_BSSDEF_H__
#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index 13965f248..f96ca6af9 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __XMIT_OSDEP_H_
#define __XMIT_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 911980495..5672f014c 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _IOCTL_LINUX_C_
@@ -2120,13 +2115,13 @@ static u8 set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -2158,12 +2153,12 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
DBG_88E("%s\n", __func__);
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
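
The two ioctl_linux.c hunks above are mechanical checkpatch cleanups: explicit
NULL comparisons after kzalloc() become the negated-pointer form preferred by
the kernel coding style, with no change in behaviour. A minimal sketch of the
pattern (identifiers illustrative, not taken from this driver):

	struct cmd_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)		/* was: if (obj == NULL) */
		return -ENOMEM;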
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 08bfa76f4..bc756267c 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 63bb87593..d976e5e18 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -155,7 +155,7 @@ static void mon_setup(struct net_device *dev)
dev->netdev_ops = &mon_netdev_ops;
dev->destructor = free_netdev;
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211;
/*
* Use a locally administered address (IEEE 802)
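
Setting tx_queue_len to 0 was the old idiom for "this virtual device needs no
qdisc"; since the IFF_NO_QUEUE private flag was added, drivers request that
directly and leave the default tx_queue_len intact. A short sketch of the
replacement, assuming the usual net_device setup-callback shape:

	static void mon_setup_sketch(struct net_device *dev)
	{
		ether_setup(dev);
		/* replaces: dev->tx_queue_len = 0; */
		dev->priv_flags |= IFF_NO_QUEUE;
	}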
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 7986e6785..ae2caff03 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _OS_INTFS_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index f090bef59..764250b4b 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index d4734baff..0c44914ea 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 5f3337c28..41e1b1d15 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <linux/module.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 794cc1143..11d51a301 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define pr_fmt(fmt) "R8188EU: " fmt
@@ -65,7 +60,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_device *pusbd;
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
- if (pdvobjpriv == NULL)
+ if (!pdvobjpriv)
return NULL;
pdvobjpriv->pusbintf = usb_intf;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 0fea338d7..ce1e1a135 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#define _USB_OPS_LINUX_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 1593e280e..221e27506 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _XMIT_OSDEP_C_
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index ccdcebeeb..32fe7352d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1792,7 +1792,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
__skb_queue_tail(&ring->queue, skb);
pdesc->OWN = 1;
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
return 0;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index cfab71549..62154e3f4 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1991,7 +1991,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
return 2;
if (!time_after(jiffies,
- ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+ dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
return 0;
if (!time_after(jiffies,
ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
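
These trans_start hunks (here and in the rtl8192u files below) track the 4.7
netdev change that moved trans_start from struct net_device into the per-queue
struct netdev_queue: writers call netif_trans_update(dev), which stamps
jiffies on TX queue 0, and readers call dev_trans_start(dev), which derives a
per-device timestamp from the per-queue ones, instead of touching the removed
field. The pattern, with illustrative names:

	/* TX path: record the time of the last transmit */
	netif_trans_update(dev);	/* was: dev->trans_start = jiffies; */

	/* watchdog/power-save path: has the device been idle long enough? */
	if (time_after(jiffies,
		       dev_trans_start(dev) + msecs_to_jiffies(timeout)))
		/* ... treat the queue as idle ... */;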
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index f18fc0b67..051c2be84 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -746,7 +746,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
// Indicate packets
if(index>REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
kfree(prxbIndicateArray);
return;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index ae1274cfb..d70559576 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -249,7 +249,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
ieee->seq_ctrl[0]++;
/* avoid watchdog triggers */
- ieee->dev->trans_start = jiffies;
+ netif_trans_update(ieee->dev);
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
//dev_kfree_skb_any(skb);//edit by thomas
}
@@ -302,7 +302,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
ieee->seq_ctrl[0]++;
/* avoid watchdog triggers */
- ieee->dev->trans_start = jiffies;
+ netif_trans_update(ieee->dev);
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
}else{
@@ -1737,7 +1737,7 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 2;
if(!time_after(jiffies,
- ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+ dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
return 0;
if(!time_after(jiffies,
@@ -2205,7 +2205,7 @@ static void ieee80211_resume_tx(struct ieee80211_device *ieee)
ieee->dev, ieee->rate);
//(i+1)<ieee->tx_pending.txb->nr_frags);
ieee->stats.tx_packets++;
- ieee->dev->trans_start = jiffies;
+ netif_trans_update(ieee->dev);
}
}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 148d0d455..6033502ef 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -75,7 +75,7 @@ static void RxPktPendingTimeout(unsigned long data)
// Indicate packets
if(index > REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
return;
}
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 5c3bb3be2..d733fb2ad 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -194,7 +194,7 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
break;
}
- /*----Restore RFENV control type----*/;
+ /*----Restore RFENV control type----*/
switch (eRFPath) {
case RF90_PATH_A:
case RF90_PATH_C:
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 849a95ef7..8c1d73719 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -235,7 +235,6 @@ static void CamResetAllEntry(struct net_device *dev)
*/
ulcommand |= BIT(31) | BIT(30);
write_nic_dword(dev, RWCAM, ulcommand);
-
}
@@ -298,6 +297,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
return 0;
}
+
/* as 92U has extend page from 4 to 16, so modify functions below. */
void write_nic_byte(struct net_device *dev, int indx, u8 data)
{
@@ -319,14 +319,11 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
if (status < 0)
netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
-
-
}
void write_nic_word(struct net_device *dev, int indx, u16 data)
{
-
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -345,13 +342,11 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
if (status < 0)
netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
-
}
void write_nic_dword(struct net_device *dev, int indx, u32 data)
{
-
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -372,7 +367,6 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
if (status < 0)
netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
status);
-
}
@@ -738,7 +732,6 @@ void rtl8192_update_msr(struct net_device *dev)
* master (see the create BSS/IBSS func)
*/
if (priv->ieee80211->state == IEEE80211_LINKED) {
-
if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -773,11 +766,10 @@ static void rtl8192_rx_isr(struct urb *urb);
static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
-
return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ pstats->RxBufShift);
-
}
+
static int rtl8192_rx_initiate(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -874,6 +866,7 @@ void rtl8192_set_rxconf(struct net_device *dev)
write_nic_dword(dev, RCR, rxconf);
}
+
/* wait to be removed */
void rtl8192_rx_enable(struct net_device *dev)
{
@@ -943,9 +936,9 @@ inline u16 ieeerate2rtlrate(int rate)
return 11;
default:
return 3;
-
}
}
+
static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
inline u16 rtl8192_rate2rate(short rate)
{
@@ -1050,7 +1043,7 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->tx_lock, flags);
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ *(struct net_device **)(skb->cb) = dev;
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
@@ -1100,7 +1093,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
if (!skb)
return;
- dev = (struct net_device *)(skb->cb);
+ dev = *(struct net_device **)(skb->cb);
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
queue_index = tcb_desc->queue_index;
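
The paired hunks above replace a memcpy() of the pointer value into skb->cb
with a direct typed store and load. The bytes are the same, but the cast form
states the layout explicitly (a single struct net_device pointer at the start
of the 48-byte control buffer) and keeps the store and the load visibly
symmetric. Sketch, assuming only that pointer lives at offset 0 of cb:

	/* store on the xmit side */
	*(struct net_device **)(skb->cb) = dev;

	/* load in the URB completion handler */
	struct net_device *dev = *(struct net_device **)(skb->cb);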
@@ -1108,7 +1101,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
if (tcb_desc->queue_index != TXCMD_QUEUE) {
if (tx_urb->status == 0) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
priv->stats.txoktotal++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
priv->stats.txbytesunicast +=
@@ -1149,7 +1142,6 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
return; /* avoid further processing AMSDU */
}
}
-
}
static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
@@ -1272,11 +1264,10 @@ static void rtl8192_update_cap(struct net_device *dev, u16 cap)
priv->slot_time = slot_time;
write_nic_byte(dev, SLOT_TIME, slot_time);
}
-
}
+
static void rtl8192_net_update(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
@@ -1303,9 +1294,6 @@ static void rtl8192_net_update(struct net_device *dev)
write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
}
-
-
-
}
/* temporary hw beacon is not used any more.
@@ -1315,6 +1303,7 @@ void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
}
+
inline u8 rtl8192_IsWirelessBMode(u16 rate)
{
if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
@@ -1715,7 +1704,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
return -1;
}
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
return 0;
}
@@ -1737,7 +1726,6 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
#ifndef JACKSON_NEW_RX
for (i = 0; i < (MAX_RX_URB + 1); i++) {
-
priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
priv->rx_urb[i]->transfer_buffer =
@@ -1782,8 +1770,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
netdev_dbg(dev, "End of initendpoints\n");
return 0;
-
}
+
#ifdef THOMAS_BEACON
static void rtl8192_usb_deleteendpoints(struct net_device *dev)
{
@@ -1820,7 +1808,6 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
}
kfree(priv->rx_urb);
priv->rx_urb = NULL;
-
}
#else
kfree(priv->rx_urb);
@@ -1888,6 +1875,7 @@ static void rtl8192_update_beacon(struct work_struct *work)
net->bssht.bdRT2RTLongSlotTime;
rtl8192_update_cap(dev, net->capability);
}
+
/*
* background support to run QoS activate functionality
*/
@@ -1992,7 +1980,6 @@ static int rtl8192_handle_beacon(struct net_device *dev,
rtl8192_qos_handle_probe_response(priv, 1, network);
schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
-
}
/*
@@ -2007,7 +1994,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
u32 size = sizeof(struct ieee80211_qos_parameters);
int set_qos_param = 0;
- if ((priv == NULL) || (network == NULL))
+ if (!priv || !network)
return 0;
if (priv->ieee80211->state != IEEE80211_LINKED)
@@ -2182,6 +2169,7 @@ static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
}
return ret;
}
+
static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2223,8 +2211,8 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
priv->ieee80211->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
rtl8192_refresh_supportrate(priv);
-
}
+
/* init priv variables here. only non_zero value should be initialized here. */
static void rtl8192_init_priv_variable(struct net_device *dev)
{
@@ -2432,6 +2420,7 @@ static inline u16 endian_swap(u16 *data)
*data = (tmp >> 8) | (tmp << 8);
return *data;
}
+
static void rtl8192_read_eeprom_info(struct net_device *dev)
{
u16 wEPROM_ID = 0;
@@ -2627,7 +2616,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
default:
priv->CustomerID = RT_CID_DEFAULT;
break;
-
}
switch (priv->CustomerID) {
@@ -2642,7 +2630,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
default:
priv->LedStrategy = SW_LED_MODE0;
break;
-
}
@@ -2676,7 +2663,6 @@ static short rtl8192_get_channel_map(struct net_device *dev)
static short rtl8192_init(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
memset(&(priv->stats), 0, sizeof(struct Stats));
@@ -2797,8 +2783,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
/* Set Tx Antenna including Feedback control */
/* Set Auto Rate fallback control */
-
-
}
@@ -3027,7 +3011,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
bMaskByte2);
for (i = 0; i < CCKTxBBGainTableLength; i++) {
-
if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
priv->cck_present_attentuation_20Mdefault = (u8)i;
break;
@@ -3037,7 +3020,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
priv->cck_present_attentuation_difference = 0;
priv->cck_present_attentuation =
priv->cck_present_attentuation_20Mdefault;
-
}
}
write_nic_byte(dev, 0x87, 0x0);
@@ -3222,7 +3204,6 @@ static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
} else {
return RESET_TYPE_NORESET;
}
-
}
static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
@@ -3250,7 +3231,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
-
for (EntryId = 0; EntryId < 4; EntryId++) {
MacAddr = CAM_CONST_ADDR[EntryId];
setKey(dev, EntryId, EntryId,
@@ -3259,7 +3239,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
}
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
-
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
(u8 *)dev->dev_addr, 0, NULL);
@@ -3267,7 +3246,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
MacAddr, 0, NULL);
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
-
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
(u8 *)dev->dev_addr, 0, NULL);
@@ -3301,6 +3279,7 @@ static void CamRestoreAllEntry(struct net_device *dev)
CAM_CONST_ADDR[0], 0, NULL);
}
}
+
/* This function is used to fix Tx/Rx stop bug temporarily.
* This function will do "system reset" to NIC when Tx or Rx is stuck.
* The method checking Tx/Rx stuck of this function is supported by FW,
@@ -3468,7 +3447,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
/* for AP roaming */
if (priv->ieee80211->state == IEEE80211_LINKED &&
priv->ieee80211->iw_mode == IW_MODE_INFRA) {
-
rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
#ifdef TODO
@@ -3485,7 +3463,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
priv->ieee80211->link_change(dev);
queue_work(priv->ieee80211->wq,
&priv->ieee80211->associate_procedure_wq);
-
}
}
priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3510,7 +3487,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
-
}
static void watch_dog_timer_callback(unsigned long data)
@@ -3521,6 +3497,7 @@ static void watch_dog_timer_callback(unsigned long data)
mod_timer(&priv->watch_dog_timer,
jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
}
+
static int _rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3560,7 +3537,6 @@ static int rtl8192_open(struct net_device *dev)
ret = rtl8192_up(dev);
up(&priv->wx_sem);
return ret;
-
}
@@ -3587,7 +3563,6 @@ static int rtl8192_close(struct net_device *dev)
up(&priv->wx_sem);
return ret;
-
}
int rtl8192_down(struct net_device *dev)
@@ -3649,7 +3624,6 @@ void rtl8192_commit(struct net_device *dev)
rtl8192_rtx_disable(dev);
reset_status = _rtl8192_up(dev);
-
}
static void rtl8192_restart(struct work_struct *work)
@@ -4111,7 +4085,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
(((priv->undecorated_smoothed_pwdb) * (Rx_Smooth_Factor - 1)) +
(pprevious_stats->RxPWDBAll)) / (Rx_Smooth_Factor);
}
-
}
/* Check EVM */
@@ -4159,8 +4132,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
}
}
}
-
-
}
/*-----------------------------------------------------------------------------
@@ -4201,6 +4172,7 @@ static u8 rtl819x_evm_dbtopercentage(char value)
ret_val = 100;
return ret_val;
}
+
/* We want good-looking for signal strength/quality */
static long rtl819x_signal_scale_mapping(long currsig)
{
@@ -4542,7 +4514,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
bpacket_match_bssid, bpacket_toself,
bPacketBeacon, bToSelfBA);
rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
-
}
/**
@@ -4758,7 +4729,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
RT_TRACE(COMP_RXDESC,
"driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
-
}
skb_pull(skb, sizeof(rx_desc_819x_usb));
@@ -4822,7 +4792,6 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
netdev_dbg(dev, "actual_length: %d\n", skb->len);
dev_kfree_skb_any(skb);
}
-
}
static void rtl819xusb_process_received_packet(
@@ -4898,7 +4867,6 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
};
if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
-
query_rx_cmdpkt_desc_status(skb, &stats);
/* prfd->queue_id = 1; */
@@ -4937,7 +4905,6 @@ static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
info->out_pipe);
dev_kfree_skb(skb);
break;
-
}
}
}
@@ -4971,7 +4938,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
dev = alloc_ieee80211(sizeof(struct r8192_priv));
- if (dev == NULL)
+ if (!dev)
return -ENOMEM;
usb_set_intfdata(intf, dev);
@@ -5034,7 +5001,6 @@ fail:
*/
static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
{
-
cancel_work_sync(&priv->reset_wq);
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
@@ -5191,13 +5157,12 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
write_nic_dword(dev, RWCAM, TargetCommand);
} else {
/* Key Material */
- if (KeyContent != NULL) {
+ if (KeyContent) {
write_nic_dword(dev, WCAMI, (u32)(*(KeyContent + i - 2)));
write_nic_dword(dev, RWCAM, TargetCommand);
}
}
}
-
}
/***************************************************************************
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index f828e6441..837704de3 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -30,7 +30,6 @@
static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
-
#ifndef ENETDOWN
#define ENETDOWN 1
#endif
@@ -44,7 +43,6 @@ static int r8192_wx_get_freq(struct net_device *dev,
return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
}
-
static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -53,8 +51,6 @@ static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
}
-
-
static int r8192_wx_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -64,8 +60,6 @@ static int r8192_wx_get_rate(struct net_device *dev,
return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
}
-
-
static int r8192_wx_set_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -82,7 +76,6 @@ static int r8192_wx_set_rate(struct net_device *dev,
return ret;
}
-
static int r8192_wx_set_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -148,7 +141,6 @@ static int r8192_wx_force_reset(struct net_device *dev,
}
-
static int r8192_wx_set_rawtx(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -301,7 +293,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
/* range->min_r_time; */ /* Minimal retry lifetime */
/* range->max_r_time; */ /* Maximal retry lifetime */
-
for (i = 0, val = 0; i < 14; i++) {
/* Include only legal frequencies for some countries */
@@ -326,7 +317,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -396,9 +386,6 @@ static int r8192_wx_set_essid(struct net_device *dev,
return ret;
}
-
-
-
static int r8192_wx_get_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -415,7 +402,6 @@ static int r8192_wx_get_essid(struct net_device *dev,
return ret;
}
-
static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -439,7 +425,6 @@ static int r8192_wx_get_name(struct net_device *dev,
return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
}
-
static int r8192_wx_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -493,7 +478,6 @@ static int r8192_wx_set_wap(struct net_device *dev,
}
-
static int r8192_wx_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -503,7 +487,6 @@ static int r8192_wx_get_wap(struct net_device *dev,
return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
}
-
static int r8192_wx_get_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key)
@@ -695,7 +678,6 @@ static int r8192_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->retry_data;
}
-
return 0;
}
@@ -711,7 +693,6 @@ static int r8192_wx_get_sens(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -862,7 +843,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
return -1;
}
-
static iw_handler r8192_wx_handlers[] = {
NULL, /* SIOCSIWCOMMIT */
r8192_wx_get_name, /* SIOCGIWNAME */
@@ -949,7 +929,6 @@ static const struct iw_priv_args r8192_private_args[] = {
};
-
static iw_handler r8192_private_handler[] = {
r8192_wx_set_crcmon,
r8192_wx_set_scan_type,
@@ -985,7 +964,6 @@ struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
return wstats;
}
-
struct iw_handler_def r8192_wx_handlers_def = {
.standard = r8192_wx_handlers,
.num_standard = ARRAY_SIZE(r8192_wx_handlers),
diff --git a/drivers/staging/rtl8712/basic_types.h b/drivers/staging/rtl8712/basic_types.h
index 7561bed5d..f5c023189 100644
--- a/drivers/staging/rtl8712/basic_types.h
+++ b/drivers/staging/rtl8712/basic_types.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 29e47e150..ae79047ac 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/ethernet.h b/drivers/staging/rtl8712/ethernet.h
index fad173f40..039da36fa 100644
--- a/drivers/staging/rtl8712/ethernet.h
+++ b/drivers/staging/rtl8712/ethernet.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 7914bdab7..b6f93af70 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
@@ -201,8 +197,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
0x0000ffff);
memcpy(ppayload, ptr, dump_imem_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- dump_imem_sz + TXDESC_SIZE,
- (u8 *)ptx_desc);
+ dump_imem_sz + TXDESC_SIZE,
+ (u8 *)ptx_desc);
ptr += dump_imem_sz;
imem_sz -= dump_imem_sz;
} while (imem_sz > 0);
@@ -230,7 +226,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
0x0000ffff);
memcpy(ppayload, ptr, dump_emem_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- dump_emem_sz + TXDESC_SIZE, (u8 *)ptx_desc);
+ dump_emem_sz + TXDESC_SIZE,
+ (u8 *)ptx_desc);
ptr += dump_emem_sz;
emem_sz -= dump_emem_sz;
} while (emem_sz > 0);
@@ -282,7 +279,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
ptx_desc->txdw0 |= cpu_to_le32(BIT(28));
memcpy(ppayload, &fwhdr.fwpriv, fwhdr.fw_priv_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
+ fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
/* polling dmem code done */
i = 100;
@@ -297,7 +294,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
tmp8 = r8712_read8(padapter, 0x1025000A);
if (tmp8 & BIT(4)) /* When boot from EEPROM,
- & FW need more time to read EEPROM */
+ * & FW need more time to read EEPROM
+ */
i = 60;
else /* boot from EFUSE */
i = 30;
@@ -332,7 +330,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
r8712_write32(padapter, RCR, (val32 | BIT(26))); /* Enable RX TCP
- Checksum offload */
+ * Checksum offload
+ */
netdev_info(padapter->pnetdev, "2 RCR=0x%x\n",
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
@@ -346,7 +345,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_write8(padapter, 0x102500BD, r8712_read8(padapter, 0x102500BD) |
BIT(7)); /* enable usb rx aggregation */
r8712_write8(padapter, 0x102500D9, 1); /* TH=1 => means that invalidate
- * usb rx aggregation */
+ * usb rx aggregation
+ */
r8712_write8(padapter, 0x1025FE5B, 0x04); /* 1.7ms/4 */
/* Fix the RX FIFO issue(USB error) */
r8712_write8(padapter, 0x1025fe5C, r8712_read8(padapter, 0x1025fe5C)
@@ -367,7 +367,8 @@ uint rtl8712_hal_deinit(struct _adapter *padapter)
r8712_write8(padapter, SYS_FUNC_EN + 1, 0x70);
r8712_write8(padapter, PMC_FSM, 0x06); /* Enable Loader Data Keep */
r8712_write8(padapter, SYS_ISO_CTRL, 0xF9); /* Isolation signals from
- * CORE, PLL */
+ * CORE, PLL
+ */
r8712_write8(padapter, SYS_ISO_CTRL + 1, 0xe8); /* Enable EFUSE 1.2V */
r8712_write8(padapter, AFE_PLL_CTRL, 0x00); /* Disable AFE PLL. */
r8712_write8(padapter, LDOA15_CTRL, 0x54); /* Disable A15V */
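
The hal_init.c hunks above are non-functional style fixes: continuation
arguments of r8712_write_mem() are re-aligned under the opening parenthesis,
and comments that trailed across lines are rewrapped so each continuation line
begins with " * " and the closing "*/" sits on its own line. The preferred
shape, as a generic example:

	r8712_write8(padapter, REG, val); /* When a comment needs a
					   * second line, continue it
					   * like this.
					   */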
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index d13b4d53c..8918654b4 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index e4e4bdee7..af7c4a477 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -153,7 +153,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
buff = NULL;
if (authmode == _WPA_IE_ID_) {
buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
- if (buff == NULL)
+ if (!buff)
return;
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs=");
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index ab19112ea..57211f7e6 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -389,7 +389,7 @@ static int netdev_open(struct net_device *pnetdev)
padapter->bup = true;
if (rtl871x_hal_init(padapter) != _SUCCESS)
goto netdev_open_error;
- if (r8712_initmac == NULL)
+ if (!r8712_initmac)
/* Use the mac address stored in the Efuse */
memcpy(pnetdev->dev_addr,
padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -413,7 +413,7 @@ static int netdev_open(struct net_device *pnetdev)
}
if (start_drv_threads(padapter) != _SUCCESS)
goto netdev_open_error;
- if (padapter->dvobjpriv.inirp_init == NULL)
+ if (!padapter->dvobjpriv.inirp_init)
goto netdev_open_error;
else
padapter->dvobjpriv.inirp_init(padapter);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 076d5083c..ad041c96f 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -57,9 +57,6 @@ struct __queue {
spin_lock_init(&((pqueue)->lock)); \
} while (0)
-#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
-
static inline u32 _down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
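
Dropping the private LIST_CONTAINOR macro in favour of the generic
container_of() from <linux/kernel.h> is the point of the rtl8712 hunks that
follow (rtl8712_recv.c, rtl8712_xmit.c, rtl871x_ioctl_linux.c,
rtl871x_mlme.c): both compute the address of the enclosing structure from a
pointer to one of its members, but container_of() also type-checks that the
member actually belongs to the named struct. Sketch with an illustrative list
walk:

	#include <linux/kernel.h>
	#include <linux/list.h>

	struct wlan_network {
		struct list_head list;
		/* ... */
	};

	struct list_head *plist = phead->next;	/* illustrative cursor */
	struct wlan_network *pnetwork =
		container_of(plist, struct wlan_network, list);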
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 50f400234..13c018340 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -135,7 +135,7 @@ static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -149,7 +149,7 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -165,7 +165,7 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -178,7 +178,7 @@ static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -194,7 +194,7 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -207,7 +207,7 @@ static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -227,7 +227,7 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
{
struct cmd_obj *pcmd_r;
- if (pcmd == NULL)
+ if (!pcmd)
return pcmd;
pcmd_r = NULL;
@@ -416,7 +416,7 @@ _next:
/* free all cmd_obj resources */
do {
pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue));
- if (pcmd == NULL)
+ if (!pcmd)
break;
r8712_free_cmd_obj(pcmd);
} while (1);
@@ -431,7 +431,7 @@ void r8712_event_handle(struct _adapter *padapter, uint *peventbuf)
void (*event_callback)(struct _adapter *dev, u8 *pbuf);
struct evt_priv *pevt_priv = &(padapter->evtpriv);
- if (peventbuf == NULL)
+ if (!peventbuf)
goto _abort_event_;
evt_sz = (u16)(le32_to_cpu(*peventbuf) & 0xffff);
evt_seq = (u8)((le32_to_cpu(*peventbuf) >> 24) & 0x7f);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index d187508dd..f25b34c7d 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -204,7 +204,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
phead = &defrag_q->queue;
plist = phead->next;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
list_del_init(&prframe->u.list);
pfhdr = &prframe->u.hdr;
curfragnum = 0;
@@ -219,7 +219,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
plist = &defrag_q->queue;
plist = plist->next;
while (!end_of_queue_search(phead, plist)) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = container_of(plist, union recv_frame, u.list);
pnfhdr = &pnextrframe->u.hdr;
/*check the fragment sequence (2nd ~n fragment frame) */
if (curfragnum != pnfhdr->attrib.frag_num) {
@@ -492,7 +492,7 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
phead = &ppending_recvframe_queue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = container_of(plist, union recv_frame, u.list);
pnextattrib = &pnextrframe->u.hdr.attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
plist = plist->next;
@@ -525,14 +525,14 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
if (list_empty(phead))
return true;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication.
* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
plist = plist->next;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index b21a60e9f..7e0b94503 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -169,8 +169,8 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
xmitframe_phead = &pframe_queue->queue;
xmitframe_plist = xmitframe_phead->next;
if (!end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist,
- struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist,
+ struct xmit_frame, list);
list_del_init(&pxmitframe->list);
ptxservq->qcnt--;
phwxmit->txcmdcnt++;
@@ -209,8 +209,8 @@ static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
sta_phead = &phwxmit->sta_queue->queue;
sta_plist = sta_phead->next;
while (!end_of_queue_search(sta_phead, sta_plist)) {
- ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq,
- tx_pending);
+ ptxservq = container_of(sta_plist, struct tx_servq,
+ tx_pending);
pframe_queue = &ptxservq->sta_pending;
pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit,
ptxservq, pframe_queue);
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 86136cc73..aed03cfbb 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -225,10 +225,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -258,10 +258,10 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
- if (pbsetdataratepara == NULL) {
+ if (!pbsetdataratepara) {
kfree(ph2c);
return _FAIL;
}
@@ -280,10 +280,10 @@ u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
- if (psetchplanpara == NULL) {
+ if (!psetchplanpara) {
kfree(ph2c);
return _FAIL;
}
@@ -301,10 +301,10 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC);
- if (pssetbasicratepara == NULL) {
+ if (!pssetbasicratepara) {
kfree(ph2c);
return _FAIL;
}
@@ -322,10 +322,10 @@ u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
+ if (!pwriteptmparm) {
kfree(ph2c);
return _FAIL;
}
@@ -342,10 +342,10 @@ u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
+ if (!pwriteptmparm) {
kfree(ph2c);
return _FAIL;
}
@@ -362,10 +362,10 @@ u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC);
- if (pwriterfparm == NULL) {
+ if (!pwriterfparm) {
kfree(ph2c);
return _FAIL;
}
@@ -383,10 +383,10 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC);
- if (prdrfparm == NULL) {
+ if (!prdrfparm) {
kfree(ph2c);
return _FAIL;
}
@@ -427,7 +427,7 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
INIT_LIST_HEAD(&pcmd->list);
pcmd->cmdcode = _CreateBss_CMD_;
@@ -457,7 +457,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
/* for hidden ap to set fw_state here */
@@ -587,10 +587,10 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pdisconnect_cmd = kmalloc(sizeof(*pdisconnect_cmd), GFP_ATOMIC);
- if (pdisconnect_cmd == NULL)
+ if (!pdisconnect_cmd)
return _FAIL;
pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC);
- if (pdisconnect == NULL) {
+ if (!pdisconnect) {
kfree(pdisconnect_cmd);
return _FAIL;
}
@@ -609,10 +609,10 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC);
- if (psetop == NULL) {
+ if (!psetop) {
kfree(ph2c);
return _FAIL;
}
@@ -633,15 +633,15 @@ u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
return _FAIL;
}
psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC);
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
return _FAIL;
@@ -673,10 +673,10 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC);
- if (psetrfintfsparm == NULL) {
+ if (!psetrfintfsparm) {
kfree(ph2c);
return _FAIL;
}
@@ -695,10 +695,10 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC);
- if (psetrttblparm == NULL) {
+ if (!psetrttblparm) {
kfree(ph2c);
return _FAIL;
}
@@ -716,10 +716,10 @@ u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
struct SetMacAddr_param *psetMacAddr_para;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC);
- if (psetMacAddr_para == NULL) {
+ if (!psetMacAddr_para) {
kfree(ph2c);
return _FAIL;
}
@@ -738,15 +738,15 @@ u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr)
struct set_assocsta_rsp *psetassocsta_rsp = NULL;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC);
- if (psetassocsta_para == NULL) {
+ if (!psetassocsta_para) {
kfree(ph2c);
return _FAIL;
}
psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC);
- if (psetassocsta_rsp == NULL) {
+ if (!psetassocsta_rsp) {
kfree(ph2c);
kfree(psetassocsta_para);
return _FAIL;
@@ -766,10 +766,10 @@ u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
struct addBaReq_parm *paddbareq_parm;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
return _FAIL;
}
@@ -787,10 +787,10 @@ u8 r8712_wdg_wk_cmd(struct _adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC);
- if (pdrvintcmd_param == NULL) {
+ if (!pdrvintcmd_param) {
kfree(ph2c);
return _FAIL;
}
@@ -961,10 +961,10 @@ u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
param = kzalloc(sizeof(*param), GFP_ATOMIC);
- if (param == NULL) {
+ if (!param) {
kfree(ph2c);
return _FAIL;
}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 1b9e24900..e205adf24 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -399,7 +399,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
- if (pwep == NULL)
+ if (!pwep)
return -ENOMEM;
pwep->KeyLength = wep_key_len;
pwep->Length = wep_key_len +
@@ -1060,8 +1060,8 @@ static int r8711_wx_set_wap(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_bssid = pnetwork->network.MacAddress;
if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) {
@@ -1216,7 +1216,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
ret = -E2BIG;
break;
}
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
ev = translate_scan(padapter, a, pnetwork, ev, stop);
plist = plist->next;
}
@@ -1271,8 +1271,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_ssid = pnetwork->network.Ssid.Ssid;
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
@@ -1793,7 +1793,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
param_len = sizeof(struct ieee_param) + pext->key_len;
param = kzalloc(param_len, GFP_ATOMIC);
- if (param == NULL)
+ if (!param)
return -ENOMEM;
param->cmd = IEEE_CMD_SET_ENCRYPTION;
eth_broadcast_addr(param->sta_addr);
@@ -1986,7 +1986,7 @@ static int r871x_get_ap_info(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!mac_pton(data, bssid)) {
netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
(u8 *)data);
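
LIST_CONTAINOR() was a driver-private copy of the kernel's generic container_of(); the hunks above and below switch every caller to the standard macro. A hypothetical sketch of the idiom (demo_* names are illustrative):

#include <linux/list.h>

struct demo_network {
	int id;
	struct list_head list;	/* node embedded in the struct */
};

static struct demo_network *demo_first(struct list_head *head)
{
	/* map a list_head pointer back to its enclosing struct */
	return container_of(head->next, struct demo_network, list);
}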
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index f772675ae..56760cda8 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -34,12 +34,6 @@
#include "usb_osintf.h"
#include "usb_ops.h"
-#define IS_MAC_ADDRESS_BROADCAST(addr) \
-( \
- ((addr[0] == 0xff) && (addr[1] == 0xff) && \
- (addr[2] == 0xff) && (addr[3] == 0xff) && \
- (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
-)
static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
{
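
The deleted IS_MAC_ADDRESS_BROADCAST() macro duplicated an existing kernel helper: is_broadcast_ether_addr() from <linux/etherdevice.h> performs the same all-0xff check. A one-line sketch:

#include <linux/etherdevice.h>

static bool demo_is_bcast(const u8 *addr)	/* addr is ETH_ALEN bytes */
{
	return is_broadcast_ether_addr(addr);
}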
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 62d4ae85a..772bf9fa9 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -155,7 +155,7 @@ static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
phead = &scanned_queue->queue;
plist = phead->next;
while (plist != phead) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
plist = plist->next;
if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
@@ -176,7 +176,7 @@ static void _free_network_queue(struct _adapter *padapter)
phead = &scanned_queue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
plist = plist->next;
_free_network(pmlmepriv, pnetwork);
}
@@ -304,7 +304,7 @@ struct wlan_network *r8712_get_oldest_wlan_network(
while (1) {
if (end_of_queue_search(phead, plist) == true)
break;
- pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pwlan = container_of(plist, struct wlan_network, list);
if (pwlan->fixed != true) {
if (oldest == NULL ||
time_after((unsigned long)oldest->last_scanned,
@@ -390,7 +390,7 @@ static void update_scanned_network(struct _adapter *adapter,
if (end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (is_same_network(&pnetwork->network, target))
break;
if ((oldest == ((struct wlan_network *)0)) ||
@@ -1135,8 +1135,8 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
}
return _FAIL;
}
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
if (pnetwork == NULL)
return _FAIL;
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
@@ -1205,7 +1205,7 @@ sint r8712_set_auth(struct _adapter *adapter,
return _FAIL;
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
- if (psetauthparm == NULL) {
+ if (!psetauthparm) {
kfree(pcmd);
return _FAIL;
}
@@ -1234,7 +1234,7 @@ sint r8712_set_key(struct _adapter *adapter,
if (!pcmd)
return _FAIL;
psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
ret = _FAIL;
goto err_free_cmd;
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 616ca3965..23c143890 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -142,7 +142,7 @@ void r8712_free_recvframe_queue(struct __queue *pframequeue,
phead = &pframequeue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = container_of(plist, union recv_frame, u.list);
plist = plist->next;
r8712_free_recvframe(precvframe, pfree_recv_queue);
}
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e90c00de7..e11ce2896 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -216,8 +216,8 @@ void r8712_free_all_stainfo(struct _adapter *padapter)
phead = &(pstapriv->sta_hash[index]);
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist,
- struct sta_info, hash_list);
+ psta = container_of(plist,
+ struct sta_info, hash_list);
plist = plist->next;
if (pbcmc_stainfo != psta)
r8712_free_stainfo(padapter, psta);
@@ -241,7 +241,7 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
phead = &(pstapriv->sta_hash[index]);
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) {
/* if found the matched address */
break;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index c6d952f5d..99256baaf 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -848,7 +848,7 @@ void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
phead = &pframequeue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
plist = plist->next;
r8712_free_xmitframe(pxmitpriv, pxmitframe);
}
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 454cdf6c7..6f1234570 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -504,7 +504,7 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
u8 *palloc_buf, *pIo_buf;
palloc_buf = kmalloc((u32)len + 16, GFP_ATOMIC);
- if (palloc_buf == NULL)
+ if (!palloc_buf)
return -ENOMEM;
pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f);
if (requesttype == 0x01) {
diff --git a/drivers/staging/rtl8723au/Kconfig b/drivers/staging/rtl8723au/Kconfig
index 435f3594d..277c1ab69 100644
--- a/drivers/staging/rtl8723au/Kconfig
+++ b/drivers/staging/rtl8723au/Kconfig
@@ -1,5 +1,5 @@
config R8723AU
- tristate "Realtek RTL8723AU Wireless LAN NIC driver"
+ tristate "Realtek RTL8723AU Wireless LAN NIC driver (deprecated)"
depends on USB && WLAN && RFKILL
select WIRELESS_EXT
select WEXT_PRIV
@@ -7,7 +7,10 @@ config R8723AU
default n
---help---
This option adds the Realtek RTL8723AU USB device such as found in
- the Lenovo Yogi 13 tablet. If built as a module, it will be called r8723au.
+ the Lenovo Yoga 13 tablet. If built as a module, it will be called r8723au.
+
+ Note: This driver is deprecated and scheduled to be removed in a
+ future kernel release. Please use rtl8xxxu instead.
if R8723AU
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index f68e27702..aad686da3 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -1719,7 +1719,8 @@ void stop_ap_mode23a(struct rtw_adapter *padapter)
}
spin_unlock_bh(&pacl_node_q->lock);
- DBG_8723A("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+ DBG_8723A("%s, free acl_node_queue, num =%d\n",
+ __func__, pacl_list->num);
rtw_sta_flush23a(padapter);
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index f4fff385a..7dd1540eb 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -2113,10 +2113,10 @@ static int on_action_public23a(struct rtw_adapter *padapter,
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe,
skb->len, 0))
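
Linux 4.7 removed enum ieee80211_band in favour of the identically-valued enum nl80211_band, so every IEEE80211_BAND_* constant becomes NL80211_BAND_*. A minimal sketch of the channel-to-frequency mapping used above; the 2 GHz cut-off of 14 stands in for the driver's RTW_CH_MAX_2G_CHANNEL:

#include <net/cfg80211.h>

static int demo_chan_to_freq(int channel)
{
	if (channel <= 14)	/* 2.4 GHz channels */
		return ieee80211_channel_to_frequency(channel,
						      NL80211_BAND_2GHZ);
	return ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
}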
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 989ed0726..150dabc2a 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -211,31 +211,6 @@ u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
return cnt;
}
-int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue)
-{
- spin_lock_bh(&queue->lock);
-
- list_del_init(&precvbuf->list);
- list_add(&precvbuf->list, get_list_head(queue));
-
- spin_unlock_bh(&queue->lock);
-
- return _SUCCESS;
-}
-
-int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&queue->lock, irqL);
-
- list_del_init(&precvbuf->list);
-
- list_add_tail(&precvbuf->list, get_list_head(queue));
- spin_unlock_irqrestore(&queue->lock, irqL);
- return _SUCCESS;
-}
-
struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue)
{
unsigned long irqL;
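
The two recvbuf enqueue helpers deleted above were apparently unused; each amounted to list_move()/list_move_tail() under the queue lock. A hypothetical sketch of the equivalent generic idiom:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t lock;
	struct list_head head;
};

static void demo_requeue_to_head(struct demo_queue *q,
				 struct list_head *node)
{
	spin_lock_bh(&q->lock);
	list_move(node, &q->head);	/* list_del_init + list_add */
	spin_unlock_bh(&q->lock);
}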
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index cc2b84be9..694cf17f8 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -304,21 +304,11 @@ inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
adapter_to_dvobj(adapter)->oper_channel = ch;
}
-inline u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_bwmode;
-}
-
inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
{
adapter_to_dvobj(adapter)->oper_bwmode = bw;
}
-inline u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_ch_offset;
-}
-
inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
{
adapter_to_dvobj(adapter)->oper_ch_offset = offset;
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index 8221320d4..ba32ade5e 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1175,8 +1175,6 @@ int InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary)
/* Let last entry point to the start entry of ring buffer */
status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
- if (status != _SUCCESS)
- return status;
return status;
}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
index ce0d8d894..24c0ff3d8 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -465,7 +465,7 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
break;
}
- /*----Restore RFENV control type----*/;
+ /*----Restore RFENV control type----*/
switch (eRFPath) {
case RF_PATH_A:
PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
diff --git a/drivers/staging/rtl8723au/include/ieee80211.h b/drivers/staging/rtl8723au/include/ieee80211.h
index 3aa40a325..634102e1b 100644
--- a/drivers/staging/rtl8723au/include/ieee80211.h
+++ b/drivers/staging/rtl8723au/include/ieee80211.h
@@ -266,7 +266,7 @@ join_res:
/* Represent channel details, subset of ieee80211_channel */
struct rtw_ieee80211_channel {
- /* enum ieee80211_band band; */
+ /* enum nl80211_band band; */
/* u16 center_freq; */
u16 hw_value;
u32 flags;
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index ea2a6c914..0e7d3da91 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -461,9 +461,7 @@ void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen);
u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter);
void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch);
-u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter);
void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw);
-u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter);
void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset);
void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
index dc784be3d..85a5edb45 100644
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -279,8 +279,6 @@ int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *qu
u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter);
-int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue);
-int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue);
struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue);
void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext);
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 12d18440e..0da559d92 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -39,7 +39,7 @@ static const u32 rtw_cipher_suites[] = {
}
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -48,7 +48,7 @@ static const u32 rtw_cipher_suites[] = {
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -143,15 +143,15 @@ static void rtw_5g_rates_init(struct ieee80211_rate *rates)
}
static struct ieee80211_supported_band *
-rtw_spt_band_alloc(enum ieee80211_band band)
+rtw_spt_band_alloc(enum nl80211_band band)
{
struct ieee80211_supported_band *spt_band = NULL;
int n_channels, n_bitrates;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
n_channels = RTW_2G_CHANNELS_NUM;
n_bitrates = RTW_G_RATES_NUM;
- } else if (band == IEEE80211_BAND_5GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
n_channels = RTW_5G_CHANNELS_NUM;
n_bitrates = RTW_A_RATES_NUM;
} else {
@@ -176,10 +176,10 @@ rtw_spt_band_alloc(enum ieee80211_band band)
spt_band->n_channels = n_channels;
spt_band->n_bitrates = n_bitrates;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
rtw_2g_channels_init(spt_band->channels);
rtw_2g_rates_init(spt_band->bitrates);
- } else if (band == IEEE80211_BAND_5GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
rtw_5g_channels_init(spt_band->channels);
rtw_5g_rates_init(spt_band->bitrates);
}
@@ -257,10 +257,10 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
channel = pnetwork->network.DSConfig;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -322,11 +322,11 @@ void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter)
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq =
ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq =
ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2360,10 +2360,10 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
channel = pmlmeext->cur_channel;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len,
0);
@@ -2392,10 +2392,10 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
channel = pmlmeext->cur_channel;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
mgmt.frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -3109,7 +3109,7 @@ static struct cfg80211_ops rtw_cfg80211_ops = {
};
static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
- enum ieee80211_band band, u8 rf_type)
+ enum nl80211_band band, u8 rf_type)
{
#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
@@ -3133,7 +3133,7 @@ static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
/*
- *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ *hw->wiphy->bands[NL80211_BAND_2GHZ]
*base on ant_num
*rx_mask: RX mask
*if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -3173,19 +3173,19 @@ void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter)
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
{
- bands = wiphy->bands[IEEE80211_BAND_2GHZ];
+ bands = wiphy->bands[NL80211_BAND_2GHZ];
if (bands)
rtw_cfg80211_init_ht_capab(&bands->ht_cap,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
rf_type);
}
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
{
- bands = wiphy->bands[IEEE80211_BAND_5GHZ];
+ bands = wiphy->bands[NL80211_BAND_5GHZ];
if (bands)
rtw_cfg80211_init_ht_capab(&bands->ht_cap,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
rf_type);
}
}
@@ -3224,11 +3224,11 @@ static void rtw_cfg80211_preinit_wiphy(struct rtw_adapter *padapter,
wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
- wiphy->bands[IEEE80211_BAND_2GHZ] =
- rtw_spt_band_alloc(IEEE80211_BAND_2GHZ);
+ wiphy->bands[NL80211_BAND_2GHZ] =
+ rtw_spt_band_alloc(NL80211_BAND_2GHZ);
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
- wiphy->bands[IEEE80211_BAND_5GHZ] =
- rtw_spt_band_alloc(IEEE80211_BAND_5GHZ);
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ rtw_spt_band_alloc(NL80211_BAND_5GHZ);
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
@@ -3313,8 +3313,8 @@ void rtw_wdev_free(struct wireless_dev *wdev)
if (!wdev)
return;
- kfree(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]);
- kfree(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]);
+ kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(wdev->wiphy->bands[NL80211_BAND_5GHZ]);
wiphy_free(wdev->wiphy);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 27b3a5b7d..cf83efffb 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -532,6 +532,7 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
{
struct rtw_adapter *if1 = NULL;
struct dvobj_priv *dvobj;
+ struct usb_device *udev;
int status = _FAIL;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_init\n");
@@ -544,6 +545,10 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
goto exit;
}
+ udev = dvobj->pusbdev;
+ dev_warn(&udev->dev, "WARNING: The rtl8723au driver is deprecated!");
+ dev_warn(&udev->dev, "Please use the rtl8xxxu driver for this device!");
+
if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
if (!if1) {
DBG_8723A("rtw_init_primary_adapter Failed!\n");
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index a780185a3..0f0cd4a03 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -49,7 +49,7 @@ static int ms_parse_err_code(struct rtsx_chip *chip)
}
static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
- u8 tpc, u8 cnt, u8 cfg)
+ u8 tpc, u8 cnt, u8 cfg)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
@@ -2691,7 +2691,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
if ((log_blk < ms_start_idx[seg_no]) ||
- (log_blk >= ms_start_idx[seg_no+1])) {
+ (log_blk >= ms_start_idx[seg_no + 1])) {
if (!(chip->card_wp & MS_CARD)) {
retval = ms_erase_block(chip, phy_blk);
if (retval != STATUS_SUCCESS)
@@ -3836,7 +3836,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
start_page = (u8)(start_sector & ms_card->page_off);
for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
- if (log_blk < ms_start_idx[seg_no+1])
+ if (log_blk < ms_start_idx[seg_no + 1])
break;
}
@@ -4264,7 +4264,7 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
memset(buf1, 0, 32);
rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
for (i = 0; i < 8; i++)
- buf1[8+i] = buf2[4+i];
+ buf1[8 + i] = buf2[4 + i];
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf1, 32);
@@ -4399,10 +4399,10 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
- buf[i] = buf[4+i];
+ buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
- buf[8+i] = 0;
+ buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
32, WAIT_INT, buf, 32);
@@ -4511,10 +4511,10 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
- buf[i] = buf[4+i];
+ buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
- buf[8+i] = 0;
+ buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf, 32);
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 437436f5d..231833a30 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -628,11 +628,6 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
}
-static inline u8 double_depth(u8 depth)
-{
- return (depth > 1) ? (depth - 1) : depth;
-}
-
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
{
int retval;
@@ -1184,22 +1179,6 @@ int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
return 0;
}
-int check_card_fail(struct rtsx_chip *chip, unsigned int lun)
-{
- if (chip->card_fail & chip->lun2card[lun])
- return 1;
-
- return 0;
-}
-
-int check_card_ejected(struct rtsx_chip *chip, unsigned int lun)
-{
- if (chip->card_ejected & chip->lun2card[lun])
- return 1;
-
- return 0;
-}
-
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
{
if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 8f2cf9a4e..56df9a431 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1024,8 +1024,6 @@ int detect_card_cd(struct rtsx_chip *chip, int card);
int check_card_exist(struct rtsx_chip *chip, unsigned int lun);
int check_card_ready(struct rtsx_chip *chip, unsigned int lun);
int check_card_wp(struct rtsx_chip *chip, unsigned int lun);
-int check_card_fail(struct rtsx_chip *chip, unsigned int lun);
-int check_card_ejected(struct rtsx_chip *chip, unsigned int lun);
void eject_card(struct rtsx_chip *chip, unsigned int lun);
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index c0ce659a5..bcc4b666d 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -43,14 +43,6 @@ static void rtsx_calibration(struct rtsx_chip *chip)
rtsx_write_phy_register(chip, 0x00, 0x0288);
}
-void rtsx_disable_card_int(struct rtsx_chip *chip)
-{
- u32 reg = rtsx_readl(chip, RTSX_BIER);
-
- reg &= ~(XD_INT_EN | SD_INT_EN | MS_INT_EN);
- rtsx_writel(chip, RTSX_BIER, reg);
-}
-
void rtsx_enable_card_int(struct rtsx_chip *chip)
{
u32 reg = rtsx_readl(chip, RTSX_BIER);
@@ -1447,12 +1439,6 @@ delink_stage:
rtsx_delink_stage(chip);
}
-void rtsx_undo_delink(struct rtsx_chip *chip)
-{
- chip->auto_delink_allowed = 0;
- rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
-}
-
/**
* rtsx_stop_cmd - stop command transfer and DMA transfer
* @chip: Realtek's card reader chip
@@ -2000,27 +1986,6 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
return STATUS_SUCCESS;
}
-int rtsx_check_link_ready(struct rtsx_chip *chip)
-{
- int retval;
- u8 val;
-
- retval = rtsx_read_register(chip, IRQSTAT0, &val);
- if (retval) {
- rtsx_trace(chip);
- return retval;
- }
-
- dev_dbg(rtsx_dev(chip), "IRQSTAT0: 0x%x\n", val);
- if (val & LINK_RDY_INT) {
- dev_dbg(rtsx_dev(chip), "Delinked!\n");
- rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
- return STATUS_FAIL;
- }
-
- return STATUS_SUCCESS;
-}
-
static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
{
u32 ultmp;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index c295b1eed..c08164f32 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -950,7 +950,6 @@ do { \
int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl);
int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl);
-void rtsx_disable_card_int(struct rtsx_chip *chip);
void rtsx_enable_card_int(struct rtsx_chip *chip);
void rtsx_enable_bus_int(struct rtsx_chip *chip);
void rtsx_disable_bus_int(struct rtsx_chip *chip);
@@ -958,7 +957,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip);
int rtsx_init_chip(struct rtsx_chip *chip);
void rtsx_release_chip(struct rtsx_chip *chip);
void rtsx_polling_func(struct rtsx_chip *chip);
-void rtsx_undo_delink(struct rtsx_chip *chip);
void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
@@ -975,7 +973,6 @@ int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val);
int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
-int rtsx_check_link_ready(struct rtsx_chip *chip);
void rtsx_enter_ss(struct rtsx_chip *chip);
void rtsx_exit_ss(struct rtsx_chip *chip);
int rtsx_pre_handle_interrupt(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 87d697623..6219e0475 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -1928,9 +1928,9 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
tuning_cmd = sd_sdr_tuning_rx_cmd;
} else {
- if (CHK_MMC_DDR52(sd_card))
+ if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = mmc_ddr_tunning_rx_cmd;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2054,9 +2054,9 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
tuning_cmd = sd_sdr_tuning_tx_cmd;
} else {
- if (CHK_MMC_DDR52(sd_card))
+ if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = sd_ddr_tuning_tx_cmd;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2678,9 +2678,9 @@ RTY_SD_RST:
}
j++;
- if (j < 3)
+ if (j < 3) {
goto RTY_SD_RST;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2690,9 +2690,9 @@ RTY_SD_RST:
SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
k++;
- if (k < 3)
+ if (k < 3) {
goto RTY_SD_RST;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
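
The sd.c hunks above fix a checkpatch warning: when any branch of an if/else chain needs braces, kernel style braces every branch. A minimal illustration:

static int demo_pick(int cond, int *out)
{
	if (cond) {		/* braced to match the else branch */
		*out = 1;
	} else {
		*out = 0;	/* multi-statement branch forces braces */
		return -1;
	}
	return 0;
}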
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index 36f849fbb..cab26e736 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -165,7 +165,6 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
break;
}
return ret;
-
}
int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
@@ -210,7 +209,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
/* internal sanity check: there IS a partial byte in the buffer! */
skein_assert(length != 0);
/* partial byte bit mask */
- mask = (u8) (1u << (7 - (msg_bit_cnt & 7)));
+ mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
/* apply bit padding on final byte (in the buffer) */
up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask);
diff --git a/drivers/staging/skein/skein_base.c b/drivers/staging/skein/skein_base.c
index 25a01ca76..c24a57396 100644
--- a/drivers/staging/skein/skein_base.c
+++ b/drivers/staging/skein/skein_base.c
@@ -58,7 +58,7 @@ int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -98,7 +98,7 @@ int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -171,7 +171,7 @@ int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_256_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_256_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_256_BLOCK_BYTES;
skein_256_process_block(ctx, msg, n,
SKEIN_256_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_256_BLOCK_BYTES;
@@ -205,7 +205,7 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -219,19 +219,19 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_256_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
if (n >= SKEIN_256_BLOCK_BYTES)
n = SKEIN_256_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -282,7 +282,7 @@ int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -326,7 +326,7 @@ int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -398,7 +398,7 @@ int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_512_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_512_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_512_BLOCK_BYTES;
skein_512_process_block(ctx, msg, n,
SKEIN_512_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_512_BLOCK_BYTES;
@@ -432,7 +432,7 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -446,19 +446,19 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_512_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
if (n >= SKEIN_512_BLOCK_BYTES)
n = SKEIN_512_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -506,7 +506,7 @@ int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -547,7 +547,7 @@ int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -620,7 +620,7 @@ int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_1024_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_1024_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_1024_BLOCK_BYTES;
skein_1024_process_block(ctx, msg, n,
SKEIN_1024_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_1024_BLOCK_BYTES;
@@ -654,7 +654,7 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -668,19 +668,19 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
if (n >= SKEIN_1024_BLOCK_BYTES)
n = SKEIN_1024_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -702,7 +702,7 @@ int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -724,7 +724,7 @@ int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -746,7 +746,7 @@ int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -775,19 +775,19 @@ int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_256_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
if (n >= SKEIN_256_BLOCK_BYTES)
n = SKEIN_256_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -812,19 +812,19 @@ int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_512_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
if (n >= SKEIN_512_BLOCK_BYTES)
n = SKEIN_512_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -849,19 +849,19 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
if (n >= SKEIN_1024_BLOCK_BYTES)
n = SKEIN_1024_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
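
The skein_base.c hunks above only add spaces around operators and re-wrap arguments; the logic is untouched. For orientation, a simplified stand-alone sketch of the counter-mode output stage those loops implement — demo_block() stands in for skein_*_process_block(), and the real code additionally restores the chaining key after each pass, omitted here:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_BLOCK_BYTES 32

static void demo_block(u64 *state, u64 counter);	/* stub */

static void demo_output(u64 *state, u8 *out, size_t byte_cnt)
{
	size_t i, n;

	for (i = 0; i * DEMO_BLOCK_BYTES < byte_cnt; i++) {
		demo_block(state, i);	/* hash the block counter */
		n = byte_cnt - i * DEMO_BLOCK_BYTES;
		if (n > DEMO_BLOCK_BYTES)
			n = DEMO_BLOCK_BYTES;
		memcpy(out + i * DEMO_BLOCK_BYTES, state, n);
	}
}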
diff --git a/drivers/staging/skein/skein_base.h b/drivers/staging/skein/skein_base.h
index 3c7f8ad36..dc464f334 100644
--- a/drivers/staging/skein/skein_base.h
+++ b/drivers/staging/skein/skein_base.h
@@ -32,7 +32,7 @@
/* below two prototype assume we are handed aligned data */
#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
#define skein_get64_lsb_first(dst64, src08, w_cnt) \
- memcpy(dst64, src08, 8*(w_cnt))
+ memcpy(dst64, src08, 8 * (w_cnt))
#define skein_swap64(w64) (w64)
enum {
@@ -48,17 +48,17 @@ enum {
#define SKEIN_1024_STATE_WORDS 16
#define SKEIN_MAX_STATE_WORDS 16
-#define SKEIN_256_STATE_BYTES (8*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BYTES (8*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BYTES (8*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_STATE_BYTES (8 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_STATE_BYTES (8 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_STATE_BYTES (8 * SKEIN_1024_STATE_WORDS)
-#define SKEIN_256_STATE_BITS (64*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BITS (64*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BITS (64*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_STATE_BITS (64 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_STATE_BITS (64 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_STATE_BITS (64 * SKEIN_1024_STATE_WORDS)
-#define SKEIN_256_BLOCK_BYTES (8*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_BLOCK_BYTES (8*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_BLOCK_BYTES (8*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_BLOCK_BYTES (8 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_BLOCK_BYTES (8 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_BLOCK_BYTES (8 * SKEIN_1024_STATE_WORDS)
struct skein_ctx_hdr {
size_t hash_bit_len; /* size of hash result, in bits */
@@ -84,11 +84,6 @@ struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
};
-static inline u64 rotl_64(u64 x, u8 N)
-{
- return (x << N) | (x >> (64 - N));
-}
-
/* Skein APIs for (incremental) "straight hashing" */
int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
@@ -162,13 +157,13 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* 127 final blk flag */
/* tweak word tweak[1]: flag bit definition(s) */
-#define SKEIN_T1_FLAG_FIRST (((u64) 1) << SKEIN_T1_POS_FIRST)
-#define SKEIN_T1_FLAG_FINAL (((u64) 1) << SKEIN_T1_POS_FINAL)
-#define SKEIN_T1_FLAG_BIT_PAD (((u64) 1) << SKEIN_T1_POS_BIT_PAD)
+#define SKEIN_T1_FLAG_FIRST (((u64)1) << SKEIN_T1_POS_FIRST)
+#define SKEIN_T1_FLAG_FINAL (((u64)1) << SKEIN_T1_POS_FINAL)
+#define SKEIN_T1_FLAG_BIT_PAD (((u64)1) << SKEIN_T1_POS_BIT_PAD)
/* tweak word tweak[1]: tree level bit field mask */
#define SKEIN_T1_TREE_LVL_MASK (((u64)0x7F) << SKEIN_T1_POS_TREE_LVL)
-#define SKEIN_T1_TREE_LEVEL(n) (((u64) (n)) << SKEIN_T1_POS_TREE_LVL)
+#define SKEIN_T1_TREE_LEVEL(n) (((u64)(n)) << SKEIN_T1_POS_TREE_LVL)
/* tweak word tweak[1]: block type field */
#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */
@@ -181,7 +176,7 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_BLK_TYPE_OUT (63) /* output stage */
#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */
-#define SKEIN_T1_BLK_TYPE(T) (((u64) (SKEIN_BLK_TYPE_##T)) << \
+#define SKEIN_T1_BLK_TYPE(T) (((u64)(SKEIN_BLK_TYPE_##T)) << \
SKEIN_T1_POS_BLK_TYPE)
#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) /* for MAC and KDF */
#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) /* config block */
@@ -204,11 +199,11 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian)*/
#endif
-#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64) (hi32)) << 32))
+#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64)(hi32)) << 32))
#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE)
#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22)
-#define SKEIN_CFG_STR_LEN (4*8)
+#define SKEIN_CFG_STR_LEN (4 * 8)
/* bit field definitions in config block tree_info word */
#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0)
@@ -327,9 +322,9 @@ enum {
#define SKEIN_512_ROUNDS_TOTAL (72)
#define SKEIN_1024_ROUNDS_TOTAL (80)
#else /* allow command-line define in range 8*(5..14) */
-#define SKEIN_256_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/100) + 5) % 10) + 5))
-#define SKEIN_512_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/10) + 5) % 10) + 5))
-#define SKEIN_1024_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS) + 5) % 10) + 5))
+#define SKEIN_256_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 100) + 5) % 10) + 5))
+#define SKEIN_512_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 10) + 5) % 10) + 5))
+#define SKEIN_1024_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS) + 5) % 10) + 5))
#endif
#endif /* ifndef _SKEIN_H_ */
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 45b47327e..59a0a8a82 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -15,6 +15,7 @@
************************************************************************/
#include <linux/string.h>
+#include <linux/bitops.h>
#include "skein_base.h"
#include "skein_block.h"
@@ -59,10 +60,10 @@
#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
} while (0)
@@ -120,10 +121,10 @@
#if !(SKEIN_USE_ASM & 512)
#undef RCNT
-#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
+#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
+#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10)
#else
#define SKEIN_UNROLL_512 (0)
#endif
@@ -136,15 +137,16 @@
#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
X##p4 += X##p5; \
- X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 = rol64(X##p5, ROT##_2); \
X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3);\
+ X##p6 += X##p7; \
+ X##p7 = rol64(X##p7, ROT##_3); \
X##p7 ^= X##p6; \
} while (0)
@@ -200,7 +202,7 @@
} while (0)
#define R512_UNROLL_R(NN) \
((SKEIN_UNROLL_512 == 0 && \
- SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
+ SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \
(SKEIN_UNROLL_512 > (NN)))
#if (SKEIN_UNROLL_512 > 14)
@@ -210,7 +212,7 @@
#if !(SKEIN_USE_ASM & 1024)
#undef RCNT
-#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
+#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10)
#else
@@ -226,28 +228,28 @@
pF, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
X##p4 += X##p5; \
- X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 = rol64(X##p5, ROT##_2); \
X##p5 ^= X##p4; \
X##p6 += X##p7; \
- X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 = rol64(X##p7, ROT##_3); \
X##p7 ^= X##p6; \
X##p8 += X##p9; \
- X##p9 = rotl_64(X##p9, ROT##_4); \
+ X##p9 = rol64(X##p9, ROT##_4); \
X##p9 ^= X##p8; \
X##pA += X##pB; \
- X##pB = rotl_64(X##pB, ROT##_5); \
+ X##pB = rol64(X##pB, ROT##_5); \
X##pB ^= X##pA; \
X##pC += X##pD; \
- X##pD = rotl_64(X##pD, ROT##_6); \
+ X##pD = rol64(X##pD, ROT##_6); \
X##pD ^= X##pC; \
X##pE += X##pF; \
- X##pF = rotl_64(X##pF, ROT##_7); \
+ X##pF = rol64(X##pF, ROT##_7); \
X##pF ^= X##pE; \
} while (0)
@@ -311,28 +313,28 @@
#define R1024_8_ROUNDS(R) \
do { \
R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_0, 8*(R) + 1); \
+ 13, 14, 15, R1024_0, 8 * (R) + 1); \
R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_1, 8*(R) + 2); \
+ 05, 08, 01, R1024_1, 8 * (R) + 2); \
R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_2, 8*(R) + 3); \
+ 11, 10, 09, R1024_2, 8 * (R) + 3); \
R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_3, 8*(R) + 4); \
- I1024(2*(R)); \
+ 03, 12, 07, R1024_3, 8 * (R) + 4); \
+ I1024(2 * (R)); \
R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_4, 8*(R) + 5); \
+ 13, 14, 15, R1024_4, 8 * (R) + 5); \
R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_5, 8*(R) + 6); \
+ 05, 08, 01, R1024_5, 8 * (R) + 6); \
R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_6, 8*(R) + 7); \
+ 11, 10, 09, R1024_6, 8 * (R) + 7); \
R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_7, 8*(R) + 8); \
- I1024(2*(R)+1); \
+ 03, 12, 07, R1024_7, 8 * (R) + 8); \
+ I1024(2 * (R) + 1); \
} while (0)
#define R1024_UNROLL_R(NN) \
((SKEIN_UNROLL_1024 == 0 && \
- SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
+ SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \
(SKEIN_UNROLL_1024 > (NN)))
#if (SKEIN_UNROLL_1024 > 14)
@@ -351,10 +353,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
size_t r;
#if SKEIN_UNROLL_256
/* key schedule: chaining vars + tweak + "rot"*/
- u64 kw[WCNT+4+RCNT*2];
+ u64 kw[WCNT + 4 + (RCNT * 2)];
#else
/* key schedule words : chaining vars + tweak */
- u64 kw[WCNT+4];
+ u64 kw[WCNT + 4];
#endif
u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
u64 w[WCNT]; /* local copy of input block */
@@ -460,9 +462,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_256_process_block_code_size(void)
{
- return ((u8 *) skein_256_process_block_code_size) -
- ((u8 *) skein_256_process_block);
+ return ((u8 *)skein_256_process_block_code_size) -
+ ((u8 *)skein_256_process_block);
}
+
unsigned int skein_256_unroll_cnt(void)
{
return SKEIN_UNROLL_256;
@@ -480,9 +483,11 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
};
size_t r;
#if SKEIN_UNROLL_512
- u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
+ /* key sched: chaining vars + tweak + "rot"*/
+ u64 kw[WCNT + 4 + RCNT * 2];
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT + 4];
#endif
u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
u64 w[WCNT]; /* local copy of input block */
@@ -543,7 +548,6 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
for (r = 1;
r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
-
R512_8_ROUNDS(0);
#if R512_UNROLL_R(1)
@@ -609,9 +613,10 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_512_process_block_code_size(void)
{
- return ((u8 *) skein_512_process_block_code_size) -
- ((u8 *) skein_512_process_block);
+ return ((u8 *)skein_512_process_block_code_size) -
+ ((u8 *)skein_512_process_block);
}
+
unsigned int skein_512_unroll_cnt(void)
{
return SKEIN_UNROLL_512;
@@ -629,9 +634,11 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
};
size_t r;
#if (SKEIN_UNROLL_1024 != 0)
- u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
+ /* key sched: chaining vars + tweak + "rot" */
+ u64 kw[WCNT + 4 + (RCNT * 2)];
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT + 4];
#endif
/* local copy of vars, for speed */
@@ -771,9 +778,10 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_1024_process_block_code_size(void)
{
- return ((u8 *) skein_1024_process_block_code_size) -
- ((u8 *) skein_1024_process_block);
+ return ((u8 *)skein_1024_process_block_code_size) -
+ ((u8 *)skein_1024_process_block);
}
+
unsigned int skein_1024_unroll_cnt(void)
{
return SKEIN_UNROLL_1024;
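
The driver-local rotl_64() helper deleted from skein_base.h is replaced by rol64() from <linux/bitops.h>; the threefish_block.c hunks below make the same move for right-rotates via ror64(). A short sketch of the two helpers:

#include <linux/bitops.h>
#include <linux/types.h>

static u64 demo_rotations(u64 x)
{
	u64 l = rol64(x, 13);	/* (x << 13) | (x >> (64 - 13)) */
	u64 r = ror64(x, 13);	/* (x >> 13) | (x << (64 - 13)) */

	return l ^ r;
}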
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index e29b9abaa..11f5e530a 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -27,7 +27,7 @@ static int skein256_init(struct shash_desc *desc)
}
static int skein256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
data, len);
@@ -62,7 +62,7 @@ static int skein512_init(struct shash_desc *desc)
}
static int skein512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
data, len);
@@ -97,7 +97,7 @@ static int skein1024_init(struct shash_desc *desc)
}
static int skein1024_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
data, len);
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 8e0a0b77e..615e46757 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -52,7 +52,7 @@ enum threefish_size {
*/
struct threefish_key {
u64 state_size;
- u64 key[SKEIN_MAX_STATE_WORDS+1]; /* max number of key words*/
+ u64 key[SKEIN_MAX_STATE_WORDS + 1]; /* max number of key words*/
u64 tweak[3];
};
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index e19ac4368..a95563fad 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -512,622 +512,622 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= k0 + t1;
b3 -= k1 + 18;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k2;
b1 -= k3 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k4 + t0;
b3 -= k0 + 17;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k1;
b1 -= k2 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k3 + t2;
b3 -= k4 + 16;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k0;
b1 -= k1 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k2 + t1;
b3 -= k3 + 15;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k4;
b1 -= k0 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k1 + t0;
b3 -= k2 + 14;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k3;
b1 -= k4 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k0 + t2;
b3 -= k1 + 13;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k2;
b1 -= k3 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k4 + t1;
b3 -= k0 + 12;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k1;
b1 -= k2 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k3 + t0;
b3 -= k4 + 11;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k0;
b1 -= k1 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k2 + t2;
b3 -= k3 + 10;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k4;
b1 -= k0 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k1 + t1;
b3 -= k2 + 9;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k3;
b1 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k0 + t0;
b3 -= k1 + 8;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k2;
b1 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k4 + t2;
b3 -= k0 + 7;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k1;
b1 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k3 + t1;
b3 -= k4 + 6;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k0;
b1 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k2 + t0;
b3 -= k3 + 5;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k4;
b1 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k1 + t2;
b3 -= k2 + 4;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k3;
b1 -= k4 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k0 + t1;
b3 -= k1 + 3;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k2;
b1 -= k3 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k4 + t0;
b3 -= k0 + 2;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k1;
b1 -= k2 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k3 + t2;
b3 -= k4 + 1;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k0;
b1 -= k1 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k2 + t1;
b3 -= k3;
@@ -2125,1226 +2125,1226 @@ void threefish_decrypt_512(struct threefish_key *key_ctx, u64 *input,
b7 -= k7 + 18;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k5 + t0;
b7 -= k6 + 17;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k3;
b5 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k8;
b1 -= k0;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k4 + t2;
b7 -= k5 + 16;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k2;
b5 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k7;
b1 -= k8;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k3 + t1;
b7 -= k4 + 15;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k1;
b5 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k8;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k6;
b1 -= k7;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k2 + t0;
b7 -= k3 + 14;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k0;
b5 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k5;
b1 -= k6;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k1 + t2;
b7 -= k2 + 13;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k8;
b5 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k4;
b1 -= k5;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k0 + t1;
b7 -= k1 + 12;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k7;
b5 -= k8 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k3;
b1 -= k4;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k8 + t0;
b7 -= k0 + 11;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k6;
b5 -= k7 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k2;
b1 -= k3;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k7 + t2;
b7 -= k8 + 10;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k5;
b5 -= k6 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k1;
b1 -= k2;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k6 + t1;
b7 -= k7 + 9;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k4;
b5 -= k5 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k0;
b1 -= k1;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k5 + t0;
b7 -= k6 + 8;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k3;
b5 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k8;
b1 -= k0;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k4 + t2;
b7 -= k5 + 7;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k2;
b5 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k7;
b1 -= k8;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k3 + t1;
b7 -= k4 + 6;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k1;
b5 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k8;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k6;
b1 -= k7;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k2 + t0;
b7 -= k3 + 5;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k0;
b5 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k5;
b1 -= k6;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k1 + t2;
b7 -= k2 + 4;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k8;
b5 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k4;
b1 -= k5;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k0 + t1;
b7 -= k1 + 3;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k7;
b5 -= k8 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k3;
b1 -= k4;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k8 + t0;
b7 -= k0 + 2;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k6;
b5 -= k7 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k2;
b1 -= k3;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k7 + t2;
b7 -= k8 + 1;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k5;
b5 -= k6 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k1;
b1 -= k2;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k6 + t1;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k4;
b5 -= k5 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k0;
b1 -= k1;
@@ -5521,2722 +5521,2722 @@ void threefish_decrypt_1024(struct threefish_key *key_ctx, u64 *input,
b14 -= k0 + t0;
b15 -= k1 + 20;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k16 + t2;
b15 -= k0 + 19;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k14;
b13 -= k15 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k12;
b11 -= k13;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k10;
b9 -= k11;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k8;
b7 -= k9;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k6;
b5 -= k7;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k2;
b1 -= k3;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k15 + t1;
b15 -= k16 + 18;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k13;
b13 -= k14 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k11;
b11 -= k12;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k9;
b9 -= k10;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k7;
b7 -= k8;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k5;
b5 -= k6;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k1;
b1 -= k2;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k14 + t0;
b15 -= k15 + 17;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k12;
b13 -= k13 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k10;
b11 -= k11;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k8;
b9 -= k9;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k6;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k4;
b5 -= k5;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k0;
b1 -= k1;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k13 + t2;
b15 -= k14 + 16;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k11;
b13 -= k12 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k9;
b11 -= k10;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k7;
b9 -= k8;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k5;
b7 -= k6;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k3;
b5 -= k4;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k16;
b1 -= k0;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k12 + t1;
b15 -= k13 + 15;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k10;
b13 -= k11 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k8;
b11 -= k9;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k6;
b9 -= k7;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k4;
b7 -= k5;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k2;
b5 -= k3;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k15;
b1 -= k16;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k11 + t0;
b15 -= k12 + 14;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k9;
b13 -= k10 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k7;
b11 -= k8;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k5;
b9 -= k6;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k3;
b7 -= k4;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k1;
b5 -= k2;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k16;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k14;
b1 -= k15;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k10 + t2;
b15 -= k11 + 13;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k8;
b13 -= k9 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k6;
b11 -= k7;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k4;
b9 -= k5;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k2;
b7 -= k3;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k0;
b5 -= k1;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k15;
b3 -= k16;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k13;
b1 -= k14;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k9 + t1;
b15 -= k10 + 12;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k7;
b13 -= k8 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k5;
b11 -= k6;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k3;
b9 -= k4;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k1;
b7 -= k2;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k16;
b5 -= k0;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k14;
b3 -= k15;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k12;
b1 -= k13;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k8 + t0;
b15 -= k9 + 11;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k6;
b13 -= k7 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k4;
b11 -= k5;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k2;
b9 -= k3;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k0;
b7 -= k1;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k15;
b5 -= k16;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k13;
b3 -= k14;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k11;
b1 -= k12;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k7 + t2;
b15 -= k8 + 10;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k5;
b13 -= k6 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k3;
b11 -= k4;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k1;
b9 -= k2;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k16;
b7 -= k0;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k14;
b5 -= k15;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k12;
b3 -= k13;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k10;
b1 -= k11;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k6 + t1;
b15 -= k7 + 9;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k4;
b13 -= k5 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k2;
b11 -= k3;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k0;
b9 -= k1;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k15;
b7 -= k16;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k13;
b5 -= k14;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k11;
b3 -= k12;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k9;
b1 -= k10;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k5 + t0;
b15 -= k6 + 8;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k3;
b13 -= k4 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k1;
b11 -= k2;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k16;
b9 -= k0;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k14;
b7 -= k15;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k12;
b5 -= k13;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k10;
b3 -= k11;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k8;
b1 -= k9;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k4 + t2;
b15 -= k5 + 7;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k2;
b13 -= k3 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k0;
b11 -= k1;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k15;
b9 -= k16;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k13;
b7 -= k14;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k11;
b5 -= k12;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k9;
b3 -= k10;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k7;
b1 -= k8;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k3 + t1;
b15 -= k4 + 6;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k1;
b13 -= k2 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k16;
b11 -= k0;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k14;
b9 -= k15;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k12;
b7 -= k13;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k10;
b5 -= k11;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k8;
b3 -= k9;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k6;
b1 -= k7;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k2 + t0;
b15 -= k3 + 5;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k0;
b13 -= k1 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k15;
b11 -= k16;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k13;
b9 -= k14;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k11;
b7 -= k12;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k9;
b5 -= k10;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k5;
b1 -= k6;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k1 + t2;
b15 -= k2 + 4;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k16;
b13 -= k0 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k14;
b11 -= k15;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k12;
b9 -= k13;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k10;
b7 -= k11;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k8;
b5 -= k9;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k4;
b1 -= k5;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k0 + t1;
b15 -= k1 + 3;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k15;
b13 -= k16 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k13;
b11 -= k14;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k11;
b9 -= k12;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k9;
b7 -= k10;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k7;
b5 -= k8;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k3;
b1 -= k4;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k16 + t0;
b15 -= k0 + 2;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k14;
b13 -= k15 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k12;
b11 -= k13;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k10;
b9 -= k11;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k8;
b7 -= k9;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k6;
b5 -= k7;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k2;
b1 -= k3;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k15 + t2;
b15 -= k16 + 1;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k13;
b13 -= k14 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k11;
b11 -= k12;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k9;
b9 -= k10;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k7;
b7 -= k8;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k5;
b5 -= k6;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k1;
b1 -= k2;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k14 + t1;
b15 -= k15;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k12;
b13 -= k13 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k10;
b11 -= k11;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k8;
b9 -= k9;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k6;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k4;
b5 -= k5;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k0;
b1 -= k1;
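
The long run of hunks above is a single mechanical cleanup in the skein/Threefish code: every open-coded 64-bit rotate-right is replaced by the kernel's ror64() helper from <linux/bitops.h>. A minimal standalone sketch of the equivalence (user-space names here are mine, not from the patch):

    #include <stdint.h>

    /* Open-coded rotate-right, exactly as on the '-' lines above.
     * Only safe for 0 < n < 64: n == 0 would shift by 64, which is
     * undefined behaviour in C. */
    static inline uint64_t ror64_open_coded(uint64_t x, unsigned int n)
    {
            return (x >> n) | (x << (64 - n));
    }

For the shift counts used here (all in 1..63), ror64() computes the same value, so each replacement is behaviour-preserving; the named helper also lets the compiler recognize the idiom and emit a single rotate instruction, e.g. b1 = ror64(b1 ^ b6, 46) in place of the two-shift-and-or form.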
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 3a2094f72..c831ba3ed 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -896,6 +896,7 @@ static void slic_upr_start(struct adapter *adapter)
{
struct slic_upr *upr;
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
+
upr = adapter->upr_list;
if (!upr)
return;
@@ -1142,7 +1143,7 @@ static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
/*
* Compute a checksum of the EEPROM according to RFC 1071.
*/
-static u16 slic_eeprom_cksum(void *eeprom, unsigned len)
+static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
{
u16 *wp = eeprom;
u32 checksum = 0;
@@ -1853,6 +1854,11 @@ static void slic_xmit_build_request(struct adapter *adapter,
ihcmd->u.slic_buffers.totlen = skb->len;
phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
+ kfree_skb(skb);
+ dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
+ return;
+ }
ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
ihcmd->u.slic_buffers.bufs[0].length = skb->len;
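
The slicoss hunk above adds the standard validity check after pci_map_single(): a streaming DMA mapping can fail, and the returned handle must be tested before it is handed to hardware. The pci_* calls are thin wrappers over the generic DMA API; a hedged sketch of the same pattern in dma_map_single() terms (pdev, buf and len are hypothetical):

    dma_addr_t handle;

    handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(&pdev->dev, handle)) {
            /* handle is unusable: release resources and bail out
             * rather than programming a bad address into the NIC */
            return -ENOMEM;
    }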
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 95f7cae3c..f80ee7766 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -306,7 +306,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
unsigned int input, request;
unsigned int tmpClock, ret;
const int max_OD = 3;
- int max_d;
+ int max_d = 6;
if (getChipType() == SM750LE) {
/* SM750LE doesn't have a programmable PLL and M/N values to work on.
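
The one-line sm750fb change is an uninitialized-variable fix: max_d was previously assigned only on the SM750LE path, so calcPllValue() could consume an indeterminate value on other chips. Initializing at the declaration gives every path a defined divisor bound; a compressed sketch of the shape (loop body elided):

    int max_d = 6;  /* now well-defined even when the LE branch is skipped */
    int d;

    for (d = max_d; d >= 0; d--) {
            /* try each candidate output-divisor exponent */
    }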
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index a22fb0751..97ca4ecca 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -263,7 +263,7 @@ static struct notifier_block vt_notifier_block = {
static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
{
pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
- return (u_char) (scr_readw(pos) >> 8);
+ return (scr_readw(pos) & ~vc->vc_hi_font_mask) >> 8;
}
static void speakup_date(struct vc_data *vc)
@@ -473,8 +473,10 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
w = scr_readw(pos);
c = w & 0xff;
- if (w & vc->vc_hi_font_mask)
+ if (w & vc->vc_hi_font_mask) {
+ w &= ~vc->vc_hi_font_mask;
c |= 0x100;
+ }
ch = inverse_translate(vc, c, 0);
*attribs = (w & 0xff00) >> 8;
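
Both speakup hunks handle consoles running 512-glyph fonts: when vc->vc_hi_font_mask is set (typically 0x0100), bit 8 of the glyph index is stored among the attribute bits of the screen word, so it must be masked out of the attribute byte and folded back into the character code. A standalone sketch of the decode (function and parameter names are mine):

    #include <stdint.h>

    /* Split one VGA-style screen word into (char, attr), honouring a
     * 512-glyph font whose ninth glyph bit lives in the attr bits. */
    static void decode_cell(uint16_t w, uint16_t hi_font_mask,
                            uint16_t *c, uint8_t *attr)
    {
            *c = w & 0xff;
            if (w & hi_font_mask) {
                    w &= ~hi_font_mask;     /* strip glyph bit from attrs */
                    *c |= 0x100;            /* ...and fold it into the char */
            }
            *attr = (w & 0xff00) >> 8;
    }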
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 41ef099b7..0149edc1e 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -150,7 +150,7 @@ static void __speakup_paste_selection(struct work_struct *work)
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty_throttled(tty)) {
schedule();
continue;
}
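
The selection.c change swaps an open-coded test_bit() on tty->flags for the tty_throttled() accessor, keeping knowledge of the flag layout inside the tty core. Its definition in <linux/tty.h> is, as I recall (treat as a sketch, not a quote):

    static inline bool tty_throttled(struct tty_struct *tty)
    {
            return test_bit(TTY_THROTTLED, &tty->flags);
    }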
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 1b399214e..3ad7ff0bc 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -6,6 +6,7 @@
#ifndef __sparc__
#include <linux/serial.h>
#endif
+#include <linux/serial_core.h>
/*
* this is cut&paste from 8250.h. Get rid of the structure, the definitions
@@ -16,7 +17,7 @@ struct old_serial_port {
unsigned int baud_base;
unsigned int port;
unsigned int irq;
- unsigned int flags; /* unused */
+ upf_t flags; /* unused */
};
/* countdown values for serial timeouts in us */
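
The serialio.h hunk types the flags field as upf_t, the sparse-annotated flag type that serial_core uses for its UPF_* port flags, which is why <linux/serial_core.h> is now included. A sketch of what the typed field buys (assuming serial_core's usual definitions):

    /* serial_core.h roughly has:
     *   typedef unsigned int __bitwise upf_t;
     * so sparse warns when plain integers are mixed with UPF_* values. */
    struct old_serial_port port = {
            .flags = UPF_BOOT_AUTOCONF,     /* UPF_* constants carry type upf_t */
    };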
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
index b0498ff32..c2359de17 100644
--- a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
+++ b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
@@ -50,20 +50,6 @@ Description: This field is used to tell s-Par which type of recovery tool
commission the guest.
Users: sparmaintainer@unisys.com
-What: guest/chipsetready
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by Unisys application software on the guest
- to acknowledge completion of specific events for integration
- purposes, but these acknowledgements are not required for the
- guest to operate correctly. The interface accepts one of two
- strings: MODULES_LOADED to indicate that the s-Par driver
- modules have been loaded successfully, or CALLHOMEDISK_MOUNTED,
- which indicates that the disk used to support call home services
- has been successfully mounted.
-Users: sparmaintainer@unisys.com
-
What: parahotplug/deviceenabled
Date: 7/18/2014
KernelVersion: TBD
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
index c2d8dd4a2..1146c1cf5 100644
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -137,12 +137,6 @@ called automatically by the visorbus driver at appropriate times:
* The resume() function is the "book-end" to pause(), and is described above.
-If/when a function driver creates a Linux device (that needs to be accessed
-from usermode), it calls visorbus_registerdevnode(), passing the major and
-minor number of the device. (Of course not all function drivers will need
-to do this.) This simply creates the appropriate "devmajorminor" sysfs entry
-described below, so that a hotplug script can use it to create a device node.
-
2.1.3. sysfs Advertised Information
-----------------------------------
@@ -197,19 +191,6 @@ The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
if the appropriate function driver has not
been loaded yet.
- devmajorminor
-
- <devname> if applicable, each file here identifies (via
- ... its file contents) the
- "<major>:<minor>" needed for a device node to
- enable access from usermode. There is exactly
- one file here for each different device node
- that can be accessed (from usermode). Note
- that this info is provided by a particular
- function driver, so these will not exist
- until AFTER the appropriate function driver
- controlling this device class is loaded.
-
channel properties of the device channel (all in
ascii text format)
diff --git a/drivers/staging/unisys/Documentation/proc-entries.txt b/drivers/staging/unisys/Documentation/proc-entries.txt
deleted file mode 100644
index 426f92b1c..000000000
--- a/drivers/staging/unisys/Documentation/proc-entries.txt
+++ /dev/null
@@ -1,93 +0,0 @@
- s-Par Proc Entries
-This document describes the proc entries created by the Unisys s-Par modules.
-
-Support Module Entries
-These entries are provided primarily for debugging.
-
-/proc/uislib/info: This entry contains debugging information for the
-uislib module, including bus information and memory usage.
-
-/proc/visorchipset/controlvm: This directory contains debugging
-entries for the controlvm channel used by visorchipset.
-
-/proc/uislib/platform: This entry is used to display the platform
-number this node is in the system. For some guests, this may be
-invalid.
-
-/proc/visorchipset/chipsetready: This entry is written to by scripts
-to signify that any user level activity has been completed before the
-guest can be considered running and is shown as running in the s-Par
-UI.
-
-Device Entries
-These entries provide status of the devices shared by a service partition.
-
-/proc/uislib/vbus: this is a directory containing entries for each
-virtual bus. Each numbered sub-directory contains an info entry, which
-describes the devices that appear on that bus.
-
-/proc/uislib/cycles_before_wait: This entry is used to tune
-performance, by setting the number of cycles we wait before going idle
-when in polling mode. A longer time will reduce message latency but
-spend more processing time polling.
-
-/proc/uislib/smart_wakeup: This entry is used to tune performance, by
-enabling or disabling smart wakeup.
-
-/proc/virthba/info: This entry contains debugging information for the
-virthba module, including interrupt information and memory usage.
-
-/proc/virthba/enable_ints: This entry controls interrupt use by the
-virthba module. Writing a 0 to this entry will disable interrupts.
-
-/proc/virtnic/info: This entry contains debugging information for the
-virtnic module, including interrupt information, send and receive
-counts, and other device information.
-
-/proc/virtnic/ethX: This is a directory containing entries for each
-virtual NIC. Each named subdirectory contains two entries,
-clientstring and zone.
-
-/proc/virtpci/info: This entry contains debugging information for the
-virtpci module, including virtual PCI bus information and device
-locations.
-
-/proc/virtnic/enable_ints: This entry controls interrupt use by the
-virtnic module. Writing a 0 to this entry will disable interrupts.
-
-Visorconinclient, visordiag, visornoop, visorserialclient, and
-visorvideoclient Entries
-
-The entries in proc for these modules all follow the same
-pattern. Each module has its own proc directory with the same name,
-e.g. visordiag presents a /proc/visordiag directory. Inside of the
-module's directory are a device directory, which contains one numbered
-directory for each device provided by that module. Each device has a
-diag entry that presents the device number and visorbus name for that
-device. The module directory also has a driver/diag entry, which
-reports the corresponding s-Par version number of the driver.
-
-Automated Installation Entries
-
-These entries are used to pass information between the s-Par platform
-and the Linux-based installation and recovery tool. These values are
-read/write, however, the guest can only reset them to 0, or report an
-error status through the installer entry. The values are only set via
-s-Par's firmware interface, to help prevent accidentally booting into
-the tool.
-
-/proc/visorchipset/boottotool: This entry instructs s-Par that the
-next reboot will launch the installation and recovery tool. If set to
-0, the next boot will happen according to the UEFI boot manager
-settings.
-
-/proc/visorchipset/toolaction: This entry indicates the installation
-and recovery tool mode requested for the next boot.
-
-/proc/visorchipset/installer: this entry is used by the installation
-and recovery tool to pass status and result information back to the
-s-Par firmware.
-
-/proc/visorchipset/partition: This directory contains the guest
-partition configuration data for each virtual bus, for use during
-installation and at runtime for s-Par service partitions.
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index cc46e37e6..1f0425bf3 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -2,5 +2,4 @@ Unisys s-Par drivers
M: David Kershner <sparmaintainer@unisys.com>
S: Maintained
F: Documentation/s-Par/overview.txt
-F: Documentation/s-Par/proc-entries.txt
F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 5af59a5fc..db4e6b287 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -76,9 +76,9 @@ enum channel_clientstate {
};
static inline const u8 *
-ULTRA_CHANNELCLI_STRING(u32 v)
+ULTRA_CHANNELCLI_STRING(u32 state)
{
- switch (v) {
+ switch (state) {
case CHANNELCLI_DETACHED:
return (const u8 *)("DETACHED");
case CHANNELCLI_DISABLED:
@@ -411,7 +411,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
mb(); /* required for channel synch */
}
if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state;
* come out of it
*/
@@ -459,7 +459,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
mb(); /* required for channel synch */
return 0;
}
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state; come out of it */
pr_info("%s Channel OS client acquire now successful\n", id);
writeb(0, &hdr->cli_error_os);
@@ -472,7 +472,7 @@ spar_channel_client_release_os(void __iomem *ch, u8 *id)
{
struct channel_header __iomem *hdr = ch;
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state; come out of it */
pr_info("%s Channel OS client error state cleared\n", id);
writeb(0, &hdr->cli_error_os);
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 880d9f04c..5ccf81485 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -253,48 +253,6 @@ struct uiscmdrsp_scsi {
/* SCSI device version for no disk inquiry result */
#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
-/* Windows and Linux want different things for a non-existent lun. So, we'll let
- * caller pass in the peripheral qualifier and type.
- * NOTE:[4] SCSI returns (n-4); so we return length-1-4 or length-5.
- */
-
-#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \
- do { \
- memset(buf, 0, \
- MINNUM(len, \
- (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \
- buf[2] = (u8)SCSI_SPC2_VER; \
- if (lun == 0) { \
- buf[0] = (u8)lun0notpresent; \
- buf[3] = (u8)DEV_HISUPPORT; \
- } else \
- buf[0] = (u8)notpresent; \
- buf[4] = (u8)( \
- MINNUM(len, \
- (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\
- if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
- buf[8] = 'D'; \
- buf[9] = 'E'; \
- buf[10] = 'L'; \
- buf[11] = 'L'; \
- buf[16] = 'P'; \
- buf[17] = 'S'; \
- buf[18] = 'E'; \
- buf[19] = 'U'; \
- buf[20] = 'D'; \
- buf[21] = 'O'; \
- buf[22] = ' '; \
- buf[23] = 'D'; \
- buf[24] = 'E'; \
- buf[25] = 'V'; \
- buf[26] = 'I'; \
- buf[27] = 'C'; \
- buf[28] = 'E'; \
- buf[30] = ' '; \
- buf[31] = '.'; \
- } \
- } while (0)
-
/* Struct & Defines to support sense information. */
/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 2a64a9ce0..9baf1ec70 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -61,54 +61,55 @@ struct visor_channeltype_descriptor {
const char *name;
};
-/** Information provided by each visor driver when it registers with the
- * visorbus driver.
+/**
+ * struct visor_driver - Information provided by each visor driver when it
+ * registers with the visorbus driver.
+ * @name: Name of the visor driver.
+ * @version: The numbered version of the driver (x.x.xxx).
+ * @vertag: A human readable version string.
+ * @owner: The module owner.
+ * @channel_types: Types of channels handled by this driver, ending with
+ * a zero GUID. Our specialized BUS.match() method knows
+ * about this list, and uses it to determine whether this
+ * driver will in fact handle a new device that it has
+ * detected.
+ * @probe: Called when a new device comes online, by our probe()
+ * function specified by driver.probe() (triggered
+ * ultimately by some call to driver_register(),
+ * bus_add_driver(), or driver_attach()).
+ * @remove:             Called when a device is removed, by our remove()
+ * function specified by driver.remove() (triggered
+ * ultimately by some call to device_release_driver()).
+ * @channel_interrupt: Called periodically, whenever there is a possibility
+ * that "something interesting" may have happened to the
+ * channel.
+ * @pause: Called to initiate a change of the device's state. If
+ *                     the return value is < 0, there was an error and the
+ * state transition will NOT occur. If the return value
+ * is >= 0, then the state transition was INITIATED
+ * successfully, and complete_func() will be called (or
+ * was just called) with the final status when either the
+ * state transition fails or completes successfully.
+ * @resume:            Behaves similarly to pause.
+ * @driver: Private reference to the device driver. For use by bus
+ * driver only.
+ * @version_attr: Private version field. For use by bus driver only.
*/
struct visor_driver {
const char *name;
const char *version;
const char *vertag;
- const char *build_date;
- const char *build_time;
struct module *owner;
-
- /** Types of channels handled by this driver, ending with 0 GUID.
- * Our specialized BUS.match() method knows about this list, and
- * uses it to determine whether this driver will in fact handle a
- * new device that it has detected.
- */
struct visor_channeltype_descriptor *channel_types;
-
- /** Called when a new device comes online, by our probe() function
- * specified by driver.probe() (triggered ultimately by some call
- * to driver_register() / bus_add_driver() / driver_attach()).
- */
int (*probe)(struct visor_device *dev);
-
- /** Called when a new device is removed, by our remove() function
- * specified by driver.remove() (triggered ultimately by some call
- * to device_release_driver()).
- */
void (*remove)(struct visor_device *dev);
-
- /** Called periodically, whenever there is a possibility that
- * "something interesting" may have happened to the channel state.
- */
void (*channel_interrupt)(struct visor_device *dev);
-
- /** Called to initiate a change of the device's state. If the return
-	 * value is < 0, there was an error and the state transition will NOT
- * occur. If the return value is >= 0, then the state transition was
- * INITIATED successfully, and complete_func() will be called (or was
- * just called) with the final status when either the state transition
- * fails or completes successfully.
- */
int (*pause)(struct visor_device *dev,
visorbus_state_complete_func complete_func);
int (*resume)(struct visor_device *dev,
visorbus_state_complete_func complete_func);
- /** These fields are for private use by the bus driver only. */
+ /* These fields are for private use by the bus driver only. */
struct device_driver driver;
struct driver_attribute version_attr;
};
@@ -116,48 +117,58 @@ struct visor_driver {
#define to_visor_driver(x) ((x) ? \
(container_of(x, struct visor_driver, driver)) : (NULL))
-/** A device type for things "plugged" into the visorbus bus */
+/**
+ * struct visor_device - A device type for things "plugged" into the visorbus
+ * bus
+ * @visorchannel:		Points to the channel that the device is
+ *				associated with.
+ * @channel_type_guid:		Identifies the channel type to the bus driver.
+ * @device:			Device struct meant for use by the bus driver
+ *				only.
+ * @list_all:			Used by the bus driver to enumerate devices.
+ * @periodic_work:		Device work queue. Private use by bus driver
+ *				only.
+ * @being_removed:		Indicates that the device is being removed from
+ *				the bus. Private bus driver use only.
+ * @visordriver_callback_lock:	Used by the bus driver to lock when handling
+ *				channel events.
+ * @pausing:			Indicates that a change towards a paused state
+ *				is in progress. Only modified by the bus driver.
+ * @resuming:			Indicates that a change towards a running state
+ *				is in progress. Only modified by the bus driver.
+ * @chipset_bus_no:		Private field used by the bus driver.
+ * @chipset_dev_no:		Private field used by the bus driver.
+ * @state:			Used to indicate the current state of the
+ *				device.
+ * @inst:			Unique GUID for this instance of the device.
+ * @name:			Name of the device.
+ * @pending_msg_hdr:		For private use by bus driver to respond to
+ *				hypervisor requests.
+ * @vbus_hdr_info:		A pointer to header info. Private use by bus
+ *				driver.
+ * @partition_uuid:		Indicates client partition id. This should be
+ *				the same across all visor_devices in the
+ *				current guest. Private use by bus driver only.
+ */
struct visor_device {
- /** visor driver can use the visorchannel member with the functions
- * defined in visorchannel.h to access the channel
- */
struct visorchannel *visorchannel;
uuid_le channel_type_guid;
- u64 channel_bytes;
-
- /** These fields are for private use by the bus driver only.
- * A notable exception is that the visor driver can use
- * visor_get_drvdata() and visor_set_drvdata() to retrieve or stash
- * private visor driver specific data within the device member.
- */
+ /* These fields are for private use by the bus driver only. */
struct device device;
struct list_head list_all;
struct periodic_work *periodic_work;
bool being_removed;
- bool responded_to_device_create;
- struct kobject kobjdevmajorminor; /* visorbus<x>/dev<y>/devmajorminor/*/
- struct {
- int major, minor;
- void *attr; /* private use by devmajorminor_attr.c you can
- * change this constant to whatever you want
- */
- } devnodes[5];
- /* the code will detect and behave appropriately) */
struct semaphore visordriver_callback_lock;
bool pausing;
bool resuming;
u32 chipset_bus_no;
u32 chipset_dev_no;
struct visorchipset_state state;
- uuid_le type;
uuid_le inst;
u8 *name;
- u8 *description;
struct controlvm_message_header *pending_msg_hdr;
void *vbus_hdr_info;
- u32 switch_no;
- u32 internal_port_no;
uuid_le partition_uuid;
};
@@ -174,8 +185,6 @@ int visorbus_write_channel(struct visor_device *dev,
unsigned long nbytes);
int visorbus_clear_channel(struct visor_device *dev,
unsigned long offset, u8 ch, unsigned long nbytes);
-int visorbus_registerdevnode(struct visor_device *dev,
- const char *name, int major, int minor);
void visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
#endif
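For orientation, a hypothetical function driver written against the kerneldoc
above might look roughly like this. The GUID, names, and callback bodies are
placeholders; only visorbus_register_visor_driver()/
visorbus_unregister_visor_driver() and the zero-GUID terminator come from the
header:

static int example_probe(struct visor_device *dev)
{
	/* a negative return rejects the device; no reference is kept */
	return 0;
}

static void example_remove(struct visor_device *dev)
{
}

static struct visor_channeltype_descriptor example_channel_types[] = {
	/* a real driver puts its channel-type GUID(s) here */
	{ NULL_UUID_LE, "example" },
	{ NULL_UUID_LE, NULL }	/* zero-GUID terminator, per @channel_types */
};

static struct visor_driver example_driver = {
	.name		= "example",
	.version	= "1.0.0",
	.owner		= THIS_MODULE,
	.channel_types	= example_channel_types,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return visorbus_register_visor_driver(&example_driver);
}

static void __exit example_exit(void)
{
	visorbus_unregister_visor_driver(&example_driver);
}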
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 533bb5b3d..3a147dbbd 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -33,6 +33,9 @@ static int visorbus_forcenomatch;
static int visorbus_debugref;
#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024)
+/* Display string that is guaranteed to be no longer than 99 characters */
+#define LINESIZE 99
+
#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
#define POLLJIFFIES_TESTWORK 100
#define POLLJIFFIES_NORMALCHANNEL 10
@@ -182,7 +185,6 @@ static int
visorbus_match(struct device *xdev, struct device_driver *xdrv)
{
uuid_le channel_type;
- int rc = 0;
int i;
struct visor_device *dev;
struct visor_driver *drv;
@@ -190,26 +192,23 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
dev = to_visor_device(xdev);
drv = to_visor_driver(xdrv);
channel_type = visorchannel_get_uuid(dev->visorchannel);
- if (visorbus_forcematch) {
- rc = 1;
- goto away;
- }
- if (visorbus_forcenomatch)
- goto away;
+ if (visorbus_forcematch)
+ return 1;
+ if (visorbus_forcenomatch)
+ return 0;
if (!drv->channel_types)
- goto away;
+ return 0;
+
for (i = 0;
(uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE) != 0) ||
(drv->channel_types[i].name);
i++)
if (uuid_le_cmp(drv->channel_types[i].guid,
- channel_type) == 0) {
- rc = i + 1;
- goto away;
- }
-away:
- return rc;
+ channel_type) == 0)
+ return i + 1;
+
+ return 0;
}
/** This is called when device_unregister() is called for the bus device
@@ -243,180 +242,6 @@ visorbus_release_device(struct device *xdev)
kfree(dev);
}
-/* Implement publishing of device node attributes under:
- *
- * /sys/bus/visorbus<x>/dev<y>/devmajorminor
- *
- */
-
-#define to_devmajorminor_attr(_attr) \
- container_of(_attr, struct devmajorminor_attribute, attr)
-#define to_visor_device_from_kobjdevmajorminor(obj) \
- container_of(obj, struct visor_device, kobjdevmajorminor)
-
-struct devmajorminor_attribute {
- struct attribute attr;
- int slot;
- ssize_t (*show)(struct visor_device *, int slot, char *buf);
- ssize_t (*store)(struct visor_device *, int slot, const char *buf,
- size_t count);
-};
-
-static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
-
- if (slot < 0 || slot >= maxdevnodes)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d:%d\n",
- dev->devnodes[slot].major, dev->devnodes[slot].minor);
-}
-
-static ssize_t
-devmajorminor_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- struct devmajorminor_attribute *devmajorminor_attr =
- to_devmajorminor_attr(attr);
- struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
- ssize_t ret = 0;
-
- if (devmajorminor_attr->show)
- ret = devmajorminor_attr->show(dev,
- devmajorminor_attr->slot, buf);
- return ret;
-}
-
-static ssize_t
-devmajorminor_attr_store(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
-{
- struct devmajorminor_attribute *devmajorminor_attr =
- to_devmajorminor_attr(attr);
- struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
- ssize_t ret = 0;
-
- if (devmajorminor_attr->store)
- ret = devmajorminor_attr->store(dev,
- devmajorminor_attr->slot,
- buf, count);
- return ret;
-}
-
-static int register_devmajorminor_attributes(struct visor_device *dev);
-
-static int
-devmajorminor_create_file(struct visor_device *dev, const char *name,
- int major, int minor)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
- struct devmajorminor_attribute *myattr = NULL;
- int x = -1, rc = 0, slot = -1;
-
- register_devmajorminor_attributes(dev);
- for (slot = 0; slot < maxdevnodes; slot++)
- if (!dev->devnodes[slot].attr)
- break;
- if (slot == maxdevnodes) {
- rc = -ENOMEM;
- goto away;
- }
- myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
- if (!myattr) {
- rc = -ENOMEM;
- goto away;
- }
- myattr->show = DEVMAJORMINOR_ATTR;
- myattr->store = NULL;
- myattr->slot = slot;
- myattr->attr.name = name;
- myattr->attr.mode = S_IRUGO;
- dev->devnodes[slot].attr = myattr;
- dev->devnodes[slot].major = major;
- dev->devnodes[slot].minor = minor;
- x = sysfs_create_file(&dev->kobjdevmajorminor, &myattr->attr);
- if (x < 0) {
- rc = x;
- goto away;
- }
- kobject_uevent(&dev->device.kobj, KOBJ_ONLINE);
-away:
- if (rc < 0) {
- kfree(myattr);
- myattr = NULL;
- dev->devnodes[slot].attr = NULL;
- }
- return rc;
-}
-
-static void
-devmajorminor_remove_file(struct visor_device *dev, int slot)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
- struct devmajorminor_attribute *myattr = NULL;
-
- if (slot < 0 || slot >= maxdevnodes)
- return;
- myattr = (struct devmajorminor_attribute *)(dev->devnodes[slot].attr);
- if (!myattr)
- return;
- sysfs_remove_file(&dev->kobjdevmajorminor, &myattr->attr);
- kobject_uevent(&dev->device.kobj, KOBJ_OFFLINE);
- dev->devnodes[slot].attr = NULL;
- kfree(myattr);
-}
-
-static void
-devmajorminor_remove_all_files(struct visor_device *dev)
-{
- int i = 0;
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
-
- for (i = 0; i < maxdevnodes; i++)
- devmajorminor_remove_file(dev, i);
-}
-
-static const struct sysfs_ops devmajorminor_sysfs_ops = {
- .show = devmajorminor_attr_show,
- .store = devmajorminor_attr_store,
-};
-
-static struct kobj_type devmajorminor_kobj_type = {
- .sysfs_ops = &devmajorminor_sysfs_ops
-};
-
-static int
-register_devmajorminor_attributes(struct visor_device *dev)
-{
- int rc = 0, x = 0;
-
- if (dev->kobjdevmajorminor.parent)
- goto away; /* already registered */
- x = kobject_init_and_add(&dev->kobjdevmajorminor,
- &devmajorminor_kobj_type, &dev->device.kobj,
- "devmajorminor");
- if (x < 0) {
- rc = x;
- goto away;
- }
-
- kobject_uevent(&dev->kobjdevmajorminor, KOBJ_ADD);
-
-away:
- return rc;
-}
-
-static void
-unregister_devmajorminor_attributes(struct visor_device *dev)
-{
- if (!dev->kobjdevmajorminor.parent)
- return; /* already unregistered */
- devmajorminor_remove_all_files(dev);
-
- kobject_del(&dev->kobjdevmajorminor);
- kobject_put(&dev->kobjdevmajorminor);
- dev->kobjdevmajorminor.parent = NULL;
-}
-
/* begin implementation of specific channel attributes to appear under
* /sys/bus/visorbus<x>/dev<y>/channel
*/
@@ -427,7 +252,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
if (!vdev->visorchannel)
return 0;
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_physaddr(vdev->visorchannel));
}
@@ -449,7 +274,7 @@ static ssize_t clientpartition_show(struct device *dev,
if (!vdev->visorchannel)
return 0;
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_clientpartition(vdev->visorchannel));
}
@@ -457,24 +282,24 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
- char s[99];
+ char typeid[LINESIZE];
if (!vdev->visorchannel)
return 0;
return snprintf(buf, PAGE_SIZE, "%s\n",
- visorchannel_id(vdev->visorchannel, s));
+ visorchannel_id(vdev->visorchannel, typeid));
}
static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
- char s[99];
+ char zoneid[LINESIZE];
if (!vdev->visorchannel)
return 0;
return snprintf(buf, PAGE_SIZE, "%s\n",
- visorchannel_zoneid(vdev->visorchannel, s));
+ visorchannel_zoneid(vdev->visorchannel, zoneid));
}
static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
@@ -541,7 +366,7 @@ static ssize_t partition_handle_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", handle);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
}
static ssize_t partition_guid_show(struct device *dev,
@@ -566,7 +391,7 @@ static ssize_t channel_addr_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", addr);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
}
static ssize_t channel_bytes_show(struct device *dev,
@@ -575,7 +400,7 @@ static ssize_t channel_bytes_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", nbytes);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
}
static ssize_t channel_id_show(struct device *dev,
@@ -598,9 +423,9 @@ static ssize_t client_bus_info_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
struct visorchannel *channel = vdev->visorchannel;
- int i, x, remain = PAGE_SIZE;
+ int i, shift, remain = PAGE_SIZE;
unsigned long off;
- char *p = buf;
+ char *pos = buf;
u8 *partition_name;
struct ultra_vbus_deviceinfo dev_info;
@@ -608,44 +433,45 @@ static ssize_t client_bus_info_show(struct device *dev,
if (channel) {
if (vdev->name)
partition_name = vdev->name;
- x = snprintf(p, remain,
- "Client device / client driver info for %s partition (vbus #%d):\n",
- partition_name, vdev->chipset_dev_no);
- p += x;
- remain -= x;
- x = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- chp_info),
- &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string(&dev_info, p,
- remain, -1);
- p += x;
- remain -= x;
+ shift = snprintf(pos, remain,
+				 "Client device / client driver info for %s partition (vbus #%d):\n",
+ partition_name, vdev->chipset_dev_no);
+ pos += shift;
+ remain -= shift;
+ shift = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ chp_info),
+ &dev_info, sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string(&dev_info, pos,
+ remain, -1);
+ pos += shift;
+ remain -= shift;
}
- x = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- bus_info),
- &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string(&dev_info, p,
- remain, -1);
- p += x;
- remain -= x;
+ shift = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ bus_info),
+ &dev_info, sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string(&dev_info, pos,
+ remain, -1);
+ pos += shift;
+ remain -= shift;
}
off = offsetof(struct spar_vbus_channel_protocol, dev_info);
i = 0;
while (off + sizeof(dev_info) <=
visorchannel_get_nbytes(channel)) {
- x = visorchannel_read(channel,
- off, &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string
- (&dev_info, p, remain, i);
- p += x;
- remain -= x;
+ shift = visorchannel_read(channel,
+ off, &dev_info,
+ sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string
+ (&dev_info, pos, remain, i);
+ pos += shift;
+ remain -= shift;
}
off += sizeof(dev_info);
i++;
@@ -752,36 +578,28 @@ dev_stop_periodic_work(struct visor_device *dev)
static int
visordriver_probe_device(struct device *xdev)
{
- int rc;
+ int res;
struct visor_driver *drv;
struct visor_device *dev;
drv = to_visor_driver(xdev->driver);
dev = to_visor_device(xdev);
+
+ if (!drv->probe)
+ return -ENODEV;
+
down(&dev->visordriver_callback_lock);
dev->being_removed = false;
- /*
- * ensure that the dev->being_removed flag is cleared before
- * we start the probe
- */
- wmb();
- get_device(&dev->device);
- if (!drv->probe) {
- up(&dev->visordriver_callback_lock);
- rc = -ENODEV;
- goto away;
+
+ res = drv->probe(dev);
+ if (res >= 0) {
+ /* success: reference kept via unmatched get_device() */
+ get_device(&dev->device);
+ fix_vbus_dev_info(dev);
}
- rc = drv->probe(dev);
- if (rc < 0)
- goto away;
- fix_vbus_dev_info(dev);
up(&dev->visordriver_callback_lock);
- rc = 0;
-away:
- if (rc != 0)
- put_device(&dev->device);
- return rc;
+ return res;
}
/** This is called when device_unregister() is called for each child device
@@ -798,21 +616,12 @@ visordriver_remove_device(struct device *xdev)
drv = to_visor_driver(xdev->driver);
down(&dev->visordriver_callback_lock);
dev->being_removed = true;
- /*
- * ensure that the dev->being_removed flag is set before we start the
- * actual removal
- */
- wmb();
- if (drv) {
- if (drv->remove)
- drv->remove(dev);
- }
+ if (drv->remove)
+ drv->remove(dev);
up(&dev->visordriver_callback_lock);
dev_stop_periodic_work(dev);
- devmajorminor_remove_all_files(dev);
put_device(&dev->device);
-
return 0;
}
@@ -928,14 +737,6 @@ visorbus_clear_channel(struct visor_device *dev, unsigned long offset, u8 ch,
}
EXPORT_SYMBOL_GPL(visorbus_clear_channel);
-int
-visorbus_registerdevnode(struct visor_device *dev,
- const char *name, int major, int minor)
-{
- return devmajorminor_create_file(dev, name, major, minor);
-}
-EXPORT_SYMBOL_GPL(visorbus_registerdevnode);
-
/** We don't really have a real interrupt, so for now we just call the
* interrupt function periodically...
*/
@@ -970,7 +771,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
static int
create_visor_device(struct visor_device *dev)
{
- int rc;
+ int err;
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
@@ -992,8 +793,8 @@ create_visor_device(struct visor_device *dev)
if (!dev->periodic_work) {
POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
DIAG_SEVERITY_ERR);
- rc = -EINVAL;
- goto away;
+ err = -EINVAL;
+ goto err_put;
}
/* bus_id must be a unique name with respect to this bus TYPE
@@ -1019,36 +820,25 @@ create_visor_device(struct visor_device *dev)
* claim the device. The device will be linked onto
* bus_type.klist_devices regardless (use bus_for_each_dev).
*/
- rc = device_add(&dev->device);
- if (rc < 0) {
+ err = device_add(&dev->device);
+ if (err < 0) {
POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
DIAG_SEVERITY_ERR);
- goto away;
- }
-
- rc = register_devmajorminor_attributes(dev);
- if (rc < 0) {
- POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
- DIAG_SEVERITY_ERR);
- goto away_unregister;
+ goto err_put;
}
list_add_tail(&dev->list_all, &list_all_device_instances);
- return 0;
-
-away_unregister:
- device_unregister(&dev->device);
+ return 0; /* success: reference kept via unmatched get_device() */
-away:
+err_put:
put_device(&dev->device);
- return rc;
+ return err;
}
static void
remove_visor_device(struct visor_device *dev)
{
list_del(&dev->list_all);
- unregister_devmajorminor_attributes(dev);
put_device(&dev->device);
device_unregister(&dev->device);
}
@@ -1477,24 +1267,24 @@ struct channel_size_info {
int
visorbus_init(void)
{
- int rc = 0;
+ int err;
- POSTCODE_LINUX_3(DRIVER_ENTRY_PC, rc, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
bus_device_info_init(&clientbus_driverinfo,
"clientbus", "visorbus",
VERSION, NULL);
- rc = create_bus_type();
- if (rc < 0) {
+ err = create_bus_type();
+ if (err < 0) {
POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
- goto away;
+ goto error;
}
periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev");
if (!periodic_dev_workqueue) {
POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR);
- rc = -ENOMEM;
- goto away;
+ err = -ENOMEM;
+ goto error;
}
/* This enables us to receive notifications when devices appear for
@@ -1504,13 +1294,11 @@ visorbus_init(void)
&chipset_responders,
&chipset_driverinfo);
- rc = 0;
+ return 0;
-away:
- if (rc)
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
- return rc;
+error:
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ return err;
}
void
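A side note on the recurring 0x%Lx to 0x%llx change in this file: %L is a
non-standard length modifier that checkpatch complains about, while the C99
ll modifier matches the kernel's u64. Reduced to a sketch of the preferred
form (mirroring the show routines above):

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct visor_device *vdev = to_visor_device(dev);
	u64 addr = visorchannel_get_physaddr(vdev->visorchannel);

	/* u64 pairs with the standard ll modifier, not the glibc-ism %L */
	return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
}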
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index b68a904ac..43373582c 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -40,7 +40,6 @@ struct visorchannel {
bool requested;
struct channel_header chan_hdr;
uuid_le guid;
- ulong size;
bool needs_lock; /* channel creator knows if more than one */
/* thread will be inserting or removing */
spinlock_t insert_lock; /* protect head writes in chan_hdr */
@@ -134,8 +133,6 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
}
channel->nbytes = channel_bytes;
-
- channel->size = channel_bytes;
channel->guid = guid;
return channel;
@@ -186,7 +183,7 @@ EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
ulong
visorchannel_get_nbytes(struct visorchannel *channel)
{
- return channel->size;
+ return channel->nbytes;
}
EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5fbda7b21..5ba5936e2 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -59,14 +59,13 @@
*/
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1; /* default is on */
-static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
static u32 dump_vhba_bus;
static int
visorchipset_open(struct inode *inode, struct file *file)
{
- unsigned minor_number = iminor(inode);
+ unsigned int minor_number = iminor(inode);
if (minor_number)
return -ENODEV;
@@ -90,9 +89,6 @@ static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;
-#define MAX_CHIPSET_EVENTS 2
-static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
-
struct parser_context {
unsigned long allocbytes;
unsigned long param_bytes;
@@ -107,7 +103,6 @@ static DEFINE_SEMAPHORE(notifier_lock);
static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
-static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;
static LIST_HEAD(bus_info_list);
@@ -156,8 +151,6 @@ struct putfile_active_buffer {
/* a payload from a controlvm message, containing a file data buffer */
struct parser_context *parser_ctx;
/* points within data area of parser_ctx to next byte of data */
- u8 *pnext;
- /* # bytes left from <pnext> to the end of this data buffer */
size_t bytes_remaining;
};
@@ -171,14 +164,10 @@ struct putfile_request {
/* header from original TransmitFile request */
struct controlvm_message_header controlvm_header;
- u64 file_request_number; /* from original TransmitFile request */
/* link to next struct putfile_request */
struct list_head next_putfile_request;
- /* most-recent sequence number supplied via a controlvm message */
- u64 data_sequence_number;
-
/* head of putfile_buffer_entry list, which describes the data to be
* supplied as putfile data;
* - this list is added to when controlvm messages come in that supply
@@ -274,11 +263,6 @@ static ssize_t remaining_steps_store(struct device *dev,
const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);
-static ssize_t chipsetready_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_WO(chipsetready);
-
static ssize_t devicedisabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
@@ -303,16 +287,6 @@ static struct attribute_group visorchipset_install_group = {
.attrs = visorchipset_install_attrs
};
-static struct attribute *visorchipset_guest_attrs[] = {
- &dev_attr_chipsetready.attr,
- NULL
-};
-
-static struct attribute_group visorchipset_guest_group = {
- .name = "guest",
- .attrs = visorchipset_guest_attrs
-};
-
static struct attribute *visorchipset_parahotplug_attrs[] = {
&dev_attr_devicedisabled.attr,
&dev_attr_deviceenabled.attr,
@@ -326,7 +300,6 @@ static struct attribute_group visorchipset_parahotplug_group = {
static const struct attribute_group *visorchipset_dev_groups[] = {
&visorchipset_install_group,
- &visorchipset_guest_group,
&visorchipset_parahotplug_group,
NULL
};
@@ -359,8 +332,7 @@ static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
int allocbytes = sizeof(struct parser_context) + bytes;
- struct parser_context *rc = NULL;
- struct parser_context *ctx = NULL;
+ struct parser_context *ctx;
if (retry)
*retry = false;
@@ -374,15 +346,13 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
> MAX_CONTROLVM_PAYLOAD_BYTES) {
if (retry)
*retry = true;
- rc = NULL;
- goto cleanup;
+ return NULL;
}
ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
if (!ctx) {
if (retry)
*retry = true;
- rc = NULL;
- goto cleanup;
+ return NULL;
}
ctx->allocbytes = allocbytes;
@@ -393,35 +363,27 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
if (local) {
void *p;
- if (addr > virt_to_phys(high_memory - 1)) {
- rc = NULL;
- goto cleanup;
- }
+ if (addr > virt_to_phys(high_memory - 1))
+ goto err_finish_ctx;
p = __va((unsigned long)(addr));
memcpy(ctx->data, p, bytes);
} else {
void *mapping = memremap(addr, bytes, MEMREMAP_WB);
- if (!mapping) {
- rc = NULL;
- goto cleanup;
- }
+ if (!mapping)
+ goto err_finish_ctx;
memcpy(ctx->data, mapping, bytes);
memunmap(mapping);
}
ctx->byte_stream = true;
- rc = ctx;
-cleanup:
- if (rc) {
- controlvm_payload_bytes_buffered += ctx->param_bytes;
- } else {
- if (ctx) {
- parser_done(ctx);
- ctx = NULL;
- }
- }
- return rc;
+ controlvm_payload_bytes_buffered += ctx->param_bytes;
+
+ return ctx;
+
+err_finish_ctx:
+ parser_done(ctx);
+ return NULL;
}
static uuid_le
@@ -523,7 +485,7 @@ static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u8 tool_action;
+ u8 tool_action = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -541,10 +503,11 @@ static ssize_t toolaction_store(struct device *dev,
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- tool_action),
- &tool_action, sizeof(u8));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action),
+ &tool_action, sizeof(u8));
if (ret)
return ret;
@@ -576,10 +539,11 @@ static ssize_t boottotool_store(struct device *dev,
return -EINVAL;
efi_spar_indication.boot_to_tool = val;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- efi_spar_ind), &(efi_spar_indication),
- sizeof(struct efi_spar_indication));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &(efi_spar_indication),
+ sizeof(struct efi_spar_indication));
if (ret)
return ret;
@@ -589,7 +553,7 @@ static ssize_t boottotool_store(struct device *dev,
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u32 error;
+ u32 error = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -607,10 +571,11 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &error))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_error),
- &error, sizeof(u32));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
if (ret)
return ret;
return count;
@@ -619,12 +584,13 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u32 text_id;
+ u32 text_id = 0;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
+ visorchannel_read
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}
@@ -637,10 +603,11 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
if (ret)
return ret;
return count;
@@ -649,7 +616,7 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
static ssize_t remaining_steps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u16 remaining_steps;
+ u16 remaining_steps = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -668,10 +635,11 @@ static ssize_t remaining_steps_store(struct device *dev,
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
if (ret)
return ret;
return count;
@@ -717,26 +685,6 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
-static u8
-check_chipset_events(void)
-{
- int i;
- u8 send_msg = 1;
- /* Check events to determine if response should be sent */
- for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
- send_msg &= chipset_events[i];
- return send_msg;
-}
-
-static void
-clear_chipset_events(void)
-{
- int i;
- /* Clear chipset_events */
- for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
- chipset_events[i] = 0;
-}
-
void
visorchipset_register_busdev(
struct visorchipset_busdev_notifiers *notifiers,
@@ -772,7 +720,7 @@ chipset_init(struct controlvm_message *inmsg)
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_respond;
}
chipset_inited = 1;
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
@@ -789,7 +737,7 @@ chipset_init(struct controlvm_message *inmsg)
*/
features |= ULTRA_CHIPSET_FEATURE_REPLY;
-cleanup:
+out_respond:
if (inmsg->hdr.flags.response_expected)
controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
@@ -970,28 +918,31 @@ bus_epilog(struct visor_device *bus_info,
u32 cmd, struct controlvm_message_header *msg_hdr,
int response, bool need_response)
{
- bool notified = false;
struct controlvm_message_header *pmsg_hdr = NULL;
+ down(&notifier_lock);
+
if (!bus_info) {
/* relying on a valid passed in response code */
/* be lazy and re-use msg_hdr for this failure, is this ok?? */
pmsg_hdr = msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (bus_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = bus_info->pending_msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
- response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto away;
+ POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ POSTCODE_SEVERITY_ERR);
+ goto out_unlock;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -999,37 +950,27 @@ bus_epilog(struct visor_device *bus_info,
bus_info->pending_msg_hdr = pmsg_hdr;
}
- down(&notifier_lock);
if (response == CONTROLVM_RESP_SUCCESS) {
switch (cmd) {
case CONTROLVM_BUS_CREATE:
if (busdev_notifiers.bus_create) {
(*busdev_notifiers.bus_create) (bus_info);
- notified = true;
+ goto out_unlock;
}
break;
case CONTROLVM_BUS_DESTROY:
if (busdev_notifiers.bus_destroy) {
(*busdev_notifiers.bus_destroy) (bus_info);
- notified = true;
+ goto out_unlock;
}
break;
}
}
-away:
- if (notified)
- /* The callback function just called above is responsible
- * for calling the appropriate visorchipset_busdev_responders
- * function, which will call bus_responder()
- */
- ;
- else
- /*
- * Do not kfree(pmsg_hdr) as this is the failure path.
- * The success path ('notified') will call the responder
- * directly and kfree() there.
- */
- bus_responder(cmd, pmsg_hdr, response);
+
+out_respond_and_unlock:
+ bus_responder(cmd, pmsg_hdr, response);
+
+out_unlock:
up(&notifier_lock);
}
@@ -1040,30 +981,30 @@ device_epilog(struct visor_device *dev_info,
bool need_response, bool for_visorbus)
{
struct visorchipset_busdev_notifiers *notifiers;
- bool notified = false;
struct controlvm_message_header *pmsg_hdr = NULL;
notifiers = &busdev_notifiers;
+ down(&notifier_lock);
if (!dev_info) {
/* relying on a valid passed in response code */
/* be lazy and re-use msg_hdr for this failure, is this ok?? */
pmsg_hdr = msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (dev_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = dev_info->pending_msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto away;
+ goto out_respond_and_unlock;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -1071,13 +1012,12 @@ device_epilog(struct visor_device *dev_info,
dev_info->pending_msg_hdr = pmsg_hdr;
}
- down(&notifier_lock);
if (response >= 0) {
switch (cmd) {
case CONTROLVM_DEVICE_CREATE:
if (notifiers->device_create) {
(*notifiers->device_create) (dev_info);
- notified = true;
+ goto out_unlock;
}
break;
case CONTROLVM_DEVICE_CHANGESTATE:
@@ -1087,7 +1027,7 @@ device_epilog(struct visor_device *dev_info,
segment_state_running.operating) {
if (notifiers->device_resume) {
(*notifiers->device_resume) (dev_info);
- notified = true;
+ goto out_unlock;
}
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
@@ -1099,32 +1039,23 @@ device_epilog(struct visor_device *dev_info,
*/
if (notifiers->device_pause) {
(*notifiers->device_pause) (dev_info);
- notified = true;
+ goto out_unlock;
}
}
break;
case CONTROLVM_DEVICE_DESTROY:
if (notifiers->device_destroy) {
(*notifiers->device_destroy) (dev_info);
- notified = true;
+ goto out_unlock;
}
break;
}
}
-away:
- if (notified)
- /* The callback function just called above is responsible
- * for calling the appropriate visorchipset_busdev_responders
- * function, which will call device_responder()
- */
- ;
- else
- /*
- * Do not kfree(pmsg_hdr) as this is the failure path.
- * The success path ('notified') will call the responder
- * directly and kfree() there.
- */
- device_responder(cmd, pmsg_hdr, response);
+
+out_respond_and_unlock:
+ device_responder(cmd, pmsg_hdr, response);
+
+out_unlock:
up(&notifier_lock);
}
@@ -1142,14 +1073,14 @@ bus_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_bus_epilog;
}
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto cleanup;
+ goto out_bus_epilog;
}
INIT_LIST_HEAD(&bus_info->list_all);
@@ -1169,7 +1100,7 @@ bus_create(struct controlvm_message *inmsg)
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
kfree(bus_info);
bus_info = NULL;
- goto cleanup;
+ goto out_bus_epilog;
}
bus_info->visorchannel = visorchannel;
if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
@@ -1179,7 +1110,7 @@ bus_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
-cleanup:
+out_bus_epilog:
bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
rc, inmsg->hdr.flags.response_expected == 1);
}
@@ -1231,8 +1162,9 @@ bus_configure(struct controlvm_message *inmsg,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
} else {
- visorchannel_set_clientpartition(bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
+ visorchannel_set_clientpartition
+ (bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
bus_info->partition_uuid = parser_id_get(parser_ctx);
parser_param_start(parser_ctx, PARSERSTRING_NAME);
bus_info->name = parser_string_get(parser_ctx);
@@ -1260,14 +1192,14 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- goto cleanup;
+ goto out_respond;
}
if (bus_info->state.created == 0) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- goto cleanup;
+ goto out_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
@@ -1275,7 +1207,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -1283,7 +1215,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto cleanup;
+ goto out_respond;
}
dev_info->chipset_bus_no = bus_no;
@@ -1308,7 +1240,7 @@ my_device_create(struct controlvm_message *inmsg)
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
kfree(dev_info);
dev_info = NULL;
- goto cleanup;
+ goto out_respond;
}
dev_info->visorchannel = visorchannel;
dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -1318,7 +1250,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
-cleanup:
+out_respond:
device_epilog(dev_info, segment_state_running,
CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
inmsg->hdr.flags.response_expected == 1, 1);
@@ -1382,35 +1314,23 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
struct visor_controlvm_payload_info *info)
{
u8 *payload = NULL;
- int rc = CONTROLVM_RESP_SUCCESS;
- if (!info) {
- rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- goto cleanup;
- }
+ if (!info)
+ return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+
memset(info, 0, sizeof(struct visor_controlvm_payload_info));
- if ((offset == 0) || (bytes == 0)) {
- rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- goto cleanup;
- }
+ if ((offset == 0) || (bytes == 0))
+ return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+
payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
- if (!payload) {
- rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
- goto cleanup;
- }
+ if (!payload)
+ return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
info->offset = offset;
info->bytes = bytes;
info->ptr = payload;
-cleanup:
- if (rc < 0) {
- if (payload) {
- memunmap(payload);
- payload = NULL;
- }
- }
- return rc;
+ return CONTROLVM_RESP_SUCCESS;
}
static void
@@ -1490,14 +1410,8 @@ chipset_ready(struct controlvm_message_header *msg_hdr)
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
+ if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, rc);
- if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
- /* Send CHIPSET_READY response when all modules have been loaded
- * and disks mounted for the partition
- */
- g_chipset_msg_hdr = *msg_hdr;
- }
}
static void
@@ -1726,9 +1640,10 @@ parahotplug_process_message(struct controlvm_message *inmsg)
* initialization.
*/
parahotplug_request_kickoff(req);
- controlvm_respond_physdev_changestate(&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.device_change_state.state);
+ controlvm_respond_physdev_changestate
+ (&inmsg->hdr,
+ CONTROLVM_RESP_SUCCESS,
+ inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
} else {
/* For disable messages, add the request to the
@@ -1840,8 +1755,9 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
break;
default:
if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr,
- -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ controlvm_respond
+ (&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
break;
}
@@ -1885,31 +1801,11 @@ controlvm_periodic_work(struct work_struct *work)
struct controlvm_message inmsg;
bool got_command = false;
bool handle_command_failed = false;
- static u64 poll_count;
/* make sure visorbus server is registered for controlvm callbacks */
if (visorchipset_visorbusregwait && !visorbusregistered)
goto cleanup;
- poll_count++;
- if (poll_count >= 250)
- ; /* keep going */
- else
- goto cleanup;
-
- /* Check events to determine if response to CHIPSET_READY
- * should be sent
- */
- if (visorchipset_holdchipsetready &&
- (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
- if (check_chipset_events() == 1) {
- controlvm_respond(&g_chipset_msg_hdr, 0);
- clear_chipset_events();
- memset(&g_chipset_msg_hdr, 0,
- sizeof(struct controlvm_message_header));
- }
- }
-
while (visorchannel_signalremove(controlvm_channel,
CONTROLVM_QUEUE_RESPONSE,
&inmsg))
@@ -1979,8 +1875,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
u16 local_crash_msg_count;
/* make sure visorbus is registered for controlvm callbacks */
- if (visorchipset_visorbusregwait && !visorbusregistered)
- goto cleanup;
+ if (visorchipset_visorbusregwait && !visorbusregistered) {
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+ return;
+ }
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
@@ -2057,13 +1956,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
return;
}
POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
- return;
-
-cleanup:
-
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
-
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
static void
@@ -2135,25 +2027,6 @@ device_resume_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static ssize_t chipsetready_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- char msgtype[64];
-
- if (sscanf(buf, "%63s", msgtype) != 1)
- return -EINVAL;
-
- if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
- chipset_events[0] = 1;
- return count;
- } else if (!strcmp(msgtype, "MODULES_LOADED")) {
- chipset_events[1] = 1;
- return count;
- }
- return -EINVAL;
-}
-
/* The parahotplug/devicedisabled interface gets called by our support script
* when an SR-IOV device has been shut down. The ID is passed to the script
* and then passed back when the device has been removed.
@@ -2205,10 +2078,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
if (!*file_controlvm_channel)
return -ENXIO;
- visorchannel_read(*file_controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- gp_control_channel),
- &addr, sizeof(addr));
+ visorchannel_read
+ (*file_controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
if (!addr)
return -ENXIO;
@@ -2308,16 +2182,25 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
return 0;
}
+static void
+visorchipset_file_cleanup(dev_t major_dev)
+{
+ if (file_cdev.ops)
+ cdev_del(&file_cdev);
+ file_cdev.ops = NULL;
+ unregister_chrdev_region(major_dev, 1);
+}
+
static int
visorchipset_init(struct acpi_device *acpi_device)
{
- int rc = 0;
+ int err = -ENODEV;
u64 addr;
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
addr = controlvm_get_channel_address();
if (!addr)
- return -ENODEV;
+ goto error;
memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
@@ -2325,24 +2208,19 @@ visorchipset_init(struct acpi_device *acpi_device)
controlvm_channel = visorchannel_create_with_lock(addr, 0,
GFP_KERNEL, uuid);
if (!controlvm_channel)
- return -ENODEV;
+ goto error;
+
if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
visorchannel_get_header(controlvm_channel))) {
initialize_controlvm_payload();
} else {
- visorchannel_destroy(controlvm_channel);
- controlvm_channel = NULL;
- return -ENODEV;
+ goto error_destroy_channel;
}
major_dev = MKDEV(visorchipset_major, 0);
- rc = visorchipset_file_init(major_dev, &controlvm_channel);
- if (rc < 0) {
- POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
- goto cleanup;
- }
-
- memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
+ err = visorchipset_file_init(major_dev, &controlvm_channel);
+ if (err < 0)
+ goto error_destroy_payload;
/* if booting in a crash kernel */
if (is_kdump_kernel())
@@ -2359,27 +2237,33 @@ visorchipset_init(struct acpi_device *acpi_device)
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
- rc = -ENODEV;
- goto cleanup;
+ err = -ENODEV;
+ goto error_cancel_work;
}
POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
- rc = visorbus_init();
-cleanup:
- if (rc) {
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
- }
- return rc;
-}
+ err = visorbus_init();
+ if (err < 0)
+ goto error_unregister;
-static void
-visorchipset_file_cleanup(dev_t major_dev)
-{
- if (file_cdev.ops)
- cdev_del(&file_cdev);
- file_cdev.ops = NULL;
- unregister_chrdev_region(major_dev, 1);
+ return 0;
+
+error_unregister:
+ platform_device_unregister(&visorchipset_platform_device);
+
+error_cancel_work:
+ cancel_delayed_work_sync(&periodic_controlvm_work);
+ visorchipset_file_cleanup(major_dev);
+
+error_destroy_payload:
+ destroy_controlvm_payload_info(&controlvm_payload_info);
+
+error_destroy_channel:
+ visorchannel_destroy(controlvm_channel);
+
+error:
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ return err;
}
static int
@@ -2392,8 +2276,6 @@ visorchipset_exit(struct acpi_device *acpi_device)
cancel_delayed_work_sync(&periodic_controlvm_work);
destroy_controlvm_payload_info(&controlvm_payload_info);
- memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
-
visorchannel_destroy(controlvm_channel);
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
@@ -2425,7 +2307,7 @@ static __init uint32_t visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
- if (cpu_has_hypervisor) {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
/* check the ID */
cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
return (ebx == UNISYS_SPAR_ID_EBX) &&
@@ -2460,12 +2342,8 @@ module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
"major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_visorbusreqwait,
+MODULE_PARM_DESC(visorchipset_visorbusregwait,
"1 to have the module wait for the visor bus to register");
-module_param_named(holdchipsetready, visorchipset_holdchipsetready,
- int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_holdchipsetready,
- "1 to hold response to CHIPSET_READY");
module_init(init_unisys);
module_exit(exit_unisys);
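The reshaped visorchipset_init() error path above is the standard kernel
unwind idiom: one label per successfully acquired resource, jumped to in
reverse order of acquisition. A generic sketch with hypothetical
acquire/release helpers:

static int example_init(void)
{
	int err;

	err = acquire_a();	/* hypothetical resource helpers */
	if (err)
		goto error;

	err = acquire_b();
	if (err)
		goto error_release_a;

	err = acquire_c();
	if (err)
		goto error_release_b;

	return 0;

error_release_b:
	release_b();
error_release_a:
	release_a();
error:
	return err;
}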
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index e93bb1dbf..6a4570d10 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -52,6 +52,8 @@ static int visorhba_resume(struct visor_device *dev,
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
size_t len, loff_t *offset);
+static int set_no_disk_inquiry_result(unsigned char *buf,
+ size_t len, bool is_lun0);
static struct dentry *visorhba_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
@@ -83,12 +85,6 @@ static struct visor_driver visorhba_driver = {
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
-struct visor_thread_info {
- struct task_struct *task;
- struct completion has_stopped;
- int id;
-};
-
struct visordisk_info {
u32 valid;
u32 channel, id, lun; /* Disk Path */
@@ -135,7 +131,7 @@ struct visorhba_devdata {
struct visordisk_info head;
unsigned int max_buff_len;
int devnum;
- struct visor_thread_info threadinfo;
+ struct task_struct *thread;
int thread_wait_ms;
};
@@ -152,28 +148,36 @@ static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
(iter->lun == match->lun))
/**
* visor_thread_start - starts a thread for the device
- * @thrinfo: The thread to start
* @threadfn: Function the thread starts
* @thrcontext: Context to pass to the thread, i.e. devdata
* @name: string describing name of thread
*
* Starts a thread for the device.
*
- * Return 0 on success;
+ * Return the task_struct * denoting the thread on success,
+ * or NULL on failure.
*/
-static int visor_thread_start(struct visor_thread_info *thrinfo,
- int (*threadfn)(void *),
- void *thrcontext, char *name)
+static struct task_struct *visor_thread_start(int (*threadfn)(void *),
+					      void *thrcontext, char *name)
{
- /* used to stop the thread */
- init_completion(&thrinfo->has_stopped);
- thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name);
- if (IS_ERR(thrinfo->task)) {
- thrinfo->id = 0;
- return PTR_ERR(thrinfo->task);
+ struct task_struct *task;
+
+ task = kthread_run(threadfn, thrcontext, "%s", name);
+ if (IS_ERR(task)) {
+ pr_err("visorbus failed to start thread\n");
+ return NULL;
}
- thrinfo->id = thrinfo->task->pid;
- return 0;
+ return task;
+}
+
+/**
+ * visor_thread_stop - stops the thread if it is running
+ * @task: the task to stop, as returned by visor_thread_start()
+ */
+static void visor_thread_stop(struct task_struct *task)
+{
+ if (!task)
+ return; /* no thread running */
+ kthread_stop(task);
}
/**
@@ -231,16 +235,17 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
int del)
{
unsigned long flags;
- void *sent = NULL;
+ void *sent;
- if (del < MAX_PENDING_REQUESTS) {
- spin_lock_irqsave(&devdata->privlock, flags);
- sent = devdata->pending[del].sent;
+ if (del >= MAX_PENDING_REQUESTS)
+ return NULL;
- devdata->pending[del].cmdtype = 0;
- devdata->pending[del].sent = NULL;
- spin_unlock_irqrestore(&devdata->privlock, flags);
- }
+ spin_lock_irqsave(&devdata->privlock, flags);
+ sent = devdata->pending[del].sent;
+
+ devdata->pending[del].cmdtype = 0;
+ devdata->pending[del].sent = NULL;
+ spin_unlock_irqrestore(&devdata->privlock, flags);
return sent;
}
@@ -681,7 +686,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
/* Stop using the IOVM response queue (queue should be drained
* by the end)
*/
- kthread_stop(devdata->threadinfo.task);
+ visor_thread_stop(devdata->thread);
/* Fail commands that weren't completed */
spin_lock_irqsave(&devdata->privlock, flags);
@@ -772,6 +777,24 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
}
+static int set_no_disk_inquiry_result(unsigned char *buf,
+ size_t len, bool is_lun0)
+{
+ if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
+ return -EINVAL;
+ memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
+ buf[2] = SCSI_SPC2_VER;
+ if (is_lun0) {
+ buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
+ buf[3] = DEV_HISUPPORT;
+ } else {
+ buf[0] = DEV_NOT_CAPABLE;
+ }
+ buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
+ strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
+ return 0;
+}
+
/**
* do_scsi_nolinuxstat - scsi command didn't have linuxstat
* @cmdrsp: response from IOVM
@@ -804,10 +827,8 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
* a disk there so we'll present a processor
* there.
*/
- SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
- scsidev->lun,
- DEV_DISK_CAPABLE_NOT_PRESENT,
- DEV_NOT_CAPABLE);
+ set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
+ scsidev->lun == 0);
if (scsi_sg_count(scsicmd) == 0) {
memcpy(scsi_sglist(scsicmd), buf,
@@ -929,14 +950,15 @@ static void process_disk_notify(struct Scsi_Host *shost,
struct diskaddremove *dar;
dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
- if (dar) {
- dar->add = cmdrsp->disknotify.add;
- dar->shost = shost;
- dar->channel = cmdrsp->disknotify.channel;
- dar->id = cmdrsp->disknotify.id;
- dar->lun = cmdrsp->disknotify.lun;
- queue_disk_add_remove(dar);
- }
+ if (!dar)
+ return;
+
+ dar->add = cmdrsp->disknotify.add;
+ dar->shost = shost;
+ dar->channel = cmdrsp->disknotify.channel;
+ dar->id = cmdrsp->disknotify.id;
+ dar->lun = cmdrsp->disknotify.lun;
+ queue_disk_add_remove(dar);
}
/**
@@ -1064,8 +1086,8 @@ static int visorhba_resume(struct visor_device *dev,
if (devdata->serverdown && !devdata->serverchangingstate)
devdata->serverchangingstate = true;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vhba_incming");
+ devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
+ "vhba_incming");
devdata->serverdown = false;
devdata->serverchangingstate = false;
@@ -1141,8 +1163,8 @@ static int visorhba_probe(struct visor_device *dev)
goto err_scsi_remove_host;
devdata->thread_wait_ms = 2;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vhba_incoming");
+ devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
+ "vhba_incoming");
scsi_scan_host(scsihost);
@@ -1172,7 +1194,7 @@ static void visorhba_remove(struct visor_device *dev)
return;
scsihost = devdata->scsihost;
- kthread_stop(devdata->threadinfo.task);
+ visor_thread_stop(devdata->thread);
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 13c031611..12a357078 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -123,9 +123,9 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_GRAVE, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_BACKSLASH, /* FIXME, '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -173,7 +173,7 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[88] = KEY_F12,
[90] = KEY_KPLEFTPAREN,
[91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
+ [92] = KEY_KPASTERISK,
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
@@ -467,18 +467,14 @@ handle_locking_key(struct input_dev *visorinput_dev,
break;
default:
led = -1;
- break;
+ return;
}
- if (led >= 0) {
- int old_state = (test_bit(led, visorinput_dev->led) != 0);
-
- if (old_state != desired_state) {
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- __change_bit(led, visorinput_dev->led);
- }
+ if (test_bit(led, visorinput_dev->led) != desired_state) {
+ input_report_key(visorinput_dev, keycode, 1);
+ input_sync(visorinput_dev);
+ input_report_key(visorinput_dev, keycode, 0);
+ input_sync(visorinput_dev);
+ __change_bit(led, visorinput_dev->led);
}
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index be0d05734..fd7c9a6cb 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -109,51 +109,46 @@ struct chanstat {
};
struct visornic_devdata {
- unsigned short enabled; /* 0 disabled 1 enabled to receive */
- unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by
- * IOPART
- */
+ /* 0 disabled 1 enabled to receive */
+ unsigned short enabled;
+ /* NET_RCV_ENABLE/DISABLE acked by IOPART */
+ unsigned short enab_dis_acked;
+
struct visor_device *dev;
struct net_device *netdev;
struct net_device_stats net_stats;
atomic_t interrupt_rcvd;
wait_queue_head_t rsp_queue;
struct sk_buff **rcvbuf;
- u64 incarnation_id; /* lets IOPART know about re-birth */
- unsigned short old_flags; /* flags as they were prior to
- * set_multicast_list
- */
- atomic_t usage; /* count of users */
- int num_rcv_bufs; /* indicates how many rcv buffers
- * the vnic will post
- */
+ /* incarnation_id lets IOPART know about re-birth */
+ u64 incarnation_id;
+ /* flags as they were prior to set_multicast_list */
+ unsigned short old_flags;
+ atomic_t usage; /* count of users */
+
+ /* number of rcv buffers the vnic will post */
+ int num_rcv_bufs;
int num_rcv_bufs_could_not_alloc;
atomic_t num_rcvbuf_in_iovm;
unsigned long alloc_failed_in_if_needed_cnt;
unsigned long alloc_failed_in_repost_rtn_cnt;
- unsigned long max_outstanding_net_xmits; /* absolute max number of
- * outstanding xmits - should
- * never hit this
- */
- unsigned long upper_threshold_net_xmits; /* high water mark for
- * calling netif_stop_queue()
- */
- unsigned long lower_threshold_net_xmits; /* high water mark for calling
- * netif_wake_queue()
- */
- struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
- * xmit buffer list that have been
- * sent to the IOPART end
- */
+
+ /* absolute max number of outstanding xmits - should never hit this */
+ unsigned long max_outstanding_net_xmits;
+ /* high water mark for calling netif_stop_queue() */
+ unsigned long upper_threshold_net_xmits;
+ /* high water mark for calling netif_wake_queue() */
+ unsigned long lower_threshold_net_xmits;
+ /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
+ struct sk_buff_head xmitbufhead;
+
visorbus_state_complete_func server_down_complete_func;
struct work_struct timeout_reset;
- struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for
- * posting/unposting rcv buffers
- */
- struct uiscmdrsp *xmit_cmdrsp; /* used to issue NET_XMIT - there is
- * never more that one xmit in
- * progress at a time
- */
+ /* cmdrsp_rcv is used for posting/unposting rcv buffers */
+ struct uiscmdrsp *cmdrsp_rcv;
+ /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
+ struct uiscmdrsp *xmit_cmdrsp;
+
bool server_down; /* IOPART is down */
bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
bool going_away; /* device is being torn down */
@@ -173,18 +168,10 @@ struct visornic_devdata {
unsigned long n_rcv1; /* # rcvs of 1 buffers */
unsigned long n_rcv2; /* # rcvs of 2 buffers */
unsigned long n_rcvx; /* # rcvs of >2 buffers */
- unsigned long found_repost_rcvbuf_cnt; /* # times we called
- * repost_rcvbuf_cnt
- */
- unsigned long repost_found_skb_cnt; /* # times found the skb */
- unsigned long n_repost_deficit; /* # times we couldn't find
- * all of the rcv buffers
- */
- unsigned long bad_rcv_buf; /* # times we negleted to
- * free the rcv skb because
- * we didn't know where it
- * came from
- */
+ unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt calls */
+ unsigned long repost_found_skb_cnt; /* # times the skb was found */
+ unsigned long n_repost_deficit; /* # of lost rcv buffers */
+ unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
unsigned long n_rcv_packets_not_accepted; /* # bogus rcv packets */
int queuefullmsg_logged;
@@ -209,18 +196,17 @@ static void poll_for_irq(unsigned long v);
* Return value indicates number of entries filled in frags
* Negative values indicate an error.
*/
-static unsigned int
+static int
visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
unsigned int frags_max,
struct phys_info frags[])
{
- unsigned int count = 0, ii, size, offset = 0, numfrags;
+ unsigned int count = 0, frag, size, offset = 0, numfrags;
unsigned int total_count;
numfrags = skb_shinfo(skb)->nr_frags;
- /*
- * Compute the number of fragments this skb has, and if its more than
+ /* Compute the number of fragments this skb has; if it's more than the
* frag array can hold, linearize the skb
*/
total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
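
The return type switches from unsigned int to int because the function hands back negative errno values; through an unsigned return, -EINVAL would silently become a huge positive fragment count. A sketch of the caller-side check this enables (a fragment, not the driver's exact code):

	int count;

	count = visor_copy_fragsinfo_from_skb(skb, firstfraglen,
					      frags_max, frags);
	if (count < 0)		/* only meaningful with a signed return */
		return count;	/* propagate -EINVAL and friends */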
@@ -257,23 +243,20 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
if ((count + numfrags) > frags_max)
return -EINVAL;
- for (ii = 0; ii < numfrags; ii++) {
+ for (frag = 0; frag < numfrags; frag++) {
count = add_physinfo_entries(page_to_pfn(
- skb_frag_page(&skb_shinfo(skb)->frags[ii])),
- skb_shinfo(skb)->frags[ii].
+ skb_frag_page(&skb_shinfo(skb)->frags[frag])),
+ skb_shinfo(skb)->frags[frag].
page_offset,
- skb_shinfo(skb)->frags[ii].
+ skb_shinfo(skb)->frags[frag].
size, count, frags_max, frags);
- /*
- * add_physinfo_entries only returns
+ /* add_physinfo_entries only returns
* zero if the frags array is out of room.
* That should never happen because we
* fail above, if count+numfrags > frags_max.
- * Given that theres no recovery mechanism from putting
- * half a packet in the I/O channel, panic here as this
- * should never happen
*/
- BUG_ON(!count);
+ if (!count)
+ return -EINVAL;
}
}
if (skb_shinfo(skb)->frag_list) {
@@ -299,8 +282,7 @@ static ssize_t enable_ints_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
- /*
- * Don't want to break ABI here by having a debugfs
+ /* Don't want to break ABI here by having a debugfs
* file that no longer exists or is writable, so
* let's just make this a vestigial function
*/
@@ -308,8 +290,7 @@ static ssize_t enable_ints_write(struct file *file,
}
/**
- * visornic_serverdown_complete - IOPART went down, need to pause
- * device
+ * visornic_serverdown_complete - IOPART went down, pause device
* @work: Work queue it was scheduled on
*
* The IO partition has gone down and we need to do some cleanup
@@ -344,7 +325,7 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
}
/**
- * visornic_serverdown - Command has notified us that IOPARt is down
+ * visornic_serverdown - Command has notified us that IOPART is down
* @devdata: device that is being managed by IOPART
*
* Schedule the work needed to handle the server down request. Make
@@ -356,28 +337,38 @@ visornic_serverdown(struct visornic_devdata *devdata,
visorbus_state_complete_func complete_func)
{
unsigned long flags;
+ int err;
spin_lock_irqsave(&devdata->priv_lock, flags);
- if (!devdata->server_down && !devdata->server_change_state) {
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- return -ENODEV;
- }
- devdata->server_change_state = true;
- devdata->server_down_complete_func = complete_func;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- visornic_serverdown_complete(devdata);
- } else if (devdata->server_change_state) {
+ if (devdata->server_change_state) {
dev_dbg(&devdata->dev->device, "%s changing state\n",
__func__);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ if (devdata->server_down) {
+ dev_dbg(&devdata->dev->device, "%s already down\n",
+ __func__);
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ if (devdata->going_away) {
+ dev_dbg(&devdata->dev->device,
+ "%s aborting because device removal pending\n",
+ __func__);
+ err = -ENODEV;
+ goto err_unlock;
}
+ devdata->server_change_state = true;
+ devdata->server_down_complete_func = complete_func;
spin_unlock_irqrestore(&devdata->priv_lock, flags);
+
+ visornic_serverdown_complete(devdata);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ return err;
}
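
The rewrite above turns nested conditionals into guard clauses that share one unlock label. The general shape, as a minimal sketch (struct and names hypothetical):

#include <linux/errno.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	bool busy;
};

/* Every early exit funnels through one label, keeping the
 * lock/unlock pairing obvious on all paths.
 */
static int guarded_state_change(struct foo *f)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&f->lock, flags);
	if (f->busy) {
		err = -EBUSY;
		goto err_unlock;
	}
	f->busy = true;
	spin_unlock_irqrestore(&f->lock, flags);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&f->lock, flags);
	return err;
}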
/**
@@ -395,20 +386,19 @@ alloc_rcv_buf(struct net_device *netdev)
/* NOTE: the first fragment in each rcv buffer is pointed to by
* rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
- * in length, so the firstfrag is large enough to hold 1514.
+ * in length, so the first frag is large enough to hold 1514.
*/
skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
if (!skb)
return NULL;
skb->dev = netdev;
- skb->len = RCVPOST_BUF_SIZE;
/* current value of mtu doesn't come into play here; large
* packets will just end up using multiple rcv buffers all of
- * same size
+ * same size.
*/
- skb->data_len = 0; /* dev_alloc_skb already zeroes it out
- * for clarification.
- */
+ skb->len = RCVPOST_BUF_SIZE;
+ /* alloc_skb already zeroes it out for clarification. */
+ skb->data_len = 0;
return skb;
}
@@ -436,8 +426,8 @@ post_skb(struct uiscmdrsp *cmdrsp,
cmdrsp->net.type = NET_RCV_POST;
cmdrsp->cmdtype = CMD_NET_TYPE;
if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp)) {
+ IOCHAN_TO_IOPART,
+ cmdrsp)) {
atomic_inc(&devdata->num_rcvbuf_in_iovm);
devdata->chstat.sent_post++;
} else {
@@ -465,8 +455,8 @@ send_enbdis(struct net_device *netdev, int state,
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv))
+ IOCHAN_TO_IOPART,
+ devdata->cmdrsp_rcv))
devdata->chstat.sent_enbdis++;
}
@@ -872,8 +862,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (vnic_hit_high_watermark(devdata,
devdata->max_outstanding_net_xmits)) {
- /* too many NET_XMITs queued over to IOVM - need to wait
- */
+ /* extra NET_XMITs queued over to IOVM - need to wait */
devdata->chstat.reject_count++;
if (!devdata->queuefullmsg_logged &&
((devdata->chstat.reject_count & 0x3ff) == 1))
@@ -950,16 +939,12 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
devdata->net_stats.tx_bytes += skb->len;
devdata->chstat.sent_xmit++;
- /* check to see if we have hit the high watermark for
- * netif_stop_queue()
- */
+ /* check if we have hit the high watermark for netif_stop_queue() */
if (vnic_hit_high_watermark(devdata,
devdata->upper_threshold_net_xmits)) {
- /* too many NET_XMITs queued over to IOVM - need to wait */
- netif_stop_queue(netdev); /* calling stop queue - call
- * netif_wake_queue() after lower
- * threshold
- */
+ /* extra NET_XMITs queued over to IOVM - need to wait */
+ /* stop queue - call netif_wake_queue() after lower threshold */
+ netif_stop_queue(netdev);
dev_dbg(&netdev->dev,
"%s busy - invoking iovm flow control\n",
__func__);
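
The watermark checks here implement standard driver-side TX flow control: stop the queue when in-flight transmits cross a high watermark, wake it from the completion path once they drain below a low one. A compact sketch of the pattern (thresholds and counters hypothetical):

#include <linux/netdevice.h>

/* transmit path: throttle when too many xmits are outstanding */
static void tx_throttle(struct net_device *netdev, unsigned long inflight,
			unsigned long hi)
{
	if (inflight >= hi)
		netif_stop_queue(netdev);
}

/* completion path: resume once enough xmits have completed */
static void tx_unthrottle(struct net_device *netdev, unsigned long inflight,
			  unsigned long lo)
{
	if (netif_queue_stopped(netdev) && inflight <= lo)
		netif_wake_queue(netdev);
}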
@@ -1312,16 +1297,13 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
break;
}
}
+ /* accept pkt, dest matches a multicast addr */
if (found_mc)
- break; /* accept packet, dest
- * matches a multicast
- * address
- */
+ break;
}
+ /* accept packet, h_dest must match vnic mac address */
} else if (skb->pkt_type == PACKET_HOST) {
- break; /* accept packet, h_dest must match vnic
- * mac address
- */
+ break;
} else if (skb->pkt_type == PACKET_OTHERHOST) {
/* something is not right */
dev_err(&devdata->netdev->dev,
@@ -1409,14 +1391,10 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
if (!vbuf)
return -ENOMEM;
- /* for each vnic channel
- * dump out channel specific data
- */
+ /* for each vnic channel dump out channel specific data */
rcu_read_lock();
for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
- /*
- * Only consider netdevs that are visornic, and are open
- */
+ /* Only consider netdevs that are visornic, and are open */
if ((dev->netdev_ops != &visornic_dev_ops) ||
(!netif_queue_stopped(dev)))
continue;
@@ -1643,12 +1621,12 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
/* ASSERT netdev == vnicinfo->netdev; */
if ((netdev == devdata->netdev) &&
netif_queue_stopped(netdev)) {
- /* check to see if we have crossed
- * the lower watermark for
- * netif_wake_queue()
+ /* check if we have crossed the lower watermark
+ * for netif_wake_queue()
*/
- if (vnic_hit_low_watermark(devdata,
- devdata->lower_threshold_net_xmits)) {
+ if (vnic_hit_low_watermark
+ (devdata,
+ devdata->lower_threshold_net_xmits)) {
/* enough NET_XMITs completed
* so can restart netif queue
*/
@@ -1712,10 +1690,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
send_rcv_posts_if_needed(devdata);
service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
- /*
- * If there aren't any more packets to receive
- * stop the poll
- */
+ /* If there aren't any more packets to receive, stop the poll */
if (rx_count < budget)
napi_complete(napi);
@@ -1867,8 +1842,7 @@ static int visornic_probe(struct visor_device *dev)
setup_timer(&devdata->irq_poll_timer, poll_for_irq,
(unsigned long)devdata);
- /*
- * Note: This time has to start running before the while
+ /* Note: This timer has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
*/
@@ -1897,8 +1871,7 @@ static int visornic_probe(struct visor_device *dev)
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- /*
- * Note: Interupts have to be enable before the while
+ /* Note: Interrupts have to be enabled before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
*/
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index df992c3cb..ba9fe3bc2 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -17,7 +17,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ctype.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/vme.h>
@@ -25,16 +25,11 @@
static const char driver_name[] = "pio2_gpio";
-static struct pio2_card *gpio_to_pio2_card(struct gpio_chip *chip)
-{
- return container_of(chip, struct pio2_card, gc);
-}
-
static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
u8 reg;
int retval;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -71,7 +66,7 @@ static void pio2_gpio_set(struct gpio_chip *chip,
{
u8 reg;
int retval;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -97,10 +92,10 @@ static void pio2_gpio_set(struct gpio_chip *chip,
}
/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
{
int data;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -116,10 +111,11 @@ static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
}
/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+static int pio2_gpio_dir_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
int data;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -205,7 +201,7 @@ int pio2_gpio_init(struct pio2_card *card)
card->gc.set = pio2_gpio_set;
/* This function adds a memory mapped GPIO chip */
- retval = gpiochip_add(&card->gc);
+ retval = gpiochip_add_data(&card->gc, card);
if (retval) {
dev_err(&card->vdev->dev, "Unable to register GPIO\n");
kfree(card->gc.label);
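
This conversion swaps the container_of() helper for gpiolib's data pointer: gpiochip_add_data() stores a driver pointer that every callback can fetch with gpiochip_get_data(). A minimal sketch of the pairing (struct and function names hypothetical):

#include <linux/gpio/driver.h>

struct my_card {
	struct gpio_chip gc;
	/* driver state ... */
};

static int my_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct my_card *card = gpiochip_get_data(chip); /* set at add time */

	(void)card;	/* read the line state from 'card' here */
	return 0;
}

static int my_gpio_init(struct my_card *card)
{
	card->gc.get = my_gpio_get;
	/* second argument becomes the gpiochip_get_data() payload */
	return gpiochip_add_data(&card->gc, card);
}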
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 1e6c0c4a0..654d072bd 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -36,8 +36,10 @@
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-07-2003 Bryan YC Fan: Add MAXIM2827/2825 and RFMD2959 support.
- * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and BBvCalculateParameter().
- * cancel the setting of MAC_REG_SOFTPWRCTL on BBbVT3253Init().
+ * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and
+ * BBvCalculateParameter().
+ * cancel the setting of MAC_REG_SOFTPWRCTL on
+ * BBbVT3253Init().
* Add the comments.
* 09-01-2003 Bryan YC Fan: RF & BB tables updated.
* Modified BBvLoopbackOn & BBvLoopbackOff().
@@ -66,7 +68,7 @@
/*--------------------- Static Variables --------------------------*/
#define CB_VT3253_INIT_FOR_RFMD 446
-static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
+static const unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
{0x00, 0x30},
{0x01, 0x00},
{0x02, 0x00},
@@ -516,7 +518,7 @@ static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
};
#define CB_VT3253B0_INIT_FOR_RFMD 256
-static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
+static const unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -777,7 +779,8 @@ static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
#define CB_VT3253B0_AGC_FOR_RFMD2959 195
/* For RFMD2959 */
-static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
+static
+unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x80},
@@ -977,7 +980,8 @@ static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] =
#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
/* For AIROHA */
-static unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
+static
+unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -2160,9 +2164,13 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* {{ RobertYu:20050223, request by JerryChung */
- /* Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
+ /* Init ANT B select, TX Config CR09 = 0x61->0x45,
+ * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
+ */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
- /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
+ /* Init ANT B select, RX Config CR10 = 0x28->0x2A,
+ * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
+ */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 43a4fb1f3..b4e8c4318 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -77,8 +77,10 @@ BBuGetFrameTime(
void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char *pbyData);
-bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char byData);
+bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr,
+ unsigned char *pbyData);
+bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr,
+ unsigned char byData);
void BBvSetShortSlotTime(struct vnt_private *);
void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 3d338122b..afb1e8bde 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -336,7 +336,8 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
- VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0, priv->byCWMaxMin);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0,
+ priv->byCWMaxMin);
}
priv->byPacketType = CARDbyGetPktType(priv);
@@ -373,9 +374,12 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
+ VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST,
+ (u32)qwTSFOffset);
+ VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4,
+ (u32)(qwTSFOffset >> 32));
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL,
+ TFTCTL_TSFSYNCEN);
}
return true;
}
@@ -407,7 +411,8 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
priv->wBeaconInterval = wBeaconInterval;
/* Set NextTBTT */
VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
+ VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4,
+ (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
return true;
@@ -433,15 +438,19 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE2);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE3);
break;
}
@@ -451,7 +460,8 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
priv->bRadioOff = true;
pr_debug("chester power off\n");
- MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0,
+ LED_ACTSET); /* LED issue */
return bResult;
}
@@ -488,21 +498,24 @@ bool CARDbRadioPowerOn(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1);
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 |
- SOFTPWRCTL_SWPE3));
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
break;
}
priv->bRadioOff = false;
pr_debug("chester power on\n");
- MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0,
+ LED_ACTSET); /* LED issue */
return bResult;
}
@@ -717,55 +730,72 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_36M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_36M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_48M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_48M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72,
+ MAKEWORD(byTxRate, byRsvTime));
/* Set to Page0 */
MACvSelectPage0(priv->PortOffset);
@@ -830,7 +860,8 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
*
* Return Value: none
*/
-void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
+void CARDvSetLoopbackMode(struct vnt_private *priv,
+ unsigned short wLoopbackMode)
{
switch (wLoopbackMode) {
case CARD_LB_NONE:
@@ -965,7 +996,8 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
*
* Return Value: none
*/
-void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval)
+void CARDvSetFirstNextTBTT(struct vnt_private *priv,
+ unsigned short wBeaconInterval)
{
void __iomem *dwIoBase = priv->PortOffset;
u64 qwNextTBTT = 0;
@@ -993,7 +1025,8 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInter
*
* Return Value: none
*/
-void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval)
+void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
+ unsigned short wBeaconInterval)
{
void __iomem *dwIoBase = priv->PortOffset;
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 16cca49e6..0203c7fd9 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -38,7 +38,8 @@
* LOBYTE is MAC LB mode, HIBYTE is MII LB mode
*/
#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0)
-#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) /* PHY must ISO, avoid MAC loopback packet go out */
+/* PHY must be isolated to keep MAC loopback packets from going out */
+#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0)
#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0)
#define DEFAULT_MSDU_LIFETIME 512 /* ms */
@@ -71,8 +72,10 @@ void CARDvUpdateBasicTopRate(struct vnt_private *);
bool CARDbIsOFDMinBasicRate(struct vnt_private *);
void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *);
-void CARDvSetFirstNextTBTT(struct vnt_private *, unsigned short wBeaconInterval);
-void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF, unsigned short wBeaconInterval);
+void CARDvSetFirstNextTBTT(struct vnt_private *,
+ unsigned short wBeaconInterval);
+void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF,
+ unsigned short wBeaconInterval);
bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 9ac1ef9d0..b7d43a562 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -144,7 +144,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&vnt_supported_5ghz_band;
/* fallthrough */
case RF_RFMD2959:
@@ -159,7 +159,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&vnt_supported_2ghz_band;
break;
}
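
These hunks track the mac80211/cfg80211 rename that dropped the old IEEE80211_BAND_* aliases in favor of the nl80211 names; both index wiphy->bands the same way, so the change is mechanical. Illustration (hypothetical helper):

#include <net/cfg80211.h>

static struct ieee80211_supported_band *get_5ghz_band(struct wiphy *wiphy)
{
	return wiphy->bands[NL80211_BAND_5GHZ]; /* was IEEE80211_BAND_5GHZ */
}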
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 9fbc71724..2d7f6ae89 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -157,7 +157,8 @@
/* TD_INFO flags control bit */
#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
-#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb (hostap) */
+/* check if called from private skb (hostap) */
+#define TD_FLAGS_PRIV_SKB 0x02
#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
/*
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c3eea07ca..494164045 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -812,7 +812,7 @@ static int vnt_int_report_rate(struct vnt_private *priv,
else if (fb_option & FIFOCTL_AUTO_FB_1)
tx_rate = fallback_rate1[tx_rate][retry];
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
idx = tx_rate - RATE_6M;
else
idx = tx_rate;
@@ -1290,7 +1290,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
(conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
set_channel(priv, conf->chandef.chan);
- if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
bb_type = BB_TYPE_11A;
else
bb_type = BB_TYPE_11G;
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 45196c6e9..8e13f7f41 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -47,7 +47,8 @@
*
* Revision History:
* 08-22-2003 Kyle Hsu : Porting MAC functions from sim53
- * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()& MACvEnableBusSusEn()
+ * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()&
+ * MACvEnableBusSusEn()
* 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry
*
*/
@@ -138,7 +139,8 @@ bool MACbIsIntDisable(struct vnt_private *priv)
* Return Value: none
*
*/
-void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
+void MACvSetShortRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit)
{
void __iomem *io_base = priv->PortOffset;
/* set SRT */
@@ -160,7 +162,8 @@ void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit
* Return Value: none
*
*/
-void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
+void MACvSetLongRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit)
{
void __iomem *io_base = priv->PortOffset;
/* set LRT */
@@ -304,7 +307,8 @@ bool MACbSoftwareReset(struct vnt_private *priv)
/*
* Description:
- * save some important register's value, then do reset, then restore register's value
+ * save some important registers' values, then do reset, then restore
+ * the registers' values
*
* Parameters:
* In:
@@ -738,7 +742,8 @@ void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
* Return Value: none
*
*/
-void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime)
+void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
+ unsigned int uDelayTime)
{
void __iomem *io_base = priv->PortOffset;
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 1a2dda09b..e4c3165ae 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1307,7 +1307,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
}
if (current_rate > RATE_11M) {
- if (info->band == IEEE80211_BAND_5GHZ) {
+ if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 9ec49e653..ee9927720 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -72,7 +72,8 @@
* Return Value: data read
*
*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset)
+unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+ unsigned char byContntOffset)
{
unsigned short wDelay, wNoACK;
unsigned char byWait;
@@ -124,7 +125,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase, (unsigned char)ii);
+ *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
+ (unsigned char)ii);
pbyEepromRegs++;
}
}
@@ -141,7 +143,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress)
+void SROMvReadEtherAddress(void __iomem *dwIoBase,
+ unsigned char *pbyEtherAddress)
{
unsigned char ii;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 9417c935f..882fe54ce 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -138,7 +138,7 @@ static const u16 vnt_frame_time[MAX_RATE] = {
*
*/
unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate)
+ unsigned int frame_length, u16 tx_rate)
{
unsigned int frame_time;
unsigned int preamble;
@@ -195,7 +195,7 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
*
*/
void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
{
u32 bit_count;
u32 count = 0;
@@ -355,7 +355,7 @@ void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode)
}
vnt_control_out(priv, MESSAGE_TYPE_SET_ANTMD,
- (u16)antenna_mode, 0, 0, NULL);
+ (u16)antenna_mode, 0, 0, NULL);
}
/*
@@ -383,7 +383,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
u8 data;
status = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
- MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
+ MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
priv->eeprom);
if (status != STATUS_SUCCESS)
return false;
@@ -393,7 +393,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
if ((priv->rf_type == RF_AL2230) ||
- (priv->rf_type == RF_AL2230S)) {
+ (priv->rf_type == RF_AL2230S)) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
@@ -457,21 +457,21 @@ int vnt_vt3184_init(struct vnt_private *priv)
memcpy(array, addr, length);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBREG, length, array);
+ MESSAGE_REQUEST_BBREG, length, array);
memcpy(array, agc, length_agc);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBAGC, length_agc, array);
+ MESSAGE_REQUEST_BBAGC, length_agc, array);
if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3342A0)) {
+ (priv->rf_type == RF_VT3342A0)) {
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x23);
+ MAC_REG_ITRTMSET, 0x23);
vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
} else if (priv->rf_type == RF_VT3226D0) {
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x11);
+ MAC_REG_ITRTMSET, 0x11);
vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
}
@@ -482,12 +482,12 @@ int vnt_vt3184_init(struct vnt_private *priv)
/* Fix for TX USB resets from vendors driver */
vnt_control_in(priv, MESSAGE_TYPE_READ, USB_REG4,
- MESSAGE_REQUEST_MEM, sizeof(data), &data);
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
data |= 0x2;
vnt_control_out(priv, MESSAGE_TYPE_WRITE, USB_REG4,
- MESSAGE_REQUEST_MEM, sizeof(data), &data);
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
return true;
}
@@ -814,7 +814,7 @@ void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
priv->bb_pre_ed_index = ed_inx;
dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n",
- __func__, priv->bb_pre_ed_rssi);
+ __func__, priv->bb_pre_ed_rssi);
if (!cr_201 && !cr_206)
return;
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index a0fe288c1..a4299f405 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -153,7 +153,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&vnt_supported_5ghz_band;
/* fallthrough */
case RF_AL2230:
@@ -167,7 +167,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&vnt_supported_2ghz_band;
break;
}
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index 8d05acbc0..73538fb4e 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -97,7 +97,7 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
else if (context->fb_option == AUTO_FB_1)
tx_rate = fallback_rate1[tx_rate][retry];
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
idx = tx_rate - RATE_6M;
else
idx = tx_rate;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f9afab77b..ac4fecb30 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -238,7 +238,7 @@ static int vnt_init_registers(struct vnt_private *priv)
priv->tx_antenna_mode = ANT_B;
priv->rx_antenna_sel = 1;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_A;
else
priv->rx_antenna_mode = ANT_B;
@@ -248,14 +248,14 @@ static int vnt_init_registers(struct vnt_private *priv)
if (antenna & EEP_ANTENNA_AUX) {
priv->tx_antenna_mode = ANT_A;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_B;
else
priv->rx_antenna_mode = ANT_A;
} else {
priv->tx_antenna_mode = ANT_B;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_A;
else
priv->rx_antenna_mode = ANT_B;
@@ -662,7 +662,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
(conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
vnt_set_channel(priv, conf->chandef.chan->hw_value);
- if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
bb_type = BB_TYPE_11A;
else
bb_type = BB_TYPE_11G;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index b74e32001..aa59e7f14 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -813,7 +813,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
}
if (current_rate > RATE_11M) {
- if (info->band == IEEE80211_BAND_5GHZ) {
+ if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 4846a898d..95faaeb74 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -97,7 +97,7 @@ void vnt_run_command(struct work_struct *work)
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return;
- if (priv->cmd_running != true)
+ if (!priv->cmd_running)
return;
switch (priv->command_state) {
@@ -143,13 +143,13 @@ void vnt_run_command(struct work_struct *work)
if (priv->rx_antenna_sel == 0) {
priv->rx_antenna_sel = 1;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
vnt_set_antenna_mode(priv, ANT_RXA);
else
vnt_set_antenna_mode(priv, ANT_RXB);
} else {
priv->rx_antenna_sel = 0;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
vnt_set_antenna_mode(priv, ANT_RXB);
else
vnt_set_antenna_mode(priv, ANT_RXA);
@@ -174,7 +174,7 @@ int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
ADD_ONE_WITH_WRAP_AROUND(priv->cmd_enqueue_idx, CMD_Q_SIZE);
priv->free_cmd_queue--;
- if (priv->cmd_running == false)
+ if (!priv->cmd_running)
vnt_cmd_complete(priv);
return true;
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index dce9cee91..73f7fefd3 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -1,6 +1,5 @@
config WILC1000
tristate
- select WIRELESS_EXT
---help---
This module only supports IEEE 802.11n WiFi.
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0a922c7c7..953584248 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -2,6 +2,7 @@
#include <linux/time.h>
#include <linux/kthread.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include "host_interface.h"
#include "coreconfigurator.h"
#include "wilc_wlan.h"
@@ -230,10 +231,10 @@ bool wilc_optaining_ip;
static u8 P2P_LISTEN_STATE;
static struct task_struct *hif_thread_handler;
static struct message_queue hif_msg_q;
-static struct semaphore hif_sema_thread;
-static struct semaphore hif_sema_driver;
-static struct semaphore hif_sema_wait_response;
-static struct semaphore hif_sema_deinit;
+static struct completion hif_thread_comp;
+static struct completion hif_driver_comp;
+static struct completion hif_wait_response;
+static struct mutex hif_deinit_lock;
static struct timer_list periodic_rssi;
u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
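
The signalling conversion above is the stock semaphore-to-completion swap: a binary semaphore used purely for wait/notify becomes a struct completion, while hif_sema_deinit, which actually guarded a critical section, becomes a mutex. Sketch of the wait/notify half (hypothetical names):

#include <linux/completion.h>

static DECLARE_COMPLETION(work_done);

static void waiter(void)	/* was: down(&sema) */
{
	wait_for_completion(&work_done);
}

static void notifier(void)	/* was: up(&sema) */
{
	complete(&work_done);
}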
@@ -262,6 +263,7 @@ static struct wilc_vif *join_req_vif;
static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
+static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
/* The u8IfIdx ranges from 0 to NUM_CONCURRENT_IFC - 1, but index 0 is used
 * for a special purpose in the wilc device, so we add 1 so indexing starts at 1.
@@ -305,10 +307,10 @@ static void handle_set_channel(struct wilc_vif *vif,
netdev_err(vif->ndev, "Failed to set channel\n");
}
-static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
- struct drv_handler *hif_drv_handler)
+static void handle_set_wfi_drv_handler(struct wilc_vif *vif,
+ struct drv_handler *hif_drv_handler)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_SET_DRV_HANDLER;
@@ -316,24 +318,20 @@ static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
wid.val = (s8 *)hif_drv_handler;
wid.size = sizeof(*hif_drv_handler);
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- hif_drv_handler->handler);
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ hif_drv_handler->handler);
if (!hif_drv_handler->handler)
- up(&hif_sema_driver);
+ complete(&hif_driver_comp);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_set_operation_mode(struct wilc_vif *vif,
- struct op_mode *hif_op_mode)
+static void handle_set_operation_mode(struct wilc_vif *vif,
+ struct op_mode *hif_op_mode)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_SET_OPERATION_MODE;
@@ -341,23 +339,19 @@ static s32 handle_set_operation_mode(struct wilc_vif *vif,
wid.val = (s8 *)&hif_op_mode->mode;
wid.size = sizeof(u32);
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if ((hif_op_mode->mode) == IDLE_MODE)
- up(&hif_sema_driver);
+ complete(&hif_driver_comp);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
+static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
char firmware_ip_addr[4] = {0};
@@ -371,22 +365,18 @@ static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
wid.val = (u8 *)ip_addr;
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
host_int_get_ipaddress(vif, firmware_ip_addr, idx);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set IP address\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
+static void handle_get_ip_address(struct wilc_vif *vif, u8 idx)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_IP_ADDRESS;
@@ -394,8 +384,8 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
memcpy(get_ip[idx], wid.val, IP_ALEN);
@@ -404,18 +394,14 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
if (memcmp(get_ip[idx], set_ip[idx], IP_ALEN) != 0)
wilc_setup_ipaddress(vif, set_ip[idx], idx);
- if (result != 0) {
+ if (ret)
netdev_err(vif->ndev, "Failed to get IP address\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_get_mac_address(struct wilc_vif *vif,
- struct get_mac_addr *get_mac_addr)
+static void handle_get_mac_address(struct wilc_vif *vif,
+ struct get_mac_addr *get_mac_addr)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_MAC_ADDR;
@@ -423,16 +409,12 @@ static s32 handle_get_mac_address(struct wilc_vif *vif,
wid.val = get_mac_addr->mac_addr;
wid.size = ETH_ALEN;
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to get mac address\n");
- result = -EFAULT;
- }
- up(&hif_sema_wait_response);
-
- return result;
+ complete(&hif_wait_response);
}
static s32 handle_cfg_param(struct wilc_vif *vif,
@@ -455,7 +437,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "check value 6 over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -471,7 +453,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Impossible value\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -486,7 +468,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -500,7 +482,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Invalid power mode\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -515,7 +497,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -530,7 +512,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -545,7 +527,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -560,7 +542,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -574,7 +556,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -588,7 +570,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Short slot(2) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -602,7 +584,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "TXOP prot disable\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -617,7 +599,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -632,7 +614,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -646,7 +628,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Site survey disable\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -661,7 +643,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -676,7 +658,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Active time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -691,7 +673,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Passive time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -713,7 +695,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "out of TX rate\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -724,28 +706,24 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
if (result)
netdev_err(vif->ndev, "Error in setting CFG params\n");
-ERRORHANDLER:
+unlock:
mutex_unlock(&hif_drv->cfg_values_lock);
return result;
}
-static s32 Handle_ScanDone(struct wilc_vif *vif,
- enum scan_event enuEvent);
-
-static s32 Handle_Scan(struct wilc_vif *vif,
- struct scan_attr *pstrHostIFscanAttr)
+static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
{
s32 result = 0;
- struct wid strWIDList[5];
- u32 u32WidsCount = 0;
+ struct wid wid_list[5];
+ u32 index = 0;
u32 i;
- u8 *pu8Buffer;
+ u8 *buffer;
u8 valuesize = 0;
u8 *pu8HdnNtwrksWidVal = NULL;
struct host_if_drv *hif_drv = vif->hif_drv;
- hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result;
- hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg;
+ hif_drv->usr_scan_req.scan_result = scan_info->result;
+ hif_drv->usr_scan_req.arg = scan_info->arg;
if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
(hif_drv->hif_state < HOST_IF_CONNECTED)) {
@@ -762,72 +740,70 @@ static s32 Handle_Scan(struct wilc_vif *vif,
hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
- strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ;
- strWIDList[u32WidsCount].type = WID_STR;
+ wid_list[index].id = (u16)WID_SSID_PROBE_REQ;
+ wid_list[index].type = WID_STR;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++)
- valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1);
+ for (i = 0; i < scan_info->hidden_network.n_ssids; i++)
+ valuesize += ((scan_info->hidden_network.net_info[i].ssid_len) + 1);
pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL);
- strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal;
- if (strWIDList[u32WidsCount].val) {
- pu8Buffer = strWIDList[u32WidsCount].val;
+ wid_list[index].val = pu8HdnNtwrksWidVal;
+ if (wid_list[index].val) {
+ buffer = wid_list[index].val;
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids;
+ *buffer++ = scan_info->hidden_network.n_ssids;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) {
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
- memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len);
- pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
+ for (i = 0; i < scan_info->hidden_network.n_ssids; i++) {
+ *buffer++ = scan_info->hidden_network.net_info[i].ssid_len;
+ memcpy(buffer, scan_info->hidden_network.net_info[i].ssid, scan_info->hidden_network.net_info[i].ssid_len);
+ buffer += scan_info->hidden_network.net_info[i].ssid_len;
}
- strWIDList[u32WidsCount].size = (s32)(valuesize + 1);
- u32WidsCount++;
+ wid_list[index].size = (s32)(valuesize + 1);
+ index++;
}
- {
- strWIDList[u32WidsCount].id = WID_INFO_ELEMENT_PROBE;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
- strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ies;
- strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ies_len;
- u32WidsCount++;
- }
+ wid_list[index].id = WID_INFO_ELEMENT_PROBE;
+ wid_list[index].type = WID_BIN_DATA;
+ wid_list[index].val = scan_info->ies;
+ wid_list[index].size = scan_info->ies_len;
+ index++;
- strWIDList[u32WidsCount].id = WID_SCAN_TYPE;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->type;
- u32WidsCount++;
+ wid_list[index].id = WID_SCAN_TYPE;
+ wid_list[index].type = WID_CHAR;
+ wid_list[index].size = sizeof(char);
+ wid_list[index].val = (s8 *)&scan_info->type;
+ index++;
- strWIDList[u32WidsCount].id = WID_SCAN_CHANNEL_LIST;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
+ wid_list[index].id = WID_SCAN_CHANNEL_LIST;
+ wid_list[index].type = WID_BIN_DATA;
- if (pstrHostIFscanAttr->ch_freq_list &&
- pstrHostIFscanAttr->ch_list_len > 0) {
+ if (scan_info->ch_freq_list &&
+ scan_info->ch_list_len > 0) {
int i;
- for (i = 0; i < pstrHostIFscanAttr->ch_list_len; i++) {
- if (pstrHostIFscanAttr->ch_freq_list[i] > 0)
- pstrHostIFscanAttr->ch_freq_list[i] = pstrHostIFscanAttr->ch_freq_list[i] - 1;
+ for (i = 0; i < scan_info->ch_list_len; i++) {
+ if (scan_info->ch_freq_list[i] > 0)
+ scan_info->ch_freq_list[i] = scan_info->ch_freq_list[i] - 1;
}
}
- strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ch_freq_list;
- strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ch_list_len;
- u32WidsCount++;
+ wid_list[index].val = scan_info->ch_freq_list;
+ wid_list[index].size = scan_info->ch_list_len;
+ index++;
- strWIDList[u32WidsCount].id = WID_START_SCAN_REQ;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->src;
- u32WidsCount++;
+ wid_list[index].id = WID_START_SCAN_REQ;
+ wid_list[index].type = WID_CHAR;
+ wid_list[index].size = sizeof(char);
+ wid_list[index].val = (s8 *)&scan_info->src;
+ index++;
if (hif_drv->hif_state == HOST_IF_CONNECTED)
scan_while_connected = true;
else if (hif_drv->hif_state == HOST_IF_IDLE)
scan_while_connected = false;
- result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
- u32WidsCount,
+ result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+ index,
wilc_get_vif_idx(vif));
if (result)
@@ -839,13 +815,13 @@ ERRORHANDLER:
Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
}
- kfree(pstrHostIFscanAttr->ch_freq_list);
- pstrHostIFscanAttr->ch_freq_list = NULL;
+ kfree(scan_info->ch_freq_list);
+ scan_info->ch_freq_list = NULL;
- kfree(pstrHostIFscanAttr->ies);
- pstrHostIFscanAttr->ies = NULL;
- kfree(pstrHostIFscanAttr->hidden_network.net_info);
- pstrHostIFscanAttr->hidden_network.net_info = NULL;
+ kfree(scan_info->ies);
+ scan_info->ies = NULL;
+ kfree(scan_info->hidden_network.net_info);
+ scan_info->hidden_network.net_info = NULL;
kfree(pu8HdnNtwrksWidVal);
@@ -1610,7 +1586,7 @@ static int Handle_Key(struct wilc_vif *vif,
&wid, 1,
wilc_get_vif_idx(vif));
}
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
break;
case WPA_RX_GTK:
@@ -1644,10 +1620,10 @@ static int Handle_Key(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
- if (pu8keybuf == NULL) {
+ if (!pu8keybuf) {
ret = -ENOMEM;
goto _WPARxGtk_end_case_;
}
@@ -1673,7 +1649,7 @@ static int Handle_Key(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
}
_WPARxGtk_end_case_:
kfree(pstrHostIFkeyAttr->attr.wpa.key);
@@ -1711,7 +1687,7 @@ _WPARxGtk_end_case_:
strWIDList, 2,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
if (!pu8keybuf) {
@@ -1734,7 +1710,7 @@ _WPARxGtk_end_case_:
&wid, 1,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
}
_WPAPtk_end_case_:
@@ -1856,7 +1832,7 @@ static void Handle_Disconnect(struct wilc_vif *vif)
}
}
- up(&hif_drv->sem_test_disconn_block);
+ complete(&hif_drv->comp_test_disconn_block);
}
void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
@@ -1885,7 +1861,7 @@ static void Handle_GetRssi(struct wilc_vif *vif)
result = -EFAULT;
}
- up(&vif->hif_drv->sem_get_rssi);
+ complete(&vif->hif_drv->comp_get_rssi);
}
static s32 Handle_GetStatistics(struct wilc_vif *vif,
@@ -1938,7 +1914,7 @@ static s32 Handle_GetStatistics(struct wilc_vif *vif,
wilc_enable_tcp_ack_filter(false);
if (pstrStatistics != &vif->wilc->dummy_statistics)
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
return 0;
}
@@ -1979,7 +1955,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
return -EFAULT;
}
- up(&hif_drv->sem_inactive_time);
+ complete(&hif_drv->comp_inactive_time);
return result;
}
@@ -2172,7 +2148,7 @@ static void Handle_DelAllSta(struct wilc_vif *vif,
ERRORHANDLER:
kfree(wid.val);
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
}
static void Handle_DelStation(struct wilc_vif *vif,
@@ -2472,7 +2448,7 @@ static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
{
- s32 ret = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_TX_POWER;
@@ -2485,7 +2461,7 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
}
static int hostIFthread(void *pvArg)
@@ -2518,7 +2494,7 @@ static int hostIFthread(void *pvArg)
switch (msg.id) {
case HOST_IF_MSG_SCAN:
- Handle_Scan(msg.vif, &msg.body.scan_info);
+ handle_scan(msg.vif, &msg.body.scan_info);
break;
case HOST_IF_MSG_CONNECT:
@@ -2667,7 +2643,7 @@ static int hostIFthread(void *pvArg)
}
}
- up(&hif_sema_thread);
+ complete(&hif_thread_comp);
return 0;
}
@@ -2730,7 +2706,8 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Request to remove WEP key\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2758,7 +2735,8 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Default key index\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2791,7 +2769,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "STA - WEP Key\n");
- down(&hif_drv->sem_test_key_block);
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2827,7 +2805,8 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
if (result)
netdev_err(vif->ndev, "AP - WEP Key\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2882,8 +2861,8 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (result)
netdev_err(vif->ndev, "PTK Key\n");
-
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2950,8 +2929,8 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "RX GTK\n");
-
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2961,14 +2940,8 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
int i;
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_KEY;
@@ -3007,7 +2980,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return -EFAULT;
}
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3097,8 +3070,8 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Failed to send message: disconnect\n");
-
- down(&hif_drv->sem_test_disconn_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_disconn_block);
return result;
}
@@ -3110,12 +3083,6 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
{
s32 result = 0;
struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "Driver is null\n");
- return -EFAULT;
- }
wid.id = (u16)WID_ASSOC_RES_INFO;
wid.type = WID_STR;
@@ -3138,12 +3105,6 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
{
int result;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_SET_CHANNEL;
@@ -3219,8 +3180,8 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Failed to send get host ch param\n");
-
- down(&hif_drv->sem_inactive_time);
+ else
+ wait_for_completion(&hif_drv->comp_inactive_time);
*pu32InactiveTime = inactive_time;
@@ -3243,7 +3204,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
return -EFAULT;
}
- down(&hif_drv->sem_get_rssi);
+ wait_for_completion(&hif_drv->comp_get_rssi);
if (!rssi_level) {
netdev_err(vif->ndev, "RSS pointer value is null\n");
@@ -3272,7 +3233,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
}
if (stats != &vif->wilc->dummy_statistics)
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3382,7 +3343,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
scan_while_connected = false;
- sema_init(&hif_sema_wait_response, 0);
+ init_completion(&hif_wait_response);
hif_drv = kzalloc(sizeof(struct host_if_drv), GFP_KERNEL);
if (!hif_drv) {
@@ -3399,15 +3360,15 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
wilc_optaining_ip = false;
if (clients_count == 0) {
- sema_init(&hif_sema_thread, 0);
- sema_init(&hif_sema_driver, 0);
- sema_init(&hif_sema_deinit, 1);
+ init_completion(&hif_thread_comp);
+ init_completion(&hif_driver_comp);
+ mutex_init(&hif_deinit_lock);
}
- sema_init(&hif_drv->sem_test_key_block, 0);
- sema_init(&hif_drv->sem_test_disconn_block, 0);
- sema_init(&hif_drv->sem_get_rssi, 0);
- sema_init(&hif_drv->sem_inactive_time, 0);
+ init_completion(&hif_drv->comp_test_key_block);
+ init_completion(&hif_drv->comp_test_disconn_block);
+ init_completion(&hif_drv->comp_get_rssi);
+ init_completion(&hif_drv->comp_inactive_time);
if (clients_count == 0) {
result = wilc_mq_create(&hif_msg_q);
@@ -3469,7 +3430,7 @@ int wilc_deinit(struct wilc_vif *vif)
return -EFAULT;
}
- down(&hif_sema_deinit);
+ mutex_lock(&hif_deinit_lock);
terminated_handle = hif_drv;
@@ -3479,7 +3440,7 @@ int wilc_deinit(struct wilc_vif *vif)
del_timer_sync(&hif_drv->remain_on_ch_timer);
wilc_set_wfi_drv_handler(vif, 0, 0);
- down(&hif_sema_driver);
+ wait_for_completion(&hif_driver_comp);
if (hif_drv->usr_scan_req.scan_result) {
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
@@ -3494,15 +3455,14 @@ int wilc_deinit(struct wilc_vif *vif)
memset(&msg, 0, sizeof(struct host_if_msg));
if (clients_count == 1) {
- del_timer_sync(&periodic_rssi);
msg.id = HOST_IF_MSG_EXIT;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result != 0)
netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
-
- down(&hif_sema_thread);
+ else
+ wait_for_completion(&hif_thread_comp);
wilc_mq_destroy(&hif_msg_q);
}
@@ -3511,7 +3471,7 @@ int wilc_deinit(struct wilc_vif *vif)
clients_count--;
terminated_handle = NULL;
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return result;
}
@@ -3558,25 +3518,25 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
struct host_if_drv *hif_drv = NULL;
struct wilc_vif *vif;
- down(&hif_sema_deinit);
+ mutex_lock(&hif_deinit_lock);
id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif) {
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
hif_drv = vif->hif_drv;
if (!hif_drv || hif_drv == terminated_handle) {
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
if (!hif_drv->usr_conn_req.conn_result) {
netdev_err(vif->ndev, "there is no current Connect Request\n");
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
@@ -3593,7 +3553,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
if (result)
netdev_err(vif->ndev, "synchronous info (%d)\n", result);
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
@@ -3634,12 +3594,6 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3688,12 +3642,6 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3727,12 +3675,6 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
int result = 0;
struct host_if_msg msg;
struct beacon_attr *beacon_info = &msg.body.beacon_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3776,12 +3718,6 @@ int wilc_del_beacon(struct wilc_vif *vif)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
msg.id = HOST_IF_MSG_DEL_BEACON;
msg.vif = vif;
@@ -3798,12 +3734,6 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
int result = 0;
struct host_if_msg msg;
struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3830,12 +3760,6 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
int result = 0;
struct host_if_msg msg;
struct del_sta *del_sta_info = &msg.body.del_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3858,16 +3782,10 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
int result = 0;
struct host_if_msg msg;
struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
u8 zero_addr[ETH_ALEN] = {0};
int i;
u8 assoc_sta = 0;
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_DEL_ALL_STA;
@@ -3887,8 +3805,8 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
-
- down(&hif_sema_wait_response);
+ else
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3899,12 +3817,6 @@ int wilc_edit_station(struct wilc_vif *vif,
int result = 0;
struct host_if_msg msg;
struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3932,12 +3844,6 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
int result = 0;
struct host_if_msg msg;
struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
return 0;
@@ -3962,12 +3868,6 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
int result = 0;
struct host_if_msg msg;
struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4141,12 +4041,6 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4167,12 +4061,6 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4221,7 +4109,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
*tx_power = msg.body.tx_power.tx_pwr;
return ret;
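
The bulk of the host_interface.c changes above are one mechanical conversion: every semaphore that was initialized to 0 and used purely as a signal (up() in the message handler, down() in the waiting caller) becomes a struct completion, the idiomatic kernel primitive for one-shot "wait until done" signalling. A minimal sketch of the pattern, assuming a kernel build environment and using placeholder demo_* names rather than the driver's own:

#include <linux/completion.h>

struct demo_ctx {
        struct completion done;         /* was: struct semaphore */
};

static void demo_init(struct demo_ctx *ctx)
{
        init_completion(&ctx->done);    /* was: sema_init(&ctx->sem, 0) */
}

/* runs in the handler thread */
static void demo_handler(struct demo_ctx *ctx)
{
        /* ... perform the request ... */
        complete(&ctx->done);           /* was: up(&ctx->sem) */
}

/* runs in the requesting thread */
static void demo_request(struct demo_ctx *ctx)
{
        wait_for_completion(&ctx->done);        /* was: down(&ctx->sem) */
}

The added else branches are part of the same fix: the caller now waits only when wilc_mq_send() actually queued the message, so a send failure can no longer block forever on a completion that nothing will signal.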
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 01f3222a4..8d2dd0db0 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -275,10 +275,10 @@ struct host_if_drv {
struct cfg_param_attr cfg_values;
struct mutex cfg_values_lock;
- struct semaphore sem_test_key_block;
- struct semaphore sem_test_disconn_block;
- struct semaphore sem_get_rssi;
- struct semaphore sem_inactive_time;
+ struct completion comp_test_key_block;
+ struct completion comp_test_disconn_block;
+ struct completion comp_get_rssi;
+ struct completion comp_inactive_time;
struct timer_list scan_timer;
struct timer_list connect_timer;
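
hif_sema_deinit, by contrast, was initialized to 1 and used for mutual exclusion around the deinit and async-info paths, so it becomes a mutex rather than a completion. The equivalent shape, again with placeholder names:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_deinit_lock);          /* was: sema_init(&sem, 1) */

static void demo_deinit_path(void)
{
        mutex_lock(&demo_deinit_lock);          /* was: down(&sem) */
        /* ... tear-down that must not race the async-info path ... */
        mutex_unlock(&demo_deinit_lock);        /* was: up(&sem) */
}

Besides being the semantically correct primitive, a mutex gets lockdep checking and owner tracking that a semaphore used as a lock does not.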
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 7d9e5ded8..242f82f4d 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -24,7 +24,7 @@ struct wilc_wfi_radiotap_cb_hdr {
static struct net_device *wilc_wfi_mon; /* global monitor netdev */
-static u8 srcAdd[6];
+static u8 srcadd[6];
static u8 bssid[6];
static u8 broadcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/**
@@ -59,9 +59,10 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
/* Get WILC header */
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
-
- /* The packet offset field conain info about what type of managment frame */
- /* we are dealing with and ack status */
+ /*
+	 * The packet offset field contains info about what type of management
+	 * frame we are dealing with and the ack status
+ */
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
@@ -105,7 +106,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
hdr->hdr.it_present = cpu_to_le32
- (1 << IEEE80211_RADIOTAP_RATE); /* | */
+ (1 << IEEE80211_RADIOTAP_RATE); /* | */
hdr->rate = 5; /* txrate->bitrate / 5; */
}
@@ -127,8 +128,10 @@ struct tx_complete_mon_data {
static void mgmt_tx_complete(void *priv, int status)
{
struct tx_complete_mon_data *pv_data = priv;
-
- /* incase of fully hosting mode, the freeing will be done in response to the cfg packet */
+ /*
+	 * in the fully hosting mode, the freeing is done
+	 * in response to the cfg packet
+ */
kfree(pv_data->buff);
kfree(pv_data);
@@ -225,11 +228,11 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
skb->dev = mon_priv->real_ndev;
/* Identify if Ethernet or MAC header (data or mgmt) */
- memcpy(srcAdd, &skb->data[10], 6);
+ memcpy(srcadd, &skb->data[10], 6);
memcpy(bssid, &skb->data[16], 6);
/* if source address and bssid fields are equal>>Mac header */
/*send it to mgmt frames handler */
- if (!(memcmp(srcAdd, bssid, 6))) {
+ if (!(memcmp(srcadd, bssid, 6))) {
ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
if (ret)
netdev_err(dev, "fail to mgmt tx\n");
@@ -255,7 +258,8 @@ static const struct net_device_ops wilc_wfi_netdev_ops = {
* @date 12 JUL 2012
* @version 1.0
*/
-struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev)
+struct net_device *WILC_WFI_init_mon_interface(const char *name,
+ struct net_device *real_dev)
{
u32 ret = 0;
struct WILC_WFI_mon_priv *priv;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index ce095b020..9a7fa90dc 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -22,6 +22,7 @@
#include <linux/skbuff.h>
#include <linux/semaphore.h>
+#include <linux/completion.h>
static int dev_state_ev_handler(struct notifier_block *this,
unsigned long event, void *ptr);
@@ -30,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
};
-#define IRQ_WAIT 1
-#define IRQ_NO_WAIT 0
static struct semaphore close_exit_sync;
static int wlan_deinit_locks(struct net_device *dev);
@@ -259,10 +258,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
for (i = 0; i < wilc->vif_num; i++) {
if (wilc->vif[i]->mode == STATION_MODE)
- if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
+ if (ether_addr_equal_unaligned(bssid,
+ wilc->vif[i]->bssid))
return wilc->vif[i]->ndev;
if (wilc->vif[i]->mode == AP_MODE)
- if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN))
+ if (ether_addr_equal_unaligned(bssid1,
+ wilc->vif[i]->bssid))
return wilc->vif[i]->ndev;
}
@@ -303,40 +304,27 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
-#define USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
-
static int linux_wlan_txq_task(void *vp)
{
int ret, txq_count;
struct wilc_vif *vif;
struct wilc *wl;
struct net_device *dev = vp;
-#if defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
-#define TX_BACKOFF_WEIGHT_INCR_STEP (1)
-#define TX_BACKOFF_WEIGHT_DECR_STEP (1)
-#define TX_BACKOFF_WEIGHT_MAX (7)
-#define TX_BACKOFF_WEIGHT_MIN (0)
-#define TX_BACKOFF_WEIGHT_UNIT_MS (10)
- int backoff_weight = TX_BACKOFF_WEIGHT_MIN;
-#endif
vif = netdev_priv(dev);
wl = vif->wilc;
- up(&wl->txq_thread_started);
+ complete(&wl->txq_thread_started);
while (1) {
down(&wl->txq_event);
if (wl->close) {
- up(&wl->txq_thread_started);
+ complete(&wl->txq_thread_started);
while (!kthread_should_stop())
schedule();
break;
}
-#if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
- ret = wilc_wlan_handle_txq(dev, &txq_count);
-#else
do {
ret = wilc_wlan_handle_txq(dev, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
@@ -345,20 +333,7 @@ static int linux_wlan_txq_task(void *vp)
if (netif_queue_stopped(wl->vif[1]->ndev))
netif_wake_queue(wl->vif[1]->ndev);
}
-
- if (ret == WILC_TX_ERR_NO_BUF) {
- backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
- if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
- backoff_weight = TX_BACKOFF_WEIGHT_MAX;
- } else {
- if (backoff_weight > TX_BACKOFF_WEIGHT_MIN) {
- backoff_weight -= TX_BACKOFF_WEIGHT_DECR_STEP;
- if (backoff_weight < TX_BACKOFF_WEIGHT_MIN)
- backoff_weight = TX_BACKOFF_WEIGHT_MIN;
- }
- }
} while (ret == WILC_TX_ERR_NO_BUF && !wl->close);
-#endif
}
return 0;
}
@@ -449,7 +424,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
struct wilc_vif *vif)
{
unsigned char c_val[64];
- unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff};
struct wilc *wilc = vif->wilc;
struct wilc_priv *priv;
struct host_if_drv *hif_drv;
@@ -458,9 +432,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
hif_drv = (struct host_if_drv *)priv->hif_drv;
netdev_dbg(dev, "Host = %p\n", hif_drv);
- wilc_get_mac_address(vif, mac_add);
-
- netdev_dbg(dev, "MAC address is : %pM\n", mac_add);
wilc_get_chipid(wilc, false);
*(int *)c_val = 1;
@@ -622,11 +593,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
0))
goto _fail_;
- memcpy(c_val, mac_add, 6);
-
- if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
- goto _fail_;
-
c_val[0] = DETECT_PROTECT_REPORT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
0, 0))
@@ -691,14 +657,6 @@ void wilc1000_wlan_deinit(struct net_device *dev)
wilc_wlan_stop(wl);
wilc_wlan_cleanup(dev);
-#if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31)
- if (!wl->dev_irq_num &&
- wl->hif_func->disable_interrupt) {
- mutex_lock(&wl->hif_cs);
- wl->hif_func->disable_interrupt(wl);
- mutex_unlock(&wl->hif_cs);
- }
-#endif
wlan_deinit_locks(dev);
wl->initialized = false;
@@ -727,8 +685,7 @@ static int wlan_init_locks(struct net_device *dev)
sema_init(&wl->cfg_event, 0);
sema_init(&wl->sync_event, 0);
-
- sema_init(&wl->txq_thread_started, 0);
+ init_completion(&wl->txq_thread_started);
return 0;
}
@@ -765,7 +722,7 @@ static int wlan_initialize_threads(struct net_device *dev)
wilc->close = 0;
return -ENOBUFS;
}
- down(&wilc->txq_thread_started);
+ wait_for_completion(&wilc->txq_thread_started);
return 0;
}
@@ -896,25 +853,20 @@ static int mac_init_fn(struct net_device *ndev)
int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif;
- struct wilc *wilc;
unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
int i = 0;
- struct wilc_priv *priv;
struct wilc *wl;
vif = netdev_priv(ndev);
wl = vif->wilc;
if (!wl || !wl->dev) {
- netdev_err(ndev, "wilc1000: SPI device not ready\n");
+ netdev_err(ndev, "device not ready\n");
return -ENODEV;
}
- vif = netdev_priv(ndev);
- wilc = vif->wilc;
- priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
ret = wilc_init_host_int(ndev);
@@ -933,13 +885,13 @@ int wilc_mac_open(struct net_device *ndev)
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
0);
- } else if (!wilc_wlan_get_num_conn_ifcs(wilc)) {
+ } else if (!wilc_wlan_get_num_conn_ifcs(wl)) {
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
- wilc->open_ifcs);
+ wl->open_ifcs);
} else {
- if (memcmp(wilc->vif[i ^ 1]->bssid,
- wilc->vif[i ^ 1]->src_addr, 6))
+ if (memcmp(wl->vif[i ^ 1]->bssid,
+ wl->vif[i ^ 1]->src_addr, 6))
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
0);
@@ -969,12 +921,12 @@ int wilc_mac_open(struct net_device *ndev)
wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
vif->ndev->ieee80211_ptr,
- vif->g_struct_frame_reg[0].frame_type,
- vif->g_struct_frame_reg[0].reg);
+ vif->frame_reg[0].type,
+ vif->frame_reg[0].reg);
wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
vif->ndev->ieee80211_ptr,
- vif->g_struct_frame_reg[1].frame_type,
- vif->g_struct_frame_reg[1].reg);
+ vif->frame_reg[1].type,
+ vif->frame_reg[1].reg);
netif_wake_queue(ndev);
wl->open_ifcs++;
vif->mac_opened = 1;
@@ -991,14 +943,10 @@ static struct net_device_stats *mac_stats(struct net_device *dev)
static void wilc_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
- struct wilc_priv *priv;
- struct host_if_drv *hif_drv;
struct wilc_vif *vif;
int i = 0;
- priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
vif = netdev_priv(dev);
- hif_drv = (struct host_if_drv *)priv->hif_drv;
if (dev->flags & IFF_PROMISC)
return;
@@ -1152,7 +1100,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
s8 rssi;
u32 size = 0, length = 0;
struct wilc_vif *vif;
- struct wilc_priv *priv;
s32 ret = 0;
struct wilc *wilc;
@@ -1176,7 +1123,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
return PTR_ERR(buff);
if (strncasecmp(buff, "RSSI", length) == 0) {
- priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
ret = wilc_get_rssi(vif, &rssi);
netdev_info(ndev, "RSSI :%d\n", rssi);
@@ -1263,8 +1209,8 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
}
vif = netdev_priv(wilc->vif[1]->ndev);
- if ((buff[0] == vif->g_struct_frame_reg[0].frame_type && vif->g_struct_frame_reg[0].reg) ||
- (buff[0] == vif->g_struct_frame_reg[1].frame_type && vif->g_struct_frame_reg[1].reg))
+ if ((buff[0] == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
+ (buff[0] == vif->frame_reg[1].type && vif->frame_reg[1].reg))
WILC_WFI_p2p_rx(wilc->vif[1]->ndev, buff, size);
}
@@ -1280,8 +1226,10 @@ void wilc_netdev_cleanup(struct wilc *wilc)
vif[i] = netdev_priv(wilc->vif[i]->ndev);
}
- if (wilc && wilc->firmware)
+ if (wilc && wilc->firmware) {
release_firmware(wilc->firmware);
+ wilc->firmware = NULL;
+ }
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
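
linux_wlan.c applies the same completion conversion to the TX-queue thread's startup handshake: the thread signals once it is running and its creator waits before continuing, while the compile-time TX backoff weights are dropped in favor of the plain retry-until-sent loop. A self-contained sketch of that handshake (all names illustrative):

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct demo_wl {
        struct completion thread_started;
        struct task_struct *thread;
};

static int demo_txq_thread(void *arg)
{
        struct demo_wl *wl = arg;

        complete(&wl->thread_started);  /* tell the creator we are live */
        while (!kthread_should_stop())
                schedule();             /* stand-in for the real TX loop */
        return 0;
}

static int demo_start_txq(struct demo_wl *wl)
{
        init_completion(&wl->thread_started);
        wl->thread = kthread_run(demo_txq_thread, wl, "demo_txq");
        if (IS_ERR(wl->thread))
                return PTR_ERR(wl->thread);
        wait_for_completion(&wl->thread_started);
        return 0;
}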
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index d41b8b679..4268e2f29 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -196,9 +196,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
dev_err(&spi->dev,
"can't write data with the following length: %d\n",
len);
- dev_err(&spi->dev,
- "FAILED due to NULL buffer or ZERO length check the following length: %d\n",
- len);
ret = -EINVAL;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 448a5c8c4..51aff4ff7 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -102,7 +102,7 @@ static u8 op_ifcs;
u8 wilc_initialized = 1;
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -241,7 +241,7 @@ static void refresh_scan(void *user_void, u8 all, bool direct_scan)
struct ieee80211_channel *channel;
if (network_info) {
- freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
rssi = get_rssi_avg(network_info);
@@ -409,7 +409,7 @@ static void CfgScanResult(enum scan_event scan_event,
return;
if (network_info) {
- s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+ s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, s32Freq);
if (!channel)
@@ -451,7 +451,7 @@ static void CfgScanResult(enum scan_event scan_event,
} else if (scan_event == SCAN_EVENT_DONE) {
refresh_scan(priv, 1, false);
- down(&(priv->hSemScanReq));
+ mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
cfg80211_scan_done(priv->pstrScanReq, false);
@@ -459,9 +459,9 @@ static void CfgScanResult(enum scan_event scan_event,
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
- up(&(priv->hSemScanReq));
+ mutex_unlock(&priv->scan_req_lock);
} else if (scan_event == SCAN_EVENT_ABORTED) {
- down(&(priv->hSemScanReq));
+ mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
update_scan_time();
@@ -471,7 +471,7 @@ static void CfgScanResult(enum scan_event scan_event,
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
- up(&(priv->hSemScanReq));
+ mutex_unlock(&priv->scan_req_lock);
}
}
}
@@ -558,11 +558,11 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
if (!pstrWFIDrv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
+ if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
pstrDisconnectNotifInfo->reason = 3;
- } else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
+ else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
pstrDisconnectNotifInfo->reason = 1;
- }
+
cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
pstrDisconnectNotifInfo->ie_len, false,
GFP_KERNEL);
@@ -739,18 +739,15 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
sme->key_idx);
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
u8security = ENCRYPT_ENABLED | WPA2 | TKIP;
- } else {
+ else
u8security = ENCRYPT_ENABLED | WPA2 | AES;
- }
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
u8security = ENCRYPT_ENABLED | WPA | TKIP;
- } else {
+ else
u8security = ENCRYPT_ENABLED | WPA | AES;
- }
-
} else {
s32Error = -ENOTSUPP;
netdev_err(dev, "Not supported cipher\n");
@@ -762,11 +759,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
|| (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) {
- if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP)
u8security = u8security | TKIP;
- } else {
+ else
u8security = u8security | AES;
- }
}
}
@@ -823,11 +819,22 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
struct wilc_priv *priv;
struct host_if_drv *pstrWFIDrv;
struct wilc_vif *vif;
+ struct wilc *wilc;
u8 NullBssid[ETH_ALEN] = {0};
wilc_connecting = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
+ wilc = vif->wilc;
+
+ if (!wilc)
+ return -EIO;
+
+ if (wilc->close) {
+		/* already disconnected; nothing to do */
+ cfg80211_disconnected(dev, 0, NULL, 0, true, GFP_KERNEL);
+ return 0;
+ }
pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
if (!pstrWFIDrv->p2p_connect)
@@ -1115,9 +1122,12 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
}
if (key_index >= 0 && key_index <= 3) {
- memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
- priv->WILC_WFI_wep_key_len[key_index] = 0;
- wilc_remove_wep_key(vif, key_index);
+ if (priv->WILC_WFI_wep_key_len[key_index]) {
+ memset(priv->WILC_WFI_wep_key[key_index], 0,
+ priv->WILC_WFI_wep_key_len[key_index]);
+ priv->WILC_WFI_wep_key_len[key_index] = 0;
+ wilc_remove_wep_key(vif, key_index);
+ }
} else {
wilc_remove_key(priv->hif_drv, mac_addr);
}
@@ -1355,9 +1365,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
u8 channel_list_attr_index = 0;
while (index < len) {
- if (buf[index] == GO_INTENT_ATTR_ID) {
+ if (buf[index] == GO_INTENT_ATTR_ID)
buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1);
- }
if (buf[index] == CHANLIST_ATTR_ID)
channel_list_attr_index = index;
@@ -1369,9 +1378,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
+ for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
buf[j] = wlan_channel;
- }
break;
}
}
@@ -1409,9 +1417,8 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
+ for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
buf[j] = wlan_channel;
- }
break;
}
}
@@ -1451,7 +1458,7 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
return;
}
} else {
- s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ);
+ s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
@@ -1752,15 +1759,15 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
switch (frame_type) {
case PROBE_REQ:
{
- vif->g_struct_frame_reg[0].frame_type = frame_type;
- vif->g_struct_frame_reg[0].reg = reg;
+ vif->frame_reg[0].type = frame_type;
+ vif->frame_reg[0].reg = reg;
}
break;
case ACTION:
{
- vif->g_struct_frame_reg[1].frame_type = frame_type;
- vif->g_struct_frame_reg[1].reg = reg;
+ vif->frame_reg[1].type = frame_type;
+ vif->frame_reg[1].reg = reg;
}
break;
@@ -1797,6 +1804,7 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
wilc_get_rssi(vif, &sinfo->signal);
+ memcpy(mac, priv->au8AssociatedBss, ETH_ALEN);
return 0;
}
@@ -2246,7 +2254,7 @@ static struct wireless_dev *WILC_WFI_CfgAlloc(void)
WILC_WFI_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
WILC_WFI_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
return wdev;
@@ -2269,7 +2277,6 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
}
priv = wdev_priv(wdev);
- sema_init(&(priv->SemHandleUpdateStats), 1);
priv->wdev = wdev;
wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID;
#ifdef CONFIG_PM
@@ -2315,7 +2322,7 @@ int wilc_init_host_int(struct net_device *net)
priv->bInP2PlistenState = false;
- sema_init(&(priv->hSemScanReq), 1);
+ mutex_init(&priv->scan_req_lock);
s32Error = wilc_init(net, &priv->hif_drv);
if (s32Error)
netdev_err(net, "Error while initializing hostinterface\n");
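
The IEEE80211_BAND_2GHZ to NL80211_BAND_2GHZ substitutions in this file track the 4.7 cfg80211 cleanup that folded enum ieee80211_band into enum nl80211_band; the numeric values are unchanged, only the spelling is. The call shape after the rename, as an illustrative helper rather than driver code:

#include <net/cfg80211.h>

static struct ieee80211_channel *demo_channel(struct wiphy *wiphy, u8 ch)
{
        int freq = ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);

        return ieee80211_get_channel(wiphy, freq);
}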
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 4123cffe3..3a561df6d 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -130,8 +130,7 @@ struct wilc_priv {
struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
u8 wilc_groupkey;
/* semaphores */
- struct semaphore SemHandleUpdateStats;
- struct semaphore hSemScanReq;
+ struct mutex scan_req_lock;
/* */
bool gbAutoRateAdjusted;
@@ -139,18 +138,17 @@ struct wilc_priv {
};
-typedef struct {
- u16 frame_type;
+struct frame_reg {
+ u16 type;
bool reg;
-
-} struct_frame_reg;
+};
struct wilc_vif {
u8 idx;
u8 iftype;
int monitor_flag;
int mac_opened;
- struct_frame_reg g_struct_frame_reg[num_reg_frame];
+ struct frame_reg frame_reg[num_reg_frame];
struct net_device_stats netstats;
struct wilc *wilc;
u8 src_addr[ETH_ALEN];
@@ -181,8 +179,7 @@ struct wilc {
struct semaphore cfg_event;
struct semaphore sync_event;
struct semaphore txq_event;
-
- struct semaphore txq_thread_started;
+ struct completion txq_thread_started;
struct task_struct *txq_thread;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index fd938fb43..11e16d56a 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -150,11 +150,6 @@ static u32 pending_base;
static u32 tcp_session;
static u32 pending_acks;
-static inline int init_tcp_tracking(void)
-{
- return 0;
-}
-
static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq)
{
if (tcp_session < 2 * MAX_TCP_SESSION) {
@@ -330,8 +325,11 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
tqe->priv = NULL;
tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
- if (wilc_wlan_txq_add_to_head(vif, tqe))
+ if (wilc_wlan_txq_add_to_head(vif, tqe)) {
+ kfree(tqe);
return 0;
+ }
+
return 1;
}
@@ -626,13 +624,12 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if ((reg & 0x1) == 0) {
break;
- } else {
- counter++;
- if (counter > 200) {
- counter = 0;
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
- break;
- }
+ }
+ counter++;
+ if (counter > 200) {
+ counter = 0;
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
+ break;
}
} while (!wilc->quit);
@@ -658,9 +655,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if ((reg >> 2) & 0x1) {
entries = ((reg >> 3) & 0x3f);
break;
- } else {
- release_bus(wilc, RELEASE_ALLOW_SLEEP);
}
+ release_bus(wilc, RELEASE_ALLOW_SLEEP);
} while (--timeout);
if (timeout <= 0) {
ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0);
@@ -679,9 +675,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (!ret)
break;
break;
- } else {
- break;
}
+ break;
} while (1);
if (!ret)
@@ -900,8 +895,6 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
DATA_INT_CLR | ENABLE_RX_VMM);
ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
- if (!ret)
- goto _end_;
_end_:
if (ret) {
offset += size;
@@ -951,10 +944,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
blksz = BIT(12);
dma_buffer = kmalloc(blksz, GFP_KERNEL);
- if (!dma_buffer) {
- ret = -EIO;
- goto _fail_1;
- }
+ if (!dma_buffer)
+ return -EIO;
offset = 0;
do {
@@ -992,8 +983,6 @@ _fail_:
kfree(dma_buffer);
-_fail_1:
-
return (ret < 0) ? ret : 0;
}
@@ -1211,7 +1200,7 @@ static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
return 0;
}
-int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler)
{
u32 offset;
@@ -1226,7 +1215,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
offset = wilc->cfg_frame_offset;
ret_size = wilc_wlan_cfg_set_wid(wilc->cfg_frame.frame, offset,
- (u16)wid, buffer, buffer_size);
+ wid, buffer, buffer_size);
offset += ret_size;
wilc->cfg_frame_offset = offset;
@@ -1253,7 +1242,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
return ret_size;
}
-int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler)
{
u32 offset;
@@ -1267,8 +1256,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
wilc->cfg_frame_offset = 0;
offset = wilc->cfg_frame_offset;
- ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset,
- (u16)wid);
+ ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset, wid);
offset += ret_size;
wilc->cfg_frame_offset = offset;
@@ -1291,9 +1279,9 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
return ret_size;
}
-int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size)
+int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size)
{
- return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+ return wilc_wlan_cfg_get_wid_value(wid, buffer, buffer_size);
}
int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
@@ -1440,7 +1428,6 @@ int wilc_wlan_init(struct net_device *dev)
ret = -EIO;
goto _fail_;
}
- init_tcp_tracking();
return 1;
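
One behavioral fix above deserves a note: when wilc_wlan_txq_add_to_head() rejects an entry, the entry is now freed instead of leaked. The ownership rule it enforces, sketched with a hypothetical queue (the demo_* names are not in the driver):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_el {
        struct list_head node;
        int payload;
};

static int demo_enqueue(struct list_head *q, bool accept, int payload)
{
        struct demo_el *el = kzalloc(sizeof(*el), GFP_KERNEL);

        if (!el)
                return -ENOMEM;
        el->payload = payload;
        if (!accept) {          /* queue refused the element */
                kfree(el);      /* caller still owns it: free, don't leak */
                return -EBUSY;
        }
        list_add_tail(&el->node, q);    /* queue owns el from here on */
        return 0;
}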
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index bcd4bfa5a..30e5312ee 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -284,11 +284,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
void wilc_handle_isr(struct wilc *wilc);
void wilc_wlan_cleanup(struct net_device *dev);
-int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler);
-int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler);
-int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size);
+int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size);
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
u32 buffer_size, wilc_tx_complete_func_t func);
void wilc_chip_sleep_manually(struct wilc *wilc);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index b3425b9ce..926fc1631 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -230,7 +230,7 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 siz
buf[1] = (u8)(id >> 8);
buf[2] = (u8)size;
- if ((str != NULL) && (size != 0))
+ if ((str) && (size != 0))
memcpy(&buf[3], str, size);
return (size + 3);
@@ -251,11 +251,10 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
buf[2] = (u8)size;
buf[3] = (u8)(size >> 8);
- if ((b != NULL) && (size != 0)) {
+ if ((b) && (size != 0)) {
memcpy(&buf[4], b, size);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < size; i++)
checksum += buf[i + 4];
- }
}
buf[size + 4] = checksum;
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 83cf84dd6..410bfc034 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -15,18 +15,6 @@
/********************************************
*
- * Debug Flags
- *
- ********************************************/
-
-#define N_INIT 0x00000001
-#define N_ERR 0x00000002
-#define N_TXQ 0x00000004
-#define N_INTR 0x00000008
-#define N_RXQ 0x00000010
-
-/********************************************
- *
* Host Interface Defines
*
********************************************/
@@ -37,15 +25,6 @@
/********************************************
*
- * Tx/Rx Buffer Size Defines
- *
- ********************************************/
-
-#define CE_TX_BUFFER_SIZE (64 * 1024)
-#define CE_RX_BUFFER_SIZE (384 * 1024)
-
-/********************************************
- *
* Wlan Interface Defines
*
********************************************/
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 8bad018ed..a6e6fb9f4 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -415,7 +415,7 @@ static int prism2_scan(struct wiphy *wiphy,
ie_len = ie_buf[1] + 2;
memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
ieee80211_get_channel(wiphy, freq),
CFG80211_BSS_FTYPE_UNKNOWN,
@@ -758,9 +758,9 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
priv->band.n_channels = ARRAY_SIZE(prism2_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
- priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.band = NL80211_BAND_2GHZ;
priv->band.ht_cap.ht_supported = false;
- wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
set_wiphy_dev(wiphy, dev);
wiphy->privid = prism2_wiphy_privid;
@@ -771,8 +771,10 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
wiphy->n_cipher_suites = PRISM2_NUM_CIPHER_SUITES;
wiphy->cipher_suites = prism2_cipher_suites;
- if (wiphy_register(wiphy) < 0)
+ if (wiphy_register(wiphy) < 0) {
+ wiphy_free(wiphy);
return NULL;
+ }
return wiphy;
}
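
In wlan-ng's cfg80211.c the registration failure path now releases the wiphy it allocated: wiphy_new() and wiphy_register() are separate steps, and a wiphy that fails to register must still be freed. The pairing in miniature (ops supplied by the caller, names illustrative):

#include <net/cfg80211.h>

static struct wiphy *demo_create_wiphy(struct device *dev,
                                       const struct cfg80211_ops *ops)
{
        struct wiphy *wiphy = wiphy_new(ops, 0);

        if (!wiphy)
                return NULL;
        set_wiphy_dev(wiphy, dev);
        if (wiphy_register(wiphy) < 0) {
                wiphy_free(wiphy);      /* the fix: don't leak on failure */
                return NULL;
        }
        return wiphy;
}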
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 21a92df85..337810750 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -614,7 +614,7 @@ static hfa384x_usbctlx_t *usbctlx_alloc(void)
ctlx = kzalloc(sizeof(*ctlx),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if (ctlx != NULL)
+ if (ctlx)
init_completion(&ctlx->done);
return ctlx;
@@ -797,7 +797,7 @@ static inline struct usbctlx_completor *init_rmem_completor(
----------------------------------------------------------------*/
static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
{
- if (ctlx->usercb != NULL) {
+ if (ctlx->usercb) {
hfa384x_cmdresult_t cmdresult;
if (ctlx->state != CTLX_COMPLETE) {
@@ -2738,7 +2738,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
/* Call the completion function that this
* command was assigned, assuming it has one.
*/
- if (ctlx->cmdcb != NULL) {
+ if (ctlx->cmdcb) {
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
ctlx->cmdcb(hw, ctlx);
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3629,7 +3629,7 @@ static void hfa384x_ctlxout_callback(struct urb *urb)
dbprint_urb(urb);
#endif
if ((urb->status == -ESHUTDOWN) ||
- (urb->status == -ENODEV) || (hw == NULL))
+ (urb->status == -ENODEV) || !hw)
return;
retry:
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0a8f3960d..6354036ff 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -75,8 +75,8 @@
#include "p80211ioctl.h"
#include "p80211req.h"
-static u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
-static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
+static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
+static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
* p80211pb_ether_to_80211
@@ -243,7 +243,6 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
for (i = 0; i < wlandev->spy_number; i++) {
if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
- memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
wlandev->spy_stat[i].level = rxmeta->signal;
wlandev->spy_stat[i].noise = rxmeta->noise;
wlandev->spy_stat[i].qual =
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 88255ce28..90cc8cdcf 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -156,7 +156,7 @@ static int p80211knetdev_open(netdevice_t *netdev)
return -ENODEV;
/* Tell the MSD to open */
- if (wlandev->open != NULL) {
+ if (wlandev->open) {
result = wlandev->open(wlandev);
if (result == 0) {
netif_start_queue(wlandev->netdev);
@@ -186,7 +186,7 @@ static int p80211knetdev_stop(netdevice_t *netdev)
int result = 0;
wlandevice_t *wlandev = netdev->ml_priv;
- if (wlandev->close != NULL)
+ if (wlandev->close)
result = wlandev->close(wlandev);
netif_stop_queue(wlandev->netdev);
@@ -393,7 +393,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
netdev->stats.tx_packets++;
/* count only the packet payload */
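
p80211netdev.c stops writing netdev->trans_start directly and calls netif_trans_update(), the 4.7 helper that updates the watchdog timestamp on the right TX queue. Its place in a transmit handler, as a sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... hand the frame to the hardware here ... */
        netif_trans_update(dev);        /* replaces dev->trans_start = jiffies */
        dev_kfree_skb(skb);             /* sketch consumes the skb itself */
        return NETDEV_TX_OK;
}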
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 810ee68aa..820a0e20a 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -158,7 +158,6 @@ extern int wlan_wext_write;
/* WLAN device type */
typedef struct wlandevice {
- struct wlandevice *next; /* link for list of devices */
void *priv; /* private data for MSD */
/* Subsystem State */
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 301457102..8233bf7af 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -278,7 +278,8 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* Build the PDA we're going to use. */
if (read_cardpda(&pda, wlandev)) {
netdev_err(wlandev->netdev, "load_cardpda failed, exiting.\n");
- return 1;
+ result = 1;
+ goto out;
}
/* read the card's PRI-SUP */
@@ -315,55 +316,58 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
if (result) {
netdev_err(wlandev->netdev,
"Failed to read the data exiting.\n");
- return 1;
+ goto out;
}
result = validate_identity();
-
if (result) {
netdev_err(wlandev->netdev, "Incompatible firmware image.\n");
- return 1;
+ goto out;
}
if (startaddr == 0x00000000) {
netdev_err(wlandev->netdev,
"Can't RAM download a Flash image!\n");
- return 1;
+ result = 1;
+ goto out;
}
/* Make the image chunks */
result = mkimage(fchunk, &nfchunks);
if (result) {
netdev_err(wlandev->netdev, "Failed to make image chunk.\n");
- return 1;
+ goto free_chunks;
}
/* Do any plugging */
result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda);
if (result) {
netdev_err(wlandev->netdev, "Failed to plug data.\n");
- return 1;
+ goto free_chunks;
}
/* Insert any CRCs */
- if (crcimage(fchunk, nfchunks, s3crc, ns3crc)) {
+ result = crcimage(fchunk, nfchunks, s3crc, ns3crc);
+ if (result) {
netdev_err(wlandev->netdev, "Failed to insert all CRCs\n");
- return 1;
+ goto free_chunks;
}
/* Write the image */
result = writeimage(wlandev, fchunk, nfchunks);
if (result) {
netdev_err(wlandev->netdev, "Failed to ramwrite image data.\n");
- return 1;
+ goto free_chunks;
}
+ netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
+
+free_chunks:
/* clear any allocated memory */
free_chunks(fchunk, &nfchunks);
free_srecs();
- netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
-
+out:
return result;
}
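
prism2_fwapply() is reworked from early returns, which leaked the image chunks on late failures, into the kernel's usual goto-label cleanup, so every exit path releases exactly what was acquired. The idiom in miniature:

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_apply(bool step_fails)
{
        int result = 0;
        char *buf = kmalloc(64, GFP_KERNEL);    /* stand-in for the chunks */

        if (!buf)
                return -ENOMEM;

        if (step_fails) {               /* any mid-function failure... */
                result = -EINVAL;
                goto free_buf;          /* ...jumps to the cleanup label */
        }

        /* ... further steps, each with a goto to the right label ... */

free_buf:
        kfree(buf);
        return result;
}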
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 41358bbc6..b26d09ff8 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -8,7 +8,7 @@
{ USB_DEVICE(vid, pid), \
.driver_info = (unsigned long)name }
-static struct usb_device_id usb_prism_tbl[] = {
+static const struct usb_device_id usb_prism_tbl[] = {
PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"),
PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"),
PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"),
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 7eadf922b..d56ef1425 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1130,8 +1130,9 @@ static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
return (var->bits_per_pixel == 8) ? 256 : 16;
}
-static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp, struct fb_info *info)
+static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 26b539bc6..062ece22e 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -355,7 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
/* keep following setting sequence, each setting in
- * the same reg insert idle */
+ * the same reg insert idle
+ */
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x86, 0x00);
xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
@@ -551,7 +552,8 @@ static int XGINew_ReadWriteRest(unsigned short StopAddr,
writel(Position, fbaddr + Position);
}
- usleep_range(500, 1500); /* Fix #1759 Memory Size error in Multi-Adapter. */
+ /* Fix #1759 Memory Size error in Multi-Adapter. */
+ usleep_range(500, 1500);
Position = 0;
@@ -699,11 +701,11 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
break;
case XG42:
/*
- XG42 SR14 D[3] Reserve
- D[2] = 1, Dual Channel
- = 0, Single Channel
-
- It's Different from Other XG40 Series.
+ * XG42 SR14 D[3] Reserve
+ * D[2] = 1, Dual Channel
+ * = 0, Single Channel
+ *
+ * It's Different from Other XG40 Series.
*/
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
pVBInfo->ram_bus = 32; /* 32 bits */
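
The vb_init.c hunks only rewrap comments into checkpatch's block-comment style and keep lines under 80 columns; the usleep_range() call itself is untouched. It sleeps between 500 and 1500 microseconds, and the explicit slack lets the timer subsystem coalesce nearby wakeups, which is why a range is preferred over a fixed delay for waits in this band. Sketched in isolation:

    #include <linux/delay.h>

    /* Let the memory sizing settle; the 1000us of slack allows the
     * scheduler to batch this wakeup with other expiring timers.
     */
    static void settle_dram(void)
    {
            usleep_range(500, 1500);  /* min 500us, max 1500us */
    }
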
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index f97c77d88..50c8ea4f5 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -108,9 +108,9 @@ static void XGI_SetATTRegs(unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
ARdata = 0;
} else if ((pVBInfo->VBInfo &
- (SetCRT2ToTV | SetCRT2ToLCD)) &&
- (pVBInfo->VBInfo & SetInSlaveMode)) {
- ARdata = 0;
+ (SetCRT2ToTV | SetCRT2ToLCD)) &&
+ (pVBInfo->VBInfo & SetInSlaveMode)) {
+ ARdata = 0;
}
}
@@ -1992,7 +1992,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
}
/* LCD+TV can't support in slave mode
- * (Force LCDA+TV->LCDB) */
+ * (Force LCDA+TV->LCDB)
+ */
if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) {
tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
@@ -2983,7 +2984,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if ((pVBInfo->VBInfo & SetCRT2ToHiVision) &&
!(pVBInfo->VBType & VB_SIS301LV) && (resinfo == 7))
- temp -= 2;
+ temp -= 2;
}
/* 0x05 Horizontal Display Start */
@@ -3450,8 +3451,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->TVInfo &
(TVSetYPbPr525p | TVSetYPbPr750p)))
tempbx >>= 1;
- } else
+ } else {
tempbx >>= 1;
+ }
}
tempbx -= 2;
@@ -3839,9 +3841,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
if (pVBInfo->VGAVDE == 525) {
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C)) {
+ | VB_XGI301C))
temp = 0xC6;
- } else
+ else
temp = 0xC4;
xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
@@ -3851,9 +3853,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
if (pVBInfo->VGAVDE == 420) {
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C)) {
+ | VB_XGI301C))
temp = 0x4F;
- } else
+ else
temp = 0x4E;
xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
}
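
The brace churn in vb_setmode.c follows two CodingStyle rules: a branch that is a single statement drops its braces, and when one side of an if/else genuinely needs braces, the other side gets them too. Both cases in one hypothetical helper:

    #include <linux/types.h>

    static u8 pick_temp(bool cond, u8 val, u8 mask)
    {
            u8 temp;

            /* single statements on both sides: no braces */
            if (cond)
                    temp = 0xC6;
            else
                    temp = 0xC4;

            /* one side is a block, so both sides are braced */
            if (cond) {
                    val >>= 1;
                    val |= mask;
            } else {
                    val >>= 1;
            }

            return temp | val;
    }
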
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 45f2c992c..c801deb14 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -58,8 +58,9 @@ static const unsigned char XGI27_cr41[24][3] = {
{0xC4, 0x40, 0x84}, /* 1 CR8A */
{0xC4, 0x40, 0x84}, /* 2 CR8B */
{0xB3, 0x13, 0xa4}, /* 3 CR40[7],
- CR99[2:0],
- CR45[3:0]*/
+ * CR99[2:0],
+ * CR45[3:0]
+ */
{0xf0, 0xf5, 0xf0}, /* 4 CR59 */
{0x90, 0x90, 0x24}, /* 5 CR68 */
{0x77, 0x67, 0x44}, /* 6 CR69 */
@@ -101,9 +102,11 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
{0x38, 0x0a1b, 0x0508, 0x08, 0x00, 0x16},
{0x3a, 0x0e3b, 0x0609, 0x09, 0x00, 0x1e},
{0x3c, 0x0e3b, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE [2003/10/07] */
+ * add CRT2MODE [2003/10/07]
+ */
{0x3d, 0x0e7d, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE */
+ * add CRT2MODE
+ */
{0x40, 0x9a1c, 0x0000, 0x00, 0x04, 0x00},
{0x41, 0x9a1d, 0x0000, 0x00, 0x04, 0x00},
{0x43, 0x0a1c, 0x0306, 0x06, 0x05, 0x06},
@@ -129,7 +132,8 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
{0x64, 0x0a7f, 0x0508, 0x08, 0x00, 0x16},
{0x65, 0x0eff, 0x0609, 0x09, 0x00, 0x1e},
{0x66, 0x0eff, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE */
+ * add CRT2MODE
+ */
{0x68, 0x067b, 0x080b, 0x0b, 0x00, 0x29},
{0x69, 0x06fd, 0x080b, 0x0b, 0x00, 0x29},
{0x6b, 0x07ff, 0x080b, 0x0b, 0x00, 0x29},
@@ -223,38 +227,38 @@ const struct XGI_CRT1TableStruct XGI_CRT1Table[] = {
0x0D, 0x3E, 0xE0, 0x83, 0xDF, 0x0E, 0x90} }, /* 0xb */
{ {0x65, 0x4F, 0x89, 0x57, 0x9F, 0x00, 0x01, 0x00,
0xFB, 0x1F, 0xE6, 0x8A, 0xDF, 0xFC, 0x10} }, /* 0xc */
- { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00, /* ;
- 0D (800x600,56Hz) */
- 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} }, /* ;
- (VCLK 36.0MHz) */
- { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00, /* ;
- 0E (800x600,60Hz) */
- 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} }, /* ;
- (VCLK 40.0MHz) */
- { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00, /* ;
- 0F (800x600,72Hz) */
- 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} }, /* ;
- (VCLK 50.0MHz) */
- { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00, /* ;
- 10 (800x600,75Hz) */
- 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} }, /* ;
- (VCLK 49.5MHz) */
- { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00, /* ;
- 11 (800x600,85Hz) */
- 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} }, /* ;
- (VCLK 56.25MHz) */
- { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60, /* ;
- 12 (800x600,100Hz) */
- 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} }, /* ;
- (VCLK 75.8MHz) */
- { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60, /* ;
- 13 (800x600,120Hz) */
- 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} }, /* ;
- (VCLK 79.411MHz) */
- { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60, /* ;
- 14 (800x600,160Hz) */
- 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} }, /* ;
- (VCLK 105.822MHz) */
+ /* 0D (800x600,56Hz) */
+ { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00,
+ /* (VCLK 36.0MHz) */
+ 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} },
+ /* 0E (800x600,60Hz) */
+ { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00,
+ /* (VCLK 40.0MHz) */
+ 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} },
+ /* 0F (800x600,72Hz) */
+ { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00,
+ /* (VCLK 50.0MHz) */
+ 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} },
+ /* 10 (800x600,75Hz) */
+ { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00,
+ /* (VCLK 49.5MHz) */
+ 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} },
+ /* 11 (800x600,85Hz) */
+ { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00,
+ /* (VCLK 56.25MHz) */
+ 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} },
+ /* 12 (800x600,100Hz) */
+ { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60,
+ /* (VCLK 75.8MHz) */
+ 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} },
+ /* 13 (800x600,120Hz) */
+ { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60,
+ /* (VCLK 79.411MHz) */
+ 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} },
+ /* 14 (800x600,160Hz) */
+ { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60,
+ /* (VCLK 105.822MHz) */
+ 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} },
{ {0x99, 0x7F, 0x9D, 0x84, 0x1A, 0x00, 0x02, 0x00,
0x96, 0x1F, 0x7F, 0x83, 0x7F, 0x97, 0x10} }, /* 0x15 */
{ {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00,
@@ -388,7 +392,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768Data[] = {
static const struct SiS_LCDData XGI_CetLCD1024x768Data[] = {
{1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1344, 806, 1344, 806}, /* 01 (320x350,640x350) */
{1, 1, 1344, 806, 1344, 806}, /* 02 (360x400,720x400) */
{1, 1, 1344, 806, 1344, 806}, /* 03 (720x350) */
@@ -421,7 +426,8 @@ static const struct SiS_LCDData XGI_ExtLCD1280x1024Data[] = {
static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
{1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1688, 1066, 1688, 1066}, /* 01 (320x350,640x350) */
{1, 1, 1688, 1066, 1688, 1066}, /* 02 (360x400,720x400) */
{1, 1, 1688, 1066, 1688, 1066}, /* 03 (720x350) */
@@ -434,7 +440,8 @@ static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
{211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
{211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */
{211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */
@@ -442,13 +449,15 @@ static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
{211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */
{211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */
{1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz
- w/o Scaling) */
+ * w/o Scaling)
+ */
{1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
};
static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
{4, 1, 1620, 420, 2160, 1250}, /* 00 (320x200,320x400,
- 640x200,640x400)*/
+ * 640x200,640x400)
+ */
{27, 7, 1920, 375, 2160, 1250}, /* 01 (320x350,640x350) */
{4, 1, 1620, 420, 2160, 1250}, /* 02 (360x400,720x400)*/
{27, 7, 1920, 375, 2160, 1250}, /* 03 (720x350) */
@@ -462,7 +471,8 @@ static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
static const struct SiS_LCDData XGI_StLCD1600x1200Data[] = {
{27, 4, 800, 500, 2160, 1250}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{27, 4, 800, 500, 2160, 1250}, /* 01 (320x350,640x350) */
{27, 4, 800, 500, 2160, 1250}, /* 02 (360x400,720x400) */
{27, 4, 800, 500, 2160, 1250}, /* 03 (720x350) */
@@ -489,7 +499,8 @@ static const struct SiS_LCDData XGI_NoScalingData[] = {
static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
{42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */
{42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */
{48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */
@@ -500,7 +511,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
{1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
{1, 1, 1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */
{1, 1, 1312, 800, 1312, 800}, /* ; 03 (720x350) */
@@ -511,7 +523,8 @@ static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
{211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
{211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */
{211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */
@@ -525,7 +538,8 @@ static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
static const struct SiS_LCDData XGI_NoScalingDatax75[] = {
{1, 1, 800, 449, 800, 449}, /* ; 00 (320x200, 320x400,
- 640x200, 640x400) */
+ * 640x200, 640x400)
+ */
{1, 1, 800, 449, 800, 449}, /* ; 01 (320x350, 640x350) */
{1, 1, 900, 449, 900, 449}, /* ; 02 (360x400, 720x400) */
{1, 1, 900, 449, 900, 449}, /* ; 03 (720x350) */
@@ -732,7 +746,8 @@ static const struct XGI_LCDDesStruct XGI_StLCDDes1600x1200Data[] = {
static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = {
{9, 657, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{9, 657, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
{9, 657, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
{9, 657, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -818,7 +833,8 @@ static const struct XGI_LCDDesStruct XGI_CetLCDDes1280x1024x75Data[] = {
/* Scaling LCD 75Hz */
static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = {
{9, 657, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{9, 657, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
{9, 738, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
{9, 738, 448, 355, 108, 2}, /* ; 03 (720x350) */
@@ -873,7 +889,8 @@ static const struct SiS_TVData XGI_ExtNTSCData[] = {
static const struct SiS_TVData XGI_St1HiTVData[] = {
{1, 1, 892, 563, 690, 800, 0, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
{1, 1, 1000, 563, 785, 800, 0, 0, 0}, /* 02 (360x400,720x400) */
{1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -883,7 +900,8 @@ static const struct SiS_TVData XGI_St1HiTVData[] = {
static const struct SiS_TVData XGI_St2HiTVData[] = {
{3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
{3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 02 (360x400,720x400) */
{1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -893,7 +911,8 @@ static const struct SiS_TVData XGI_St2HiTVData[] = {
static const struct SiS_TVData XGI_ExtHiTVData[] = {
{6, 1, 840, 563, 1632, 960, 0, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 01 (320x350,640x350) */
{3, 1, 840, 483, 1632, 960, 0, 0, 0}, /* 02 (360x400,720x400) */
{3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 03 (720x350) */
@@ -948,7 +967,8 @@ static const struct SiS_TVData XGI_StYPbPr525pData[] = {
static const struct SiS_TVData XGI_ExtYPbPr750pData[] = {
{ 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */
{ 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 02 (360x400,720x400) */
{24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 03 (720x350) */
@@ -1269,7 +1289,8 @@ static const struct SiS_LVDSData XGI_LVDSNoScalingDatax75[] = {
{1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */
{1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */
{1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz)
- ;;[ycchen] 12/19/02 */
+ * ;;[ycchen] 12/19/02
+ */
{2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */
{1688, 806, 1688, 806}, /* ; 0A (1280x768x75Hz) */
};
@@ -1364,7 +1385,8 @@ static const struct SiS_LVDSData XGI_LVDS1600x1200Des_1[] = {
static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] = {
{0, 648, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{0, 648, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
{0, 648, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
{0, 648, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -1435,7 +1457,8 @@ static const struct SiS_LVDSData XGI_LVDS1280x1024Des_2x75[] = {
/* Scaling LCD 75Hz */
static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = {
{0, 648, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{0, 648, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
{0, 729, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
{0, 729, 448, 355, 108, 2}, /* ; 03 (720x350) */
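
All of the vb_table.h hunks apply the same two checkpatch block-comment rules to the trailing mode comments: continuation lines start with an aligned asterisk, and the comment terminator sits on a line of its own. A hypothetical table entry in the resulting style:

    static const unsigned short example_data[][6] = {  /* hypothetical table */
            { 1, 1, 1344, 806, 1344, 806 }, /* 00 (320x200, 320x400,
                                             * 640x200, 640x400)
                                             */
            { 1, 1, 1344, 806, 1344, 806 }, /* 01 (320x350, 640x350) */
    };
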
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index f613f54d5..08db58b39 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -13,7 +13,7 @@ static inline u8 xgifb_reg_get(unsigned long port, u8 index)
}
static inline void xgifb_reg_and_or(unsigned long port, u8 index,
- unsigned data_and, unsigned data_or)
+ unsigned int data_and, unsigned int data_or)
{
u8 temp;
@@ -22,7 +22,8 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
xgifb_reg_set(port, index, temp);
}
-static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
+static inline void xgifb_reg_and(unsigned long port, u8 index,
+ unsigned int data_and)
{
u8 temp;
@@ -31,7 +32,8 @@ static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and
xgifb_reg_set(port, index, temp);
}
-static inline void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
+static inline void xgifb_reg_or(unsigned long port, u8 index,
+ unsigned int data_or)
{
u8 temp;